xref: /openbsd-src/sys/dev/pci/drm/ttm/ttm_bo_vm.c (revision f46a341eeea409411cd35db6a031d18c6a612c5d)
17f4dd379Sjsg /* SPDX-License-Identifier: GPL-2.0 OR MIT */
21099013bSjsg /**************************************************************************
31099013bSjsg  *
41099013bSjsg  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
51099013bSjsg  * All Rights Reserved.
61099013bSjsg  *
71099013bSjsg  * Permission is hereby granted, free of charge, to any person obtaining a
81099013bSjsg  * copy of this software and associated documentation files (the
91099013bSjsg  * "Software"), to deal in the Software without restriction, including
101099013bSjsg  * without limitation the rights to use, copy, modify, merge, publish,
111099013bSjsg  * distribute, sub license, and/or sell copies of the Software, and to
121099013bSjsg  * permit persons to whom the Software is furnished to do so, subject to
131099013bSjsg  * the following conditions:
141099013bSjsg  *
151099013bSjsg  * The above copyright notice and this permission notice (including the
161099013bSjsg  * next paragraph) shall be included in all copies or substantial portions
171099013bSjsg  * of the Software.
181099013bSjsg  *
191099013bSjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
201099013bSjsg  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
211099013bSjsg  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
221099013bSjsg  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
231099013bSjsg  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
241099013bSjsg  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
251099013bSjsg  * USE OR OTHER DEALINGS IN THE SOFTWARE.
261099013bSjsg  *
271099013bSjsg  **************************************************************************/
281099013bSjsg /*
291099013bSjsg  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
301099013bSjsg  */
311099013bSjsg 
321099013bSjsg #define pr_fmt(fmt) "[TTM] " fmt
331099013bSjsg 
34f005ef32Sjsg #include <drm/ttm/ttm_bo.h>
357f4dd379Sjsg #include <drm/ttm/ttm_placement.h>
36f005ef32Sjsg #include <drm/ttm/ttm_tt.h>
37f005ef32Sjsg 
385ca02815Sjsg #include <drm/drm_drv.h>
395ca02815Sjsg #include <drm/drm_managed.h>
401099013bSjsg 
417f4dd379Sjsg #ifdef __linux__
421099013bSjsg 
437f4dd379Sjsg static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
447f4dd379Sjsg 				struct vm_fault *vmf)
457f4dd379Sjsg {
461bb76ff1Sjsg 	long err = 0;
471099013bSjsg 
487ccd5a2cSjsg 	/*
497ccd5a2cSjsg 	 * Quick non-stalling check for idle.
507ccd5a2cSjsg 	 */
511bb76ff1Sjsg 	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
521bb76ff1Sjsg 		return 0;
537ccd5a2cSjsg 
547ccd5a2cSjsg 	/*
55ad8b1aafSjsg 	 * If possible, avoid waiting for GPU with mmap_lock
56c349dbc7Sjsg 	 * held.  We only do this if the fault allows retry and this
57c349dbc7Sjsg 	 * is the first attempt.
587ccd5a2cSjsg 	 */
59c349dbc7Sjsg 	if (fault_flag_allow_retry_first(vmf->flags)) {
607ccd5a2cSjsg 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
611bb76ff1Sjsg 			return VM_FAULT_RETRY;
627ccd5a2cSjsg 
637f4dd379Sjsg 		ttm_bo_get(bo);
64ad8b1aafSjsg 		mmap_read_unlock(vmf->vma->vm_mm);
651bb76ff1Sjsg 		(void)dma_resv_wait_timeout(bo->base.resv,
661bb76ff1Sjsg 					    DMA_RESV_USAGE_KERNEL, true,
671bb76ff1Sjsg 					    MAX_SCHEDULE_TIMEOUT);
68c349dbc7Sjsg 		dma_resv_unlock(bo->base.resv);
697f4dd379Sjsg 		ttm_bo_put(bo);
701bb76ff1Sjsg 		return VM_FAULT_RETRY;
717ccd5a2cSjsg 	}
727ccd5a2cSjsg 
737ccd5a2cSjsg 	/*
747ccd5a2cSjsg 	 * Ordinary wait.
757ccd5a2cSjsg 	 */
761bb76ff1Sjsg 	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
771bb76ff1Sjsg 				    MAX_SCHEDULE_TIMEOUT);
781bb76ff1Sjsg 	if (unlikely(err < 0)) {
791bb76ff1Sjsg 		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
807f4dd379Sjsg 			VM_FAULT_NOPAGE;
817f4dd379Sjsg 	}
827f4dd379Sjsg 
831bb76ff1Sjsg 	return 0;
847ccd5a2cSjsg }
857ccd5a2cSjsg 
867f4dd379Sjsg static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
877f4dd379Sjsg 				       unsigned long page_offset)
887ccd5a2cSjsg {
895ca02815Sjsg 	struct ttm_device *bdev = bo->bdev;
907f4dd379Sjsg 
915ca02815Sjsg 	if (bdev->funcs->io_mem_pfn)
925ca02815Sjsg 		return bdev->funcs->io_mem_pfn(bo, page_offset);
937f4dd379Sjsg 
945ca02815Sjsg 	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
957f4dd379Sjsg }
967f4dd379Sjsg 
97c349dbc7Sjsg /**
98c349dbc7Sjsg  * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
99c349dbc7Sjsg  * @bo: The buffer object
100c349dbc7Sjsg  * @vmf: The fault structure handed to the callback
101c349dbc7Sjsg  *
1021bb76ff1Sjsg  * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
103c349dbc7Sjsg  * during long waits, and after the wait the callback will be restarted. This
104c349dbc7Sjsg  * is to allow other threads using the same virtual memory space concurrent
105c349dbc7Sjsg  * access to map() and unmap() completely unrelated buffer objects. TTM buffer
106c349dbc7Sjsg  * object reservations sometimes wait for GPU and should therefore be
107c349dbc7Sjsg  * considered long waits. This function reserves the buffer object interruptibly
108c349dbc7Sjsg  * taking this into account. Starvation is avoided by the vm system not
109c349dbc7Sjsg  * allowing too many repeated restarts.
110c349dbc7Sjsg  * This function is intended to be used in customized fault() and _mkwrite()
111c349dbc7Sjsg  * handlers.
112c349dbc7Sjsg  *
113c349dbc7Sjsg  * Return:
114c349dbc7Sjsg  *    0 on success and the bo was reserved.
115c349dbc7Sjsg  *    VM_FAULT_RETRY if blocking wait.
116c349dbc7Sjsg  *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
117c349dbc7Sjsg  */
118c349dbc7Sjsg vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
119c349dbc7Sjsg 			     struct vm_fault *vmf)
1207f4dd379Sjsg {
1217ccd5a2cSjsg 	/*
1227ccd5a2cSjsg 	 * Work around locking order reversal in fault / nopfn
123ad8b1aafSjsg 	 * between mmap_lock and bo_reserve: Perform a trylock operation
1247ccd5a2cSjsg 	 * for reserve, and if it fails, retry the fault after waiting
1257ccd5a2cSjsg 	 * for the buffer to become unreserved.
1267ccd5a2cSjsg 	 */
127c349dbc7Sjsg 	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
128c349dbc7Sjsg 		/*
129c349dbc7Sjsg 		 * If the fault allows retry and this is the first
130ad8b1aafSjsg 		 * fault attempt, we try to release the mmap_lock
131c349dbc7Sjsg 		 * before waiting
132c349dbc7Sjsg 		 */
133c349dbc7Sjsg 		if (fault_flag_allow_retry_first(vmf->flags)) {
1347ccd5a2cSjsg 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1357f4dd379Sjsg 				ttm_bo_get(bo);
136ad8b1aafSjsg 				mmap_read_unlock(vmf->vma->vm_mm);
137c349dbc7Sjsg 				if (!dma_resv_lock_interruptible(bo->base.resv,
138c349dbc7Sjsg 								 NULL))
139c349dbc7Sjsg 					dma_resv_unlock(bo->base.resv);
1407f4dd379Sjsg 				ttm_bo_put(bo);
1417ccd5a2cSjsg 			}
1427ccd5a2cSjsg 
1437ccd5a2cSjsg 			return VM_FAULT_RETRY;
1447ccd5a2cSjsg 		}
1457ccd5a2cSjsg 
146c349dbc7Sjsg 		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
1477ccd5a2cSjsg 			return VM_FAULT_NOPAGE;
1487ccd5a2cSjsg 	}
1497ccd5a2cSjsg 
1505ca02815Sjsg 	/*
1515ca02815Sjsg 	 * Refuse to fault imported pages. This should be handled
1525ca02815Sjsg 	 * (if at all) by redirecting mmap to the exporter.
1535ca02815Sjsg 	 */
1541bb76ff1Sjsg 	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
1551bb76ff1Sjsg 		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
1565ca02815Sjsg 			dma_resv_unlock(bo->base.resv);
1575ca02815Sjsg 			return VM_FAULT_SIGBUS;
1585ca02815Sjsg 		}
1591bb76ff1Sjsg 	}
1605ca02815Sjsg 
161c349dbc7Sjsg 	return 0;
162c349dbc7Sjsg }
163c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_reserve);
164c349dbc7Sjsg 
165c349dbc7Sjsg /**
166c349dbc7Sjsg  * ttm_bo_vm_fault_reserved - TTM fault helper
167c349dbc7Sjsg  * @vmf: The struct vm_fault given as argument to the fault callback
168c349dbc7Sjsg  * @prot: The page protection to be used for this memory area.
169c349dbc7Sjsg  * @num_prefault: Maximum number of prefault pages. The caller may want to
170c349dbc7Sjsg  * specify this based on madvise settings and the size of the GPU object
171c349dbc7Sjsg  * backed by the memory.
172c349dbc7Sjsg  *
173c349dbc7Sjsg  * This function inserts one or more page table entries pointing to the
174c349dbc7Sjsg  * memory backing the buffer object, and then returns a return code
175c349dbc7Sjsg  * instructing the caller to retry the page access.
176c349dbc7Sjsg  *
177c349dbc7Sjsg  * Return:
178c349dbc7Sjsg  *   VM_FAULT_NOPAGE on success or pending signal
179c349dbc7Sjsg  *   VM_FAULT_SIGBUS on unspecified error
180c349dbc7Sjsg  *   VM_FAULT_OOM on out-of-memory
181c349dbc7Sjsg  *   VM_FAULT_RETRY if retryable wait
182c349dbc7Sjsg  */
183c349dbc7Sjsg vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
184c349dbc7Sjsg 				    pgprot_t prot,
1855ca02815Sjsg 				    pgoff_t num_prefault)
186c349dbc7Sjsg {
187c349dbc7Sjsg 	struct vm_area_struct *vma = vmf->vma;
188c349dbc7Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
1895ca02815Sjsg 	struct ttm_device *bdev = bo->bdev;
190c349dbc7Sjsg 	unsigned long page_offset;
191c349dbc7Sjsg 	unsigned long page_last;
192c349dbc7Sjsg 	unsigned long pfn;
193c349dbc7Sjsg 	struct ttm_tt *ttm = NULL;
194c349dbc7Sjsg 	struct vm_page *page;
195c349dbc7Sjsg 	int err;
196c349dbc7Sjsg 	pgoff_t i;
197c349dbc7Sjsg 	vm_fault_t ret = VM_FAULT_NOPAGE;
198c349dbc7Sjsg 	unsigned long address = vmf->address;
199c349dbc7Sjsg 
2007ccd5a2cSjsg 	/*
2017ccd5a2cSjsg 	 * Wait for buffer data in transit, due to a pipelined
2027ccd5a2cSjsg 	 * move.
2037ccd5a2cSjsg 	 */
2047f4dd379Sjsg 	ret = ttm_bo_vm_fault_idle(bo, vmf);
205c349dbc7Sjsg 	if (unlikely(ret != 0))
2067f4dd379Sjsg 		return ret;
2077ccd5a2cSjsg 
2085ca02815Sjsg 	err = ttm_mem_io_reserve(bdev, bo->resource);
209c349dbc7Sjsg 	if (unlikely(err != 0))
210ad8b1aafSjsg 		return VM_FAULT_SIGBUS;
2117ccd5a2cSjsg 
2127ccd5a2cSjsg 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
213c349dbc7Sjsg 		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
2147ccd5a2cSjsg 	page_last = vma_pages(vma) + vma->vm_pgoff -
215c349dbc7Sjsg 		drm_vma_node_start(&bo->base.vma_node);
2167ccd5a2cSjsg 
217f005ef32Sjsg 	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
218ad8b1aafSjsg 		return VM_FAULT_SIGBUS;
2197ccd5a2cSjsg 
2205ca02815Sjsg 	prot = ttm_io_prot(bo, bo->resource, prot);
2215ca02815Sjsg 	if (!bo->resource->bus.is_iomem) {
2227f4dd379Sjsg 		struct ttm_operation_ctx ctx = {
223f005ef32Sjsg 			.interruptible = true,
2247f4dd379Sjsg 			.no_wait_gpu = false,
2255ca02815Sjsg 			.force_alloc = true
2267f4dd379Sjsg 		};
2277f4dd379Sjsg 
2287ccd5a2cSjsg 		ttm = bo->ttm;
229f005ef32Sjsg 		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
230f005ef32Sjsg 		if (err) {
231f005ef32Sjsg 			if (err == -EINTR || err == -ERESTARTSYS ||
232f005ef32Sjsg 			    err == -EAGAIN)
233f005ef32Sjsg 				return VM_FAULT_NOPAGE;
234f005ef32Sjsg 
235f005ef32Sjsg 			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
236f005ef32Sjsg 			return VM_FAULT_SIGBUS;
237f005ef32Sjsg 		}
238c349dbc7Sjsg 	} else {
239c349dbc7Sjsg 		/* Iomem should not be marked encrypted */
240c349dbc7Sjsg 		prot = pgprot_decrypted(prot);
241c349dbc7Sjsg 	}
242c349dbc7Sjsg 
2437ccd5a2cSjsg 	/*
2447ccd5a2cSjsg 	 * Speculatively prefault a number of pages. Only error on
2457ccd5a2cSjsg 	 * first page.
2467ccd5a2cSjsg 	 */
247c349dbc7Sjsg 	for (i = 0; i < num_prefault; ++i) {
2485ca02815Sjsg 		if (bo->resource->bus.is_iomem) {
2497f4dd379Sjsg 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
2507f4dd379Sjsg 		} else {
2517ccd5a2cSjsg 			page = ttm->pages[page_offset];
2527ccd5a2cSjsg 			if (unlikely(!page && i == 0)) {
253ad8b1aafSjsg 				return VM_FAULT_OOM;
2547ccd5a2cSjsg 			} else if (unlikely(!page)) {
2557ccd5a2cSjsg 				break;
2567ccd5a2cSjsg 			}
2577ccd5a2cSjsg 			pfn = page_to_pfn(page);
2587ccd5a2cSjsg 		}
2597ccd5a2cSjsg 
260c349dbc7Sjsg 		/*
261c349dbc7Sjsg 		 * Note that the value of @prot at this point may differ from
262c349dbc7Sjsg 		 * the value of @vma->vm_page_prot in the caching- and
263c349dbc7Sjsg 		 * encryption bits. This is because the exact location of the
264c349dbc7Sjsg 		 * data may not be known at mmap() time and may also change
265c349dbc7Sjsg 		 * at arbitrary times while the data is mmap'ed.
266f005ef32Sjsg 		 * See vmf_insert_pfn_prot() for a discussion.
267c349dbc7Sjsg 		 */
268c349dbc7Sjsg 		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
2697ccd5a2cSjsg 
270d042e597Sjsg 		/* Never error on prefaulted PTEs */
271d042e597Sjsg 		if (unlikely((ret & VM_FAULT_ERROR))) {
272d042e597Sjsg 			if (i == 0)
273ad8b1aafSjsg 				return VM_FAULT_NOPAGE;
274d042e597Sjsg 			else
275d042e597Sjsg 				break;
276d042e597Sjsg 		}
2777ccd5a2cSjsg 
2787ccd5a2cSjsg 		address += PAGE_SIZE;
2797ccd5a2cSjsg 		if (unlikely(++page_offset >= page_last))
2807ccd5a2cSjsg 			break;
2817ccd5a2cSjsg 	}
2827f4dd379Sjsg 	return ret;
2837ccd5a2cSjsg }
284c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
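/*
 * Usage sketch (illustrative, not compiled here): a driver's customized
 * fault() handler pairs ttm_bo_vm_reserve() with ttm_bo_vm_fault_reserved()
 * and drops the reservation itself unless a retry was requested with
 * FAULT_FLAG_RETRY_NOWAIT.  The handler name and the use of VM_RAND_READ
 * (set by madvise(MADV_RANDOM)) to shrink the prefault window are
 * assumptions made for the example, not part of the TTM API.
 *
 *	static vm_fault_t mydrv_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		pgoff_t num_prefault;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		num_prefault = (vmf->vma->vm_flags & VM_RAND_READ) ? 1 :
 *			       TTM_BO_VM_NUM_PREFAULT;
 *		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 *					       num_prefault);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */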
2857f4dd379Sjsg 
2865ca02815Sjsg static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
2875ca02815Sjsg {
2885ca02815Sjsg 	struct page *dummy_page = (struct page *)res;
2895ca02815Sjsg 
2905ca02815Sjsg 	__free_page(dummy_page);
2915ca02815Sjsg }
2925ca02815Sjsg 
2935ca02815Sjsg vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
2945ca02815Sjsg {
2955ca02815Sjsg 	struct vm_area_struct *vma = vmf->vma;
2965ca02815Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
2975ca02815Sjsg 	struct drm_device *ddev = bo->base.dev;
2985ca02815Sjsg 	vm_fault_t ret = VM_FAULT_NOPAGE;
2995ca02815Sjsg 	unsigned long address;
3005ca02815Sjsg 	unsigned long pfn;
3015ca02815Sjsg 	struct page *page;
3025ca02815Sjsg 
3035ca02815Sjsg 	/* Allocate a new dummy page to map the entire VA range in this VMA to it */
3045ca02815Sjsg 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3055ca02815Sjsg 	if (!page)
3065ca02815Sjsg 		return VM_FAULT_OOM;
3075ca02815Sjsg 
3085ca02815Sjsg 	/* Set the page to be freed using a drmm release action */
3095ca02815Sjsg 	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
3105ca02815Sjsg 		return VM_FAULT_OOM;
3115ca02815Sjsg 
3125ca02815Sjsg 	pfn = page_to_pfn(page);
3135ca02815Sjsg 
3145ca02815Sjsg 	/* Prefault the entire VMA range right away to avoid further faults */
3155ca02815Sjsg 	for (address = vma->vm_start; address < vma->vm_end;
3165ca02815Sjsg 	     address += PAGE_SIZE)
3175ca02815Sjsg 		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
3185ca02815Sjsg 
3195ca02815Sjsg 	return ret;
3205ca02815Sjsg }
3215ca02815Sjsg EXPORT_SYMBOL(ttm_bo_vm_dummy_page);
3225ca02815Sjsg 
323c349dbc7Sjsg vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
324c349dbc7Sjsg {
325c349dbc7Sjsg 	struct vm_area_struct *vma = vmf->vma;
326c349dbc7Sjsg 	pgprot_t prot;
327c349dbc7Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
3285ca02815Sjsg 	struct drm_device *ddev = bo->base.dev;
329c349dbc7Sjsg 	vm_fault_t ret;
3305ca02815Sjsg 	int idx;
331c349dbc7Sjsg 
332c349dbc7Sjsg 	ret = ttm_bo_vm_reserve(bo, vmf);
333c349dbc7Sjsg 	if (ret)
334c349dbc7Sjsg 		return ret;
335c349dbc7Sjsg 
336c349dbc7Sjsg 	prot = vma->vm_page_prot;
3375ca02815Sjsg 	if (drm_dev_enter(ddev, &idx)) {
3385ca02815Sjsg 		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
3395ca02815Sjsg 		drm_dev_exit(idx);
3405ca02815Sjsg 	} else {
3415ca02815Sjsg 		ret = ttm_bo_vm_dummy_page(vmf, prot);
3425ca02815Sjsg 	}
343c349dbc7Sjsg 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
344c349dbc7Sjsg 		return ret;
345c349dbc7Sjsg 
346c349dbc7Sjsg 	dma_resv_unlock(bo->base.resv);
347c349dbc7Sjsg 
348c349dbc7Sjsg 	return ret;
349c349dbc7Sjsg }
350c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_fault);
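/*
 * On Linux, drivers that need no extra work in the fault path can plug
 * these helpers straight into their vm_operations_struct; drivers that
 * do (for example to take a driver lock first) usually wrap only .fault.
 * A minimal sketch with assumed driver names:
 *
 *	static vm_fault_t mydrv_gem_fault(struct vm_fault *vmf)
 *	{
 *		// driver-specific locking would go here
 *		return ttm_bo_vm_fault(vmf);
 *	}
 *
 *	static const struct vm_operations_struct mydrv_gem_vm_ops = {
 *		.fault = mydrv_gem_fault,
 *		.open = ttm_bo_vm_open,
 *		.close = ttm_bo_vm_close,
 *		.access = ttm_bo_vm_access,
 *	};
 */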
351c349dbc7Sjsg 
352c349dbc7Sjsg #else /* !__linux__ */
3537f4dd379Sjsg 
3547f4dd379Sjsg static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
3557f4dd379Sjsg     struct uvm_faultinfo *ufi)
3567f4dd379Sjsg {
3571bb76ff1Sjsg 	long err = 0;
3587f4dd379Sjsg 
3597f4dd379Sjsg 	/*
3607f4dd379Sjsg 	 * Quick non-stalling check for idle.
3617f4dd379Sjsg 	 */
3621bb76ff1Sjsg 	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
3631bb76ff1Sjsg 		return 0;
3647f4dd379Sjsg 
36595cdf236Skettenis #ifdef __linux__
3667f4dd379Sjsg 	/*
367ad8b1aafSjsg 	 * If possible, avoid waiting for GPU with mmap_lock
36895cdf236Skettenis 	 * held.  We only do this if the fault allows retry and this
36995cdf236Skettenis 	 * is the first attempt.
3707f4dd379Sjsg 	 */
37195cdf236Skettenis 	if (fault_flag_allow_retry_first(vmf->flags)) {
37295cdf236Skettenis 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3731bb76ff1Sjsg 			return VM_FAULT_RETRY;
3747f4dd379Sjsg 
3757f4dd379Sjsg 		ttm_bo_get(bo);
376ad8b1aafSjsg 		mmap_read_unlock(vmf->vma->vm_mm);
3777f4dd379Sjsg 		(void) dma_fence_wait(bo->moving, true);
3781bb76ff1Sjsg 		(void)dma_resv_wait_timeout(bo->base.resv,
3791bb76ff1Sjsg 					    DMA_RESV_USAGE_KERNEL, true,
3801bb76ff1Sjsg 					    MAX_SCHEDULE_TIMEOUT);
38124a3fa5dSjsg 		dma_resv_unlock(bo->base.resv);
3827f4dd379Sjsg 		ttm_bo_put(bo);
3831bb76ff1Sjsg 		return VM_FAULT_RETRY;
38495cdf236Skettenis 	}
38595cdf236Skettenis #endif
38695cdf236Skettenis 
38795cdf236Skettenis 	/*
38895cdf236Skettenis 	 * Ordinary wait.
38995cdf236Skettenis 	 */
3901bb76ff1Sjsg 	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
3911bb76ff1Sjsg 				    MAX_SCHEDULE_TIMEOUT);
3921bb76ff1Sjsg 	if (unlikely(err < 0)) {
3931bb76ff1Sjsg 		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
39495cdf236Skettenis 			VM_FAULT_NOPAGE;
39595cdf236Skettenis 	}
3967f4dd379Sjsg 
3971bb76ff1Sjsg 	return 0;
3987f4dd379Sjsg }
3997f4dd379Sjsg 
40095cdf236Skettenis static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
40195cdf236Skettenis 				       unsigned long page_offset)
4021099013bSjsg {
4035ca02815Sjsg 	struct ttm_device *bdev = bo->bdev;
4041099013bSjsg 
4055ca02815Sjsg 	if (bdev->funcs->io_mem_pfn)
4065ca02815Sjsg 		return bdev->funcs->io_mem_pfn(bo, page_offset);
40795cdf236Skettenis 
4085ca02815Sjsg 	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
40995cdf236Skettenis }
41095cdf236Skettenis 
41195cdf236Skettenis /**
41295cdf236Skettenis  * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
41395cdf236Skettenis  * @bo: The buffer object
41595cdf236Skettenis  *
4161bb76ff1Sjsg  * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
41795cdf236Skettenis  * during long waits, and after the wait the callback will be restarted. This
41895cdf236Skettenis  * is to allow other threads using the same virtual memory space concurrent
41995cdf236Skettenis  * access to map() and unmap() completely unrelated buffer objects. TTM buffer
42095cdf236Skettenis  * object reservations sometimes wait for GPU and should therefore be
42195cdf236Skettenis  * considered long waits. This function reserves the buffer object interruptibly
42295cdf236Skettenis  * taking this into account. Starvation is avoided by the vm system not
42395cdf236Skettenis  * allowing too many repeated restarts.
42495cdf236Skettenis  * This function is intended to be used in customized fault() and _mkwrite()
42595cdf236Skettenis  * handlers.
42695cdf236Skettenis  *
42795cdf236Skettenis  * Return:
42895cdf236Skettenis  *    0 on success and the bo was reserved.
42995cdf236Skettenis  *    VM_FAULT_RETRY if blocking wait.
43095cdf236Skettenis  *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
43195cdf236Skettenis  */
43295cdf236Skettenis vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo)
43395cdf236Skettenis {
4341099013bSjsg 	/*
4351099013bSjsg 	 * Work around locking order reversal in fault / nopfn
436ad8b1aafSjsg 	 * between mmap_lock and bo_reserve: Perform a trylock operation
4377ccd5a2cSjsg 	 * for reserve, and if it fails, retry the fault after waiting
4387ccd5a2cSjsg 	 * for the buffer to become unreserved.
4391099013bSjsg 	 */
44024a3fa5dSjsg 	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
44195cdf236Skettenis #ifdef __linux__
44295cdf236Skettenis 		/*
44395cdf236Skettenis 		 * If the fault allows retry and this is the first
444ad8b1aafSjsg 		 * fault attempt, we try to release the mmap_lock
44595cdf236Skettenis 		 * before waiting
44695cdf236Skettenis 		 */
44795cdf236Skettenis 		if (fault_flag_allow_retry_first(vmf->flags)) {
44895cdf236Skettenis 			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
4497f4dd379Sjsg 				ttm_bo_get(bo);
450ad8b1aafSjsg 				mmap_read_unlock(vmf->vma->vm_mm);
451c349dbc7Sjsg 				if (!dma_resv_lock_interruptible(bo->base.resv,
452c349dbc7Sjsg 								 NULL))
453c349dbc7Sjsg 					dma_resv_unlock(bo->base.resv);
4547f4dd379Sjsg 				ttm_bo_put(bo);
4551099013bSjsg 			}
4561099013bSjsg 
45795cdf236Skettenis 			return VM_FAULT_RETRY;
45895cdf236Skettenis 		}
45995cdf236Skettenis #endif
46095cdf236Skettenis 
46195cdf236Skettenis 		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
46295cdf236Skettenis 			return VM_FAULT_NOPAGE;
46395cdf236Skettenis 	}
46495cdf236Skettenis 
4655ca02815Sjsg 	/*
4665ca02815Sjsg 	 * Refuse to fault imported pages. This should be handled
4675ca02815Sjsg 	 * (if at all) by redirecting mmap to the exporter.
4685ca02815Sjsg 	 */
4691bb76ff1Sjsg 	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
4701bb76ff1Sjsg 		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
4715ca02815Sjsg 			dma_resv_unlock(bo->base.resv);
4725ca02815Sjsg 			return VM_FAULT_SIGBUS;
4735ca02815Sjsg 		}
4741bb76ff1Sjsg 	}
4755ca02815Sjsg 
47695cdf236Skettenis 	return 0;
47795cdf236Skettenis }
47895cdf236Skettenis 
47995cdf236Skettenis vm_fault_t ttm_bo_vm_fault_reserved(struct uvm_faultinfo *ufi,
48095cdf236Skettenis 				    vaddr_t vaddr,
48195cdf236Skettenis 				    pgoff_t num_prefault,
48295cdf236Skettenis 				    pgoff_t fault_page_size)
48395cdf236Skettenis {
48495cdf236Skettenis 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
48595cdf236Skettenis 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
4865ca02815Sjsg 	struct ttm_device *bdev = bo->bdev;
48795cdf236Skettenis 	unsigned long page_offset;
48895cdf236Skettenis 	unsigned long page_last;
48995cdf236Skettenis 	unsigned long pfn;
49095cdf236Skettenis 	struct ttm_tt *ttm = NULL;
49195cdf236Skettenis 	struct vm_page *page;
49295cdf236Skettenis 	bus_addr_t addr;
49395cdf236Skettenis 	paddr_t paddr;
49495cdf236Skettenis 	vm_prot_t prot;
49595cdf236Skettenis 	int pmap_flags;
49695cdf236Skettenis 	int err;
49795cdf236Skettenis 	pgoff_t i;
49895cdf236Skettenis 	vm_fault_t ret = VM_FAULT_NOPAGE;
49995cdf236Skettenis 	unsigned long address = (unsigned long)vaddr;
50095cdf236Skettenis 
5017ccd5a2cSjsg 	/*
5021099013bSjsg 	 * Wait for buffer data in transit, due to a pipelined
5031099013bSjsg 	 * move.
5041099013bSjsg 	 */
5057f4dd379Sjsg 	ret = ttm_bo_vm_fault_idle(bo, ufi);
50695cdf236Skettenis 	if (unlikely(ret != 0))
5077f4dd379Sjsg 		return ret;
508ad8b1aafSjsg 	ret = VM_FAULT_NOPAGE;
5091099013bSjsg 
5105ca02815Sjsg 	err = ttm_mem_io_reserve(bdev, bo->resource);
51195cdf236Skettenis 	if (unlikely(err != 0))
512ad8b1aafSjsg 		return VM_FAULT_SIGBUS;
5131099013bSjsg 
5141099013bSjsg 	page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) +
515c349dbc7Sjsg 	    drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT);
5161099013bSjsg 	page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) +
517c349dbc7Sjsg 	    drm_vma_node_start(&bo->base.vma_node) - (ufi->entry->offset >> PAGE_SHIFT);
5181099013bSjsg 
519f005ef32Sjsg 	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
520ad8b1aafSjsg 		return VM_FAULT_SIGBUS;
5211099013bSjsg 
52295cdf236Skettenis 	prot = ufi->entry->protection;
5235ca02815Sjsg 	pmap_flags = ttm_io_prot(bo, bo->resource, 0);
5245ca02815Sjsg 	if (!bo->resource->bus.is_iomem) {
5257f4dd379Sjsg 		struct ttm_operation_ctx ctx = {
526f005ef32Sjsg 			.interruptible = true,
5277f4dd379Sjsg 			.no_wait_gpu = false,
5285ca02815Sjsg 			.force_alloc = true
5297f4dd379Sjsg 		};
5307f4dd379Sjsg 
5311099013bSjsg 		ttm = bo->ttm;
532f005ef32Sjsg 		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
533f005ef32Sjsg 		if (err) {
534f005ef32Sjsg 			if (err == -EINTR || err == -ERESTARTSYS ||
535f005ef32Sjsg 			    err == -EAGAIN)
536f005ef32Sjsg 				return VM_FAULT_NOPAGE;
537f005ef32Sjsg 
538f005ef32Sjsg 			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
539f005ef32Sjsg 			return VM_FAULT_SIGBUS;
540f005ef32Sjsg 		}
5411099013bSjsg 	}
5421099013bSjsg 
54395cdf236Skettenis #ifdef __linux__
54495cdf236Skettenis 	/* We don't prefault on huge faults. Yet. */
545ad8b1aafSjsg 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1)
546ad8b1aafSjsg 		return ttm_bo_vm_insert_huge(vmf, bo, page_offset,
54795cdf236Skettenis 					     fault_page_size, prot);
54895cdf236Skettenis #endif
54995cdf236Skettenis 
5501099013bSjsg 	/*
5511099013bSjsg 	 * Speculatively prefault a number of pages. Only error on
5521099013bSjsg 	 * first page.
5531099013bSjsg 	 */
55495cdf236Skettenis 	for (i = 0; i < num_prefault; ++i) {
5555ca02815Sjsg 		if (bo->resource->bus.is_iomem) {
55695cdf236Skettenis 			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
55795cdf236Skettenis 			addr = pfn << PAGE_SHIFT;
55895cdf236Skettenis 			paddr = bus_space_mmap(bdev->memt, addr, 0, prot, 0);
5591099013bSjsg 		} else {
5601099013bSjsg 			page = ttm->pages[page_offset];
5611099013bSjsg 			if (unlikely(!page && i == 0)) {
562ad8b1aafSjsg 				return VM_FAULT_OOM;
5631099013bSjsg 			} else if (unlikely(!page)) {
5641099013bSjsg 				break;
5651099013bSjsg 			}
5661099013bSjsg 			paddr = VM_PAGE_TO_PHYS(page);
5671099013bSjsg 		}
5681099013bSjsg 
56995cdf236Skettenis 		err = pmap_enter(ufi->orig_map->pmap, address,
57095cdf236Skettenis 		    paddr | pmap_flags, prot, PMAP_CANFAIL | prot);
5711099013bSjsg 
57295cdf236Skettenis 		/* Never error on prefaulted PTEs */
57395cdf236Skettenis 		if (unlikely(err)) {
57495cdf236Skettenis 			ret = VM_FAULT_OOM;
57595cdf236Skettenis 			if (i == 0)
576ad8b1aafSjsg 				return VM_FAULT_NOPAGE;
57795cdf236Skettenis 			else
5781099013bSjsg 				break;
5791099013bSjsg 		}
5801099013bSjsg 
5811099013bSjsg 		address += PAGE_SIZE;
5821099013bSjsg 		if (unlikely(++page_offset >= page_last))
5831099013bSjsg 			break;
5841099013bSjsg 	}
5851099013bSjsg 	pmap_update(ufi->orig_map->pmap);
5867f4dd379Sjsg 	return ret;
5871099013bSjsg }
58895cdf236Skettenis EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
58995cdf236Skettenis 
59095cdf236Skettenis int
59195cdf236Skettenis ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
59295cdf236Skettenis     int npages, int centeridx, vm_fault_t fault_type,
59395cdf236Skettenis     vm_prot_t access_type, int flags)
59495cdf236Skettenis {
59595cdf236Skettenis 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
59695cdf236Skettenis 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
59795cdf236Skettenis 	vm_fault_t ret;
59895cdf236Skettenis 
59995cdf236Skettenis 	ret = ttm_bo_vm_reserve(bo);
60095cdf236Skettenis 	if (ret) {
601*f46a341eSmpi 		goto out;
60295cdf236Skettenis 	}
60395cdf236Skettenis 
60495cdf236Skettenis 	ret = ttm_bo_vm_fault_reserved(ufi, vaddr, TTM_BO_VM_NUM_PREFAULT, 1);
605*f46a341eSmpi 	dma_resv_unlock(bo->base.resv);
606*f46a341eSmpi out:
60795cdf236Skettenis 	switch (ret) {
60895cdf236Skettenis 	case VM_FAULT_NOPAGE:
609*f46a341eSmpi 		ret = 0;
61095cdf236Skettenis 		break;
61195cdf236Skettenis 	case VM_FAULT_RETRY:
612*f46a341eSmpi 		ret = ERESTART;
61395cdf236Skettenis 		break;
61495cdf236Skettenis 	default:
615*f46a341eSmpi 		ret = EACCES;
61695cdf236Skettenis 		break;
61795cdf236Skettenis 	}
61816c74684Smpi 	uvmfault_unlockall(ufi, NULL, uobj);
61995cdf236Skettenis 	return ret;
62095cdf236Skettenis }
62195cdf236Skettenis EXPORT_SYMBOL(ttm_bo_vm_fault);
6227f4dd379Sjsg 
623c349dbc7Sjsg #endif /* !__linux__ */
6247ccd5a2cSjsg 
6257ccd5a2cSjsg #ifdef notyet
626c349dbc7Sjsg void ttm_bo_vm_open(struct vm_area_struct *vma)
6277ccd5a2cSjsg {
628c349dbc7Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
6297ccd5a2cSjsg 
6307ccd5a2cSjsg 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
6317ccd5a2cSjsg 
6327f4dd379Sjsg 	ttm_bo_get(bo);
6337ccd5a2cSjsg }
634c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_open);
6357ccd5a2cSjsg 
636c349dbc7Sjsg void ttm_bo_vm_close(struct vm_area_struct *vma)
6377ccd5a2cSjsg {
638c349dbc7Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
6397ccd5a2cSjsg 
6407f4dd379Sjsg 	ttm_bo_put(bo);
6417ccd5a2cSjsg 	vma->vm_private_data = NULL;
6427ccd5a2cSjsg }
643c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_close);
6447ccd5a2cSjsg 
6457f4dd379Sjsg static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
6467f4dd379Sjsg 				 unsigned long offset,
6477f4dd379Sjsg 				 uint8_t *buf, int len, int write)
6487f4dd379Sjsg {
6497f4dd379Sjsg 	unsigned long page = offset >> PAGE_SHIFT;
6507f4dd379Sjsg 	unsigned long bytes_left = len;
6517f4dd379Sjsg 	int ret;
6527f4dd379Sjsg 
6537f4dd379Sjsg 	/* Copy a page at a time; that way no extra virtual address
6547f4dd379Sjsg 	 * mapping is needed.
6557f4dd379Sjsg 	 */
6567f4dd379Sjsg 	offset -= page << PAGE_SHIFT;
6577f4dd379Sjsg 	do {
6587f4dd379Sjsg 		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
6597f4dd379Sjsg 		struct ttm_bo_kmap_obj map;
6607f4dd379Sjsg 		void *ptr;
6617f4dd379Sjsg 		bool is_iomem;
6627f4dd379Sjsg 
6637f4dd379Sjsg 		ret = ttm_bo_kmap(bo, page, 1, &map);
6647f4dd379Sjsg 		if (ret)
6657f4dd379Sjsg 			return ret;
6667f4dd379Sjsg 
6677f4dd379Sjsg 		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
6687f4dd379Sjsg 		WARN_ON_ONCE(is_iomem);
6697f4dd379Sjsg 		if (write)
6707f4dd379Sjsg 			memcpy(ptr, buf, bytes);
6717f4dd379Sjsg 		else
6727f4dd379Sjsg 			memcpy(buf, ptr, bytes);
6737f4dd379Sjsg 		ttm_bo_kunmap(&map);
6747f4dd379Sjsg 
6757f4dd379Sjsg 		page++;
6767f4dd379Sjsg 		buf += bytes;
6777f4dd379Sjsg 		bytes_left -= bytes;
6787f4dd379Sjsg 		offset = 0;
6797f4dd379Sjsg 	} while (bytes_left);
6807f4dd379Sjsg 
6817f4dd379Sjsg 	return len;
6827f4dd379Sjsg }
6837f4dd379Sjsg 
684c349dbc7Sjsg int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
6857f4dd379Sjsg 		     void *buf, int len, int write)
6867f4dd379Sjsg {
6877f4dd379Sjsg 	struct ttm_buffer_object *bo = vma->vm_private_data;
68874bf7892Sjsg 	unsigned long offset = (addr) - vma->vm_start +
68974bf7892Sjsg 		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
69074bf7892Sjsg 		 << PAGE_SHIFT);
6917f4dd379Sjsg 	int ret;
6927f4dd379Sjsg 
693f005ef32Sjsg 	if (len < 1 || (offset + len) > bo->base.size)
6947f4dd379Sjsg 		return -EIO;
6957f4dd379Sjsg 
6967f4dd379Sjsg 	ret = ttm_bo_reserve(bo, true, false, NULL);
6977f4dd379Sjsg 	if (ret)
6987f4dd379Sjsg 		return ret;
6997f4dd379Sjsg 
7005ca02815Sjsg 	switch (bo->resource->mem_type) {
7017f4dd379Sjsg 	case TTM_PL_SYSTEM:
702ad8b1aafSjsg 		fallthrough;
7037f4dd379Sjsg 	case TTM_PL_TT:
7047f4dd379Sjsg 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
7057f4dd379Sjsg 		break;
7067f4dd379Sjsg 	default:
7075ca02815Sjsg 		if (bo->bdev->funcs->access_memory)
7085ca02815Sjsg 			ret = bo->bdev->funcs->access_memory(
7097f4dd379Sjsg 				bo, offset, buf, len, write);
7107f4dd379Sjsg 		else
7117f4dd379Sjsg 			ret = -EIO;
7127f4dd379Sjsg 	}
7137f4dd379Sjsg 
7147f4dd379Sjsg 	ttm_bo_unreserve(bo);
7157f4dd379Sjsg 
7167f4dd379Sjsg 	return ret;
7177f4dd379Sjsg }
718c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_vm_access);
7197f4dd379Sjsg 
7207ccd5a2cSjsg static const struct vm_operations_struct ttm_bo_vm_ops = {
7217ccd5a2cSjsg 	.fault = ttm_bo_vm_fault,
7227ccd5a2cSjsg 	.open = ttm_bo_vm_open,
7237f4dd379Sjsg 	.close = ttm_bo_vm_close,
724c349dbc7Sjsg 	.access = ttm_bo_vm_access,
7257ccd5a2cSjsg };
7267ccd5a2cSjsg #endif
7271099013bSjsg 
7281099013bSjsg void
7291099013bSjsg ttm_bo_vm_reference(struct uvm_object *uobj)
7301099013bSjsg {
7311099013bSjsg 	struct ttm_buffer_object *bo =
7321099013bSjsg 	    (struct ttm_buffer_object *)uobj;
7331099013bSjsg 
7347f4dd379Sjsg 	ttm_bo_get(bo);
7351099013bSjsg }
7361099013bSjsg 
7371099013bSjsg void
7381099013bSjsg ttm_bo_vm_detach(struct uvm_object *uobj)
7391099013bSjsg {
7401099013bSjsg 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
7411099013bSjsg 
742c349dbc7Sjsg 	ttm_bo_put(bo);
7431099013bSjsg }
7441099013bSjsg 
7459f7b7ef0Smpi const struct uvm_pagerops ttm_bo_vm_ops = {
7461099013bSjsg 	.pgo_fault = ttm_bo_vm_fault,
7471099013bSjsg 	.pgo_reference = ttm_bo_vm_reference,
7481099013bSjsg 	.pgo_detach = ttm_bo_vm_detach
7491099013bSjsg };
7501099013bSjsg 
7515ca02815Sjsg #ifdef __linux__
752f005ef32Sjsg /**
753f005ef32Sjsg  * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
754f005ef32Sjsg  *
755f005ef32Sjsg  * @vma:       vma as input from the fbdev mmap method.
756f005ef32Sjsg  * @bo:        The bo backing the address space.
757f005ef32Sjsg  *
758f005ef32Sjsg  * Maps a buffer object.
759f005ef32Sjsg  */
7605ca02815Sjsg int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
761beea9bb1Skettenis {
7625ca02815Sjsg 	/* Enforce no COW, since it would have really strange behavior. */
7635ca02815Sjsg 	if (is_cow_mapping(vma->vm_flags))
7645ca02815Sjsg 		return -EINVAL;
765beea9bb1Skettenis 
7665ca02815Sjsg 	ttm_bo_get(bo);
767beea9bb1Skettenis 
7685ca02815Sjsg 	/*
7695ca02815Sjsg 	 * Drivers may want to override the vm_ops field. Otherwise we
7705ca02815Sjsg 	 * use TTM's default callbacks.
7715ca02815Sjsg 	 */
7725ca02815Sjsg 	if (!vma->vm_ops)
7737ccd5a2cSjsg 		vma->vm_ops = &ttm_bo_vm_ops;
7747ccd5a2cSjsg 
7757ccd5a2cSjsg 	/*
7767ccd5a2cSjsg 	 * Note: We're transferring the bo reference to
7777ccd5a2cSjsg 	 * vma->vm_private_data here.
7787ccd5a2cSjsg 	 */
7797ccd5a2cSjsg 
7807ccd5a2cSjsg 	vma->vm_private_data = bo;
7817ccd5a2cSjsg 
782f005ef32Sjsg 	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
7831099013bSjsg 	return 0;
7841099013bSjsg }
785c349dbc7Sjsg EXPORT_SYMBOL(ttm_bo_mmap_obj);
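/*
 * A minimal sketch of a caller, e.g. an fbdev mmap method backed by a
 * single pinned bo.  Storing the bo pointer in info->par is an assumed
 * driver convention, not something TTM requires:
 *
 *	static int mydrv_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 *	{
 *		struct ttm_buffer_object *bo = info->par;
 *
 *		return ttm_bo_mmap_obj(vma, bo);
 *	}
 */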
7865ca02815Sjsg #else /* !__linux__ */
7875ca02815Sjsg int ttm_bo_mmap_obj(struct ttm_buffer_object *bo)
7885ca02815Sjsg {
7895ca02815Sjsg 	/* Enforce no COW, since it would have really strange behavior. */
7905ca02815Sjsg #ifdef notyet
7915ca02815Sjsg 	if (UVM_ET_ISCOPYONWRITE(entry))
7925ca02815Sjsg 		return -EINVAL;
7937ccd5a2cSjsg #endif
7945ca02815Sjsg 
7955ca02815Sjsg 	ttm_bo_get(bo);
7965ca02815Sjsg 
7975ca02815Sjsg 	/*
7985ca02815Sjsg 	 * Drivers may want to override the vm_ops field. Otherwise we
7995ca02815Sjsg 	 * use TTM's default callbacks.
8005ca02815Sjsg 	 */
8015ca02815Sjsg 	if (bo->base.uobj.pgops == NULL)
8025ca02815Sjsg 		uvm_obj_init(&bo->base.uobj, &ttm_bo_vm_ops, 1);
8035ca02815Sjsg 
8045ca02815Sjsg 	/*
8055ca02815Sjsg 	 * Note: We're transferring the bo reference to
8065ca02815Sjsg 	 * vma->vm_private_data here.
8075ca02815Sjsg 	 */
8085ca02815Sjsg 
8095ca02815Sjsg #ifdef notyet
8105ca02815Sjsg 	vma->vm_private_data = bo;
8115ca02815Sjsg 
8125ca02815Sjsg 	vma->vm_flags |= VM_PFNMAP;
8135ca02815Sjsg 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
8145ca02815Sjsg #endif
8155ca02815Sjsg 	return 0;
8165ca02815Sjsg }
8175ca02815Sjsg #endif /* !__linux__ */
818