/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>


#include "i915_drv.h"
#include "i915_mm.h"

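/*
 * State shared with the apply_to_page_range() callbacks below: @mm, @pfn
 * and @prot drive the plain pfn remap, while @sgt and @iobase drive the
 * scatterlist remap.  @pfn also counts the PTEs written so far, so a
 * partially completed remap can be unwound with zap_vma_ptes().
 */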
struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};

#define use_dma(io) ((io) != -1)

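/*
 * Resolve the pfn for the current position of the sg iterator: when a
 * valid @iobase was supplied, the stored DMA address is offset by it;
 * otherwise the pfn of the backing page is used directly.
 */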
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

#ifdef notyet

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTEs are not associated with any struct vm_page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

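/*
 * Both remap helpers insist that the caller's vma has already been set up
 * as a special pfn mapping that will never be expanded or dumped; the
 * GEM_BUG_ON()s below enforce this contract.
 */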
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

#if IS_ENABLED(CONFIG_X86)
#ifdef notyet
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}
#endif

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
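
/*
 * Illustrative sketch only: a fault handler that has already pinned an
 * object into the mappable aperture could populate the faulting vma with a
 * single call.  The names fault_addr, aperture_pfn, mapping_size and
 * ggtt_iomap below are hypothetical placeholders, not taken from this file.
 *
 *	err = remap_io_mapping(vmf->vma, fault_addr, aperture_pfn,
 *			       mapping_size, ggtt_iomap);
 *	if (err)
 *		return VM_FAULT_SIGBUS;
 */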
#endif

/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 *  Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
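
/*
 * Illustrative sketch only: a CPU-mmap fault handler could map an object's
 * backing store by walking its scatterlist.  obj_sgl and lmem_iobase below
 * are hypothetical placeholders; pass -1 as @iobase to use the page pfns
 * instead of the stored DMA addresses.
 *
 *	err = remap_io_sg(vmf->vma, vmf->vma->vm_start,
 *			  vmf->vma->vm_end - vmf->vma->vm_start,
 *			  obj_sgl, lmem_iobase);
 */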

#endif /* notyet */