/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"
#include "i915_mm.h"

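/*
 * Shared state for the apply_to_page_range() callbacks below: the target
 * mm, the next pfn to insert (which doubles as a count of insertions for
 * unwinding on error), the page protection bits, and, for the scatterlist
 * path, an sg iterator plus the IO base address.
 */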
struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};

#define use_dma(io) ((io) != -1)

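/*
 * Resolve the pfn for the iterator's current position: in DMA mode the
 * stored DMA address, offset by iobase, is translated into a pfn; in page
 * mode the iterator's base pfn is advanced by the pages walked so far.
 */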
static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

#ifdef notyet

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.sgp))
		return -EINVAL;

	/* Special PTEs are not associated with any struct vm_page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	/* Advance to the next sg entry once this one is exhausted. */
	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}

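/*
 * Every vma remapped here must already be a raw pfn mapping that the core
 * mm will neither expand nor include in core dumps.
 */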
#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)

#if IS_ENABLED(CONFIG_X86)
#ifdef notyet
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTEs are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}
#endif

/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		/* Unwind any PTEs that were inserted before the failure. */
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}
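/*
 * A minimal caller sketch, kept as a comment since this region is compiled
 * out under "notyet". The fault handler and its vm_private_data layout are
 * hypothetical; upstream the equivalent call sits in the GEM fault path.
 *
 *	static vm_fault_t example_fault(struct vm_fault *vmf)
 *	{
 *		struct vm_area_struct *vma = vmf->vma;
 *		struct io_mapping *iomap = vma->vm_private_data;
 *		unsigned long pfn = iomap->base >> PAGE_SHIFT;
 *		int err;
 *
 *		// The mm semaphore is already held during a fault.
 *		err = remap_io_mapping(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start, iomap);
 *		return err ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 *	}
 */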
#endif

/**
 * remap_io_sg - remap an IO mapping described by a scatterlist to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: start sg entry
 * @iobase: offset applied to the stored DMA addresses, or -1 to map by pfn
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		/* r.pfn counts the PTEs inserted before the failure. */
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
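/*
 * Likewise, a hypothetical sketch for the scatterlist path. Passing
 * iobase == -1 maps the backing pages by pfn; any other value maps the
 * stored DMA addresses offset by iobase (e.g. an aperture base).
 *
 *	static int example_map_pages(struct vm_area_struct *vma,
 *				     struct sg_table *pages)
 *	{
 *		// Page-backed object: no DMA offset, so pass -1.
 *		return remap_io_sg(vma, vma->vm_start,
 *				   vma->vm_end - vma->vm_start,
 *				   pages->sgl, -1);
 *	}
 */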

#endif /* notyet */