/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

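/* An exported dma-buf stashes the backing GEM object in dma_buf->priv. */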
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

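/*
 * Pin the object's backing pages and hand the importer its own scatterlist,
 * DMA-mapped for the attaching device. Copying the sg_table keeps the
 * importer's mapping independent of obj->mm.pages.
 */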
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), M_DRM, GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

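/* Undo i915_gem_map_dma_buf(): unmap, free the copied table, unpin pages. */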
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

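/*
 * vmap/vunmap give the importer a contiguous kernel mapping of the whole
 * object, backed by a pinned write-back (WB) mapping of its pages.
 */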
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

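/* Atomic kmaps are unsupported: .map_atomic returns NULL and .unmap_atomic is a no-op. */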
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

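/* Map a single page for CPU access: validate the index, pin the backing store, kmap. */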
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}

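/* Release a page mapped by i915_gem_dmabuf_kmap(). */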
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}

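/*
 * mmap for importers. The upstream redirection of the vma to the object's
 * shmem filp is compiled out here (#if 0); only the size and backing-store
 * checks remain active.
 */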
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
#if 0
	int ret;
#endif

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

#if 0
	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);
#endif

	return 0;
}

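/*
 * Move the object to the CPU domain before CPU access so that cached
 * reads/writes are coherent; DMA_BIDIRECTIONAL and DMA_TO_DEVICE both
 * count as CPU writes.
 */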
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

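/* Flush CPU writes by moving the object back to the GTT domain. */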
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

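/* dma-buf callbacks for buffers exported by i915. */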
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.map_atomic = i915_gem_dmabuf_kmap_atomic,
	.unmap = i915_gem_dmabuf_kunmap,
	.unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

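/*
 * Export a GEM object as a dma-buf. The object's reservation object is
 * shared with the dma-buf so that fences stay visible to importers.
 */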
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

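/*
 * Backing-store hooks for imported objects: pages come from the exporter
 * through the dma-buf attachment rather than from shmem.
 */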
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

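/*
 * Import a dma-buf as a GEM object. A self-import short-circuits to the
 * underlying GEM object; a foreign buffer gets a fresh wrapper object
 * whose pages are fetched through the attachment on first use.
 */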
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#if 0
	get_dma_buf(dma_buf);
#endif

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/*
	 * We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->base.write_domain = 0;

	return &obj->base;

fail_detach:
#if 0
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
#endif

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif