/*	$NetBSD: xen_drm_front_gem.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_drm_front_gem.c,v 1.2 2021/12/18 23:45:45 riastradh Exp $");

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

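/*
 * A xen_gem_object is backed in one of two ways, depending on the
 * front-end configuration (see gem_create() below): either the guest
 * allocates the backing pages itself and shares them with the backend,
 * or, when cfg.be_alloc is set, the backend allocates the storage and
 * the guest only reserves ballooned pages into which the backend's
 * grant references are mapped.
 */
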
static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

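/*
 * Allocate only the pages[] pointer array; the page frames themselves
 * are provided later, either by the balloon driver (be_alloc case) or
 * by an imported dma-buf's scatter-gather list.
 */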
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

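/*
 * Create a GEM object of the given size and arrange for its backing
 * pages according to the front-end configuration: backend-allocated
 * (ballooned pages, filled in later from grant references) or
 * guest-allocated via drm_gem_get_pages().
 */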
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = alloc_xenballooned_pages(xen_obj->num_pages,
					       xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR_OR_NULL(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

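/*
 * Entry point used when creating a buffer on behalf of userspace.
 *
 * Illustrative sketch (not part of this file): a dumb_create hook built
 * on top of this helper would typically compute the buffer size first,
 * roughly as follows, where args is the struct drm_mode_create_dumb
 * passed in by the DRM core:
 *
 *	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *	args->size = args->pitch * args->height;
 *	obj = xen_drm_front_gem_create(dev, args->size);
 *	if (IS_ERR_OR_NULL(obj))
 *		return PTR_ERR(obj);
 */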
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

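/*
 * Tear down a GEM object created by any of the paths above: imported
 * PRIME buffers are released via drm_prime_gem_destroy(), ballooned
 * pages are returned to the balloon driver, and locally allocated
 * pages go back through drm_gem_put_pages().
 */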
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				free_xenballooned_pages(xen_obj->num_pages,
							xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

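/*
 * Used on the PRIME export path: wrap the backing pages in a newly
 * allocated sg_table for the importer. Fails if the object has no
 * backing pages yet.
 */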
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}

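/*
 * PRIME import path: the imported buffer keeps its pages in the
 * dma-buf's scatter-gather table; extract the page pointers and notify
 * the backend about the new display buffer via xen_drm_front_dbuf_create().
 */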
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR_OR_NULL(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->nents);

	return &xen_obj->base;
}

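/*
 * Common tail of both mmap paths below: fix up the VMA prepared by the
 * DRM core and eagerly insert all backing pages into it.
 */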
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0, as we
	 * want to map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	/*
	 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h),
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * Normally the vm_operations_struct.fault handler would be called
	 * on first CPU access to the mapping, but the GPU may access the
	 * buffer without the CPU ever touching it. Insert all pages up
	 * front, so both CPU and GPU are happy.
	 * FIXME: as all pages are inserted here, no .fault handler should
	 * ever be called, so none is provided.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

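/*
 * mmap path for GEM objects mapped through the DRM file.
 *
 * Illustrative sketch (userspace side, not part of this file): the
 * offset passed to mmap(2) comes from the "map dumb" ioctl, e.g. with
 * libdrm:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */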
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

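/*
 * PRIME vmap: build one contiguous kernel virtual mapping over the
 * scattered backing pages with vmap(); undone by the vunmap hook below.
 */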
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

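/*
 * PRIME mmap: unlike xen_drm_front_gem_mmap() above, the dma-buf path
 * hands us the GEM object directly, so drm_gem_mmap_obj() is used
 * instead of the file-offset lookup done by drm_gem_mmap().
 */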
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}
311