xref: /openbsd-src/sys/dev/pci/drm/i915/gt/shmem_utils.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/mm.h>
7 #include <linux/pagemap.h>
8 #include <linux/shmem_fs.h>
9 
10 #include "gem/i915_gem_object.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "shmem_utils.h"
13 
14 #ifdef __linux__
15 
16 struct file *shmem_create_from_data(const char *name, void *data, size_t len)
17 {
18 	struct file *file;
19 	int err;
20 
21 	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
22 	if (IS_ERR(file))
23 		return file;
24 
25 	err = shmem_write(file, 0, data, len);
26 	if (err) {
27 		fput(file);
28 		return ERR_PTR(err);
29 	}
30 
31 	return file;
32 }
33 
34 struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
35 {
36 	struct file *file;
37 	void *ptr;
38 
39 	if (i915_gem_object_is_shmem(obj)) {
40 		file = obj->base.filp;
41 		atomic_long_inc(&file->f_count);
42 		return file;
43 	}
44 
45 	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
46 						I915_MAP_WC : I915_MAP_WB);
47 	if (IS_ERR(ptr))
48 		return ERR_CAST(ptr);
49 
50 	file = shmem_create_from_data("", ptr, obj->base.size);
51 	i915_gem_object_unpin_map(obj);
52 
53 	return file;
54 }
55 
/*
 * Map the whole contents of @file contiguously into kernel virtual
 * address space and mark its pages unevictable.  Returns the mapped
 * address, or NULL on failure.  Undo with shmem_unpin_map().
 *
 * Assumes i_size is a whole number of pages — true for files made by
 * shmem_create_from_data(), which PAGE_ALIGNs the size.
 */
void *shmem_pin_map(struct file *file)
{
	struct page **pages;
	size_t n_pages, i;
	void *vaddr;

	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Fault in every page, taking a reference on each. */
	for (i = 0; i < n_pages; i++) {
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err_page;
	}

	/*
	 * VM_MAP_PUT_PAGES hands ownership of both the page references
	 * and the pages[] array to the mapping: the later vfree() drops
	 * the references and frees the array, which is why the success
	 * path must not kvfree(pages) here.
	 */
	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err_page;
	mapping_set_unevictable(file->f_mapping);
	return vaddr;
err_page:
	/* i indexes the first failed page; release the ones before it. */
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}
85 
/*
 * Release a mapping created by shmem_pin_map(): make the pages
 * evictable again and tear down the virtual mapping.  Because the
 * mapping was created with VM_MAP_PUT_PAGES, vfree() also drops the
 * per-page references and frees the page array.
 */
void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	vfree(ptr);
}
91 
/*
 * Copy @len bytes between @ptr and @file, starting at byte offset
 * @off.  @write selects the direction: true copies from @ptr into the
 * file, false copies from the file into @ptr.
 * Returns 0 on success or a negative errno.
 */
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	/* Despite the name, this is a page index within the file. */
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		/* Bytes to transfer within this page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		/* Faults the page in if needed and takes a reference. */
		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);	/* ensure the data sticks */
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;	/* only the first page can start mid-page */
	}

	return 0;
}
127 
/*
 * Read @len bytes from @file at byte offset @off into @dst.
 * Returns 0 on success or a negative errno.
 */
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}
132 
/*
 * Write @len bytes from @src into @file at byte offset @off.
 * Returns 0 on success or a negative errno.
 */
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}
137 
138 #endif /* __linux__ */
139 
140 struct uvm_object *
141 uao_create_from_data(void *data, size_t len)
142 {
143 	struct uvm_object *uao;
144 	int err;
145 
146 	uao = uao_create(PAGE_ALIGN(len), 0);
147 	if (uao == NULL) {
148 		return ERR_PTR(-ENOMEM);
149 	}
150 
151 	err = uao_write(uao, 0, data, len);
152 	if (err) {
153 		uao_detach(uao);
154 		return ERR_PTR(err);
155 	}
156 
157 	return uao;
158 }
159 
160 struct uvm_object *
161 uao_create_from_object(struct drm_i915_gem_object *obj)
162 {
163 	struct uvm_object *uao;
164 	void *ptr;
165 
166 	if (i915_gem_object_is_shmem(obj)) {
167 		uao_reference(obj->base.uao);
168 		return obj->base.uao;
169 	}
170 
171 	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
172 						I915_MAP_WC : I915_MAP_WB);
173 	if (IS_ERR(ptr))
174 		return ERR_CAST(ptr);
175 
176 	uao = uao_create_from_data(ptr, obj->base.size);
177 	i915_gem_object_unpin_map(obj);
178 
179 	return uao;
180 }
181 
/*
 * Copy @len bytes between @ptr and the backing pages of @uao,
 * starting at byte offset @off.  @write selects the direction: true
 * copies from @ptr into the object, false copies from the object into
 * @ptr.  Returns 0 on success or -ENOMEM if the pages could not be
 * wired.
 */
static int __uao_rw(struct uvm_object *uao, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	struct pglist plist;
	struct vm_page *page;
	vaddr_t pgoff = trunc_page(off);
	/*
	 * NOTE(review): olen rounds up only len, ignoring the sub-page
	 * part of off.  If off is not page-aligned and [off, off + len)
	 * spills into one more page than round_page(len) covers, fewer
	 * pages are wired than the loop needs and the tail of the copy
	 * is silently skipped (the Linux __shmem_rw above handles this
	 * case).  Verify against uvm_obj_wire()'s start/end parameter
	 * semantics and audit whether any caller passes an unaligned
	 * nonzero off.
	 */
	size_t olen = round_page(len);

	TAILQ_INIT(&plist);
	/* Wire the pages resident; they are collected on plist. */
	if (uvm_obj_wire(uao, pgoff, olen, &plist))
		return -ENOMEM;

	TAILQ_FOREACH(page, &plist, pageq) {
		/* Bytes to transfer within this page. */
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		void *vaddr = kmap(page);

		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}

		kunmap_va(vaddr);
		len -= this;
		ptr += this;
		off = 0;	/* only the first page can start mid-page */
	}

	uvm_obj_unwire(uao, pgoff, olen);

	return 0;
}
217 
/*
 * Read @len bytes from @uao at byte offset @off into @dst.
 * Returns 0 on success or a negative errno.
 */
int uao_read(struct uvm_object *uao, loff_t off, void *dst, size_t len)
{
	return __uao_rw(uao, off, dst, len, false);
}
222 
/*
 * Write @len bytes from @src into @uao at byte offset @off.
 * Returns 0 on success or a negative errno.
 */
int uao_write(struct uvm_object *uao, loff_t off, void *src, size_t len)
{
	return __uao_rw(uao, off, src, len, true);
}
227 
228 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
229 #include "st_shmem_utils.c"
230 #endif
231