// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/iosys-map.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"

#ifdef __linux__

struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
	struct file *file;
	int err;

	file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
	if (IS_ERR(file))
		return file;

	err = shmem_write(file, 0, data, len);
	if (err) {
		fput(file);
		return ERR_PTR(err);
	}

	return file;
}

struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
	struct file *file;
	void *ptr;

	/*
	 * A shmem-backed object already has a suitable backing file;
	 * take an extra reference on it instead of copying the contents.
	 */
	if (i915_gem_object_is_shmem(obj)) {
		file = obj->base.filp;
		atomic_long_inc(&file->f_count);
		return file;
	}

	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
					       I915_MAP_WC : I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	file = shmem_create_from_data("", ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return file;
}

void *shmem_pin_map(struct file *file)
{
	struct page **pages;
	size_t n_pages, i;
	void *vaddr;

	n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
	pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
						       GFP_KERNEL);
		if (IS_ERR(pages[i]))
			goto err_page;
	}

	/*
	 * VM_MAP_PUT_PAGES hands ownership of the page references and the
	 * pages[] array to the mapping: the vfree() in shmem_unpin_map()
	 * releases both for us.
	 */
	vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!vaddr)
		goto err_page;
	/* Keep the backing pages off the reclaim LRU while pinned. */
	mapping_set_unevictable(file->f_mapping);
	return vaddr;
err_page:
	while (i--)
		put_page(pages[i]);
	kvfree(pages);
	return NULL;
}

void shmem_unpin_map(struct file *file, void *ptr)
{
	mapping_clear_unevictable(file->f_mapping);
	vfree(ptr);
}

/* Copy between a kernel buffer and the file contents, one page at a time. */
static int __shmem_rw(struct file *file, loff_t off,
		      void *ptr, size_t len,
		      bool write)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		ptr += this;
		off = 0;
	}

	return 0;
}

int shmem_read_to_iosys_map(struct file *file, loff_t off,
			    struct iosys_map *map, size_t map_off, size_t len)
{
	unsigned long pfn;

	for (pfn = off >> PAGE_SHIFT; len; pfn++) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		struct page *page;
		void *vaddr;

		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap(page);
		iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
				    this);
		mark_page_accessed(page);
		kunmap(page);
		put_page(page);

		len -= this;
		map_off += this;
		off = 0;
	}

	return 0;
}

int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
	return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
	return __shmem_rw(file, off, src, len, true);
}

#endif /* __linux__ */

/* OpenBSD: equivalent helpers built on UVM anonymous objects (uao). */

struct uvm_object *
uao_create_from_data(void *data, size_t len)
{
	struct uvm_object *uao;
	int err;

	uao = uao_create(PAGE_ALIGN(len), 0);
	if (uao == NULL)
		return ERR_PTR(-ENOMEM);

	err = uao_write(uao, 0, data, len);
	if (err) {
		uao_detach(uao);
		return ERR_PTR(err);
	}

	return uao;
}

struct uvm_object *
uao_create_from_object(struct drm_i915_gem_object *obj)
{
	struct uvm_object *uao;
	void *ptr;

	if (i915_gem_object_is_shmem(obj)) {
		uao_reference(obj->base.uao);
		return obj->base.uao;
	}

	ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
					       I915_MAP_WC : I915_MAP_WB);
	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	uao = uao_create_from_data(ptr, obj->base.size);
	i915_gem_object_unpin_map(obj);

	return uao;
}

static int __uao_rw(struct uvm_object *uao, loff_t off,
		    void *ptr, size_t len,
		    bool write)
{
	struct pglist plist;
	struct vm_page *page;
	/* Wire the page-aligned span covering [off, off + len). */
	vaddr_t pgoff = trunc_page(off);
	size_t olen = round_page(off + len);

	TAILQ_INIT(&plist);
	if (uvm_obj_wire(uao, pgoff, olen, &plist))
		return -ENOMEM;

	TAILQ_FOREACH(page, &plist, pageq) {
		unsigned int this =
			min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
		void *vaddr = kmap(page);

		if (write) {
			memcpy(vaddr + offset_in_page(off), ptr, this);
			set_page_dirty(page);
		} else {
			memcpy(ptr, vaddr + offset_in_page(off), this);
		}

		kunmap_va(vaddr);
		len -= this;
		ptr += this;
		off = 0;
	}

	uvm_obj_unwire(uao, pgoff, olen);

	return 0;
}

int uao_read(struct uvm_object *uao, loff_t off, void *dst, size_t len)
{
	return __uao_rw(uao, off, dst, len, false);
}

int uao_write(struct uvm_object *uao, loff_t off, void *src, size_t len)
{
	return __uao_rw(uao, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif
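
/*
 * A minimal usage sketch (compiled out): how a Linux-side caller might use
 * the shmem_pin_map()/shmem_unpin_map() pair above to get a contiguous
 * kernel mapping of a shmem file's contents. example_peek_header() is a
 * hypothetical helper for illustration only, not part of this file's API.
 */
#if 0
static int example_peek_header(struct file *file)
{
	u32 magic;
	void *vaddr;

	vaddr = shmem_pin_map(file);	/* references and vmaps every page */
	if (!vaddr)
		return -ENOMEM;

	magic = *(u32 *)vaddr;		/* contents now directly addressable */
	pr_debug("shmem magic: %08x\n", magic);

	shmem_unpin_map(file, vaddr);	/* vfree() drops pages and array */
	return 0;
}
#endif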