/* $NetBSD: i915_gem_phys.c,v 1.9 2024/01/19 22:23:04 riastradh Exp $ */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.9 2024/01/19 22:23:04 riastradh Exp $");

#ifdef __NetBSD__
/*
 * Make sure this block comes before any linux includes, so we don't
 * get mixed up by the PAGE_MASK complementation.
 */

#include <sys/bus.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#include <machine/pmap_private.h> /* kvtopte, pmap_pte_clearbits */

/*
 * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
 * so that it isn't affected by pmap_page_protect on the physical
 * address. Adapted from sys/arch/x86/x86/bus_dma.c.
 */
static int
bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
        vaddr_t va;
        bus_addr_t addr;
        int curseg;
        const uvm_flag_t kmflags =
            (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
        u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;

        size = round_page(size);
        if (flags & BUS_DMA_NOCACHE)
                pmapflags |= PMAP_NOCACHE;

        va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

        if (va == 0)
                return ENOMEM;

        *kvap = (void *)va;

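        /*
         * Enter each page of every segment with pmap_kenter_pa.  These
         * are unmanaged, wired kernel mappings, so a later
         * pmap_page_protect on the underlying physical pages cannot
         * revoke them.
         */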
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
                        if (size == 0)
                                panic("bus_dmamem_kmap: size botch");
                        pmap_kenter_pa(va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            pmapflags);
                }
        }
        pmap_update(pmap_kernel());

        return 0;
}

static void
bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
{
        pt_entry_t *pte, opte;
        vaddr_t va, sva, eva;

        KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);

        size = round_page(size);
        sva = (vaddr_t)kva;
        eva = sva + size;

        /*
         * mark pages cacheable again.
         */
        for (va = sva; va < eva; va += PAGE_SIZE) {
                pte = kvtopte(va);
                opte = *pte;
                if ((opte & PTE_PCD) != 0)
                        pmap_pte_clearbits(pte, PTE_PCD);
        }
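        /*
         * Drop the wired mappings established by bus_dmamem_kmap and
         * give the virtual address range back to the kernel map.
         */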
        pmap_kremove((vaddr_t)kva, size);
        pmap_update(pmap_kernel());
        uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

#endif

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

#include <linux/nbsd-namespace.h>

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
#ifdef __NetBSD__
        struct uvm_object *mapping = obj->base.filp;
#else
        struct address_space *mapping = obj->base.filp->f_mapping;
#endif
        struct scatterlist *sg;
        struct sg_table *st;
        dma_addr_t dma;
        void *vaddr;
        void *dst;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        /*
         * Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
#ifdef __NetBSD__
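        /*
         * NetBSD path: back the object with a single physically
         * contiguous DMA segment of roundup_pow_of_two(obj->base.size),
         * map it into kernel virtual address space with
         * bus_dmamem_kmap above, and remember the mapping in
         * obj->mm.u.phys.  The dma variable is only used by the Linux
         * path; __USE quiets the unused-variable warning for it.
         */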
        __USE(dma);
        bus_dma_tag_t dmat = obj->base.dev->dmat;
        bool loaded = false;
        int rsegs = 0;
        int ret;

        vaddr = NULL;

        /* XXX errno NetBSD->Linux */
        ret = -bus_dmamem_alloc(dmat, roundup_pow_of_two(obj->base.size),
            roundup_pow_of_two(obj->base.size), 0, &obj->mm.u.phys.seg, 1,
            &rsegs, BUS_DMA_WAITOK);
        if (ret)
                return -ENOMEM;
        KASSERT(rsegs == 1);
        ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
            roundup_pow_of_two(obj->base.size), &vaddr,
            BUS_DMA_WAITOK|BUS_DMA_COHERENT);
        if (ret)
                goto err_pci;
        obj->mm.u.phys.kva = vaddr;
#else
        vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
                                   roundup_pow_of_two(obj->base.size),
                                   &dma, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;
#endif

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto err_pci;

#ifdef __NetBSD__
        if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
                GFP_KERNEL))
#else
        if (sg_alloc_table(st, 1, GFP_KERNEL))
#endif
                goto err_st;

        sg = st->sgl;
#ifdef __NetBSD__
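        /*
         * On NetBSD the scatterlist carries a bus_dmamap_t.  Create a
         * map sized for the whole contiguous allocation and load it
         * with the raw segment so that sg_dma_address()/sg_dma_len()
         * work the same way they do with the Linux path below.
         */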
        /* XXX errno NetBSD->Linux */
        ret = -bus_dmamap_create(dmat, roundup_pow_of_two(obj->base.size), 1,
            roundup_pow_of_two(obj->base.size), 0, BUS_DMA_WAITOK,
            &sg->sg_dmamap);
        if (ret) {
                sg->sg_dmamap = NULL;
                goto err_st1;
        }
        sg->sg_dmat = dmat;
        /* XXX errno NetBSD->Linux */
        ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, &obj->mm.u.phys.seg, 1,
            roundup_pow_of_two(obj->base.size), BUS_DMA_WAITOK);
        if (ret)
                goto err_st1;
        loaded = true;
#else
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_assign_page(sg, (struct page *)vaddr);
        sg_dma_address(sg) = dma;
        sg_dma_len(sg) = obj->base.size;
#endif

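        /*
         * Copy the object's current contents from its shmem/uvm backing
         * pages into the contiguous buffer, flushing CPU caches so the
         * data is visible to the device, and release each backing page
         * as soon as it has been copied.
         */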
        dst = vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                void *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        goto err_st;

                src = kmap_atomic(page);
                memcpy(dst, src, PAGE_SIZE);
                drm_clflush_virt_range(dst, PAGE_SIZE);
                kunmap_atomic(src);

#ifdef __NetBSD__
                uvm_obj_unwirepages(mapping, i*PAGE_SIZE, (i + 1)*PAGE_SIZE);
#else
                put_page(page);
#endif
                dst += PAGE_SIZE;
        }

        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

        __i915_gem_object_set_pages(obj, st, obj->base.size);

        return 0;

#ifdef __NetBSD__
err_st1:
        if (loaded)
                bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
        sg_free_table(st);
#endif
err_st:
        kfree(st);
err_pci:
#ifdef __NetBSD__
        if (vaddr) {
                bus_dmamem_kunmap(dmat, vaddr,
                    roundup_pow_of_two(obj->base.size));
        }
        obj->mm.u.phys.kva = NULL;
        if (rsegs)
                bus_dmamem_free(dmat, &obj->mm.u.phys.seg, rsegs);
#else
        dma_free_coherent(&obj->base.dev->pdev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
#endif
        return -ENOMEM;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
#ifdef __NetBSD__
        bus_dma_tag_t dmat = obj->base.dev->dmat;
        void *vaddr = obj->mm.u.phys.kva;
#else
        dma_addr_t dma = sg_dma_address(pages->sgl);
        void *vaddr = sg_page(pages->sgl);
#endif

        __i915_gem_object_release_shmem(obj, pages, false);

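        /*
         * If the object was written to while it had the contiguous
         * physical backing, copy the data back out to its shmem/uvm
         * object page by page before the backing store is freed.
         */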
        if (obj->mm.dirty) {
#ifdef __NetBSD__
                struct uvm_object *mapping = obj->base.filp;
#else
                struct address_space *mapping = obj->base.filp->f_mapping;
#endif
                void *src = vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(src, PAGE_SIZE);
                        memcpy(dst, src, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
#ifdef __NetBSD__
                        /* XXX mark_page_accessed */
                        uvm_obj_unwirepages(mapping, i*PAGE_SIZE,
                            (i + 1)*PAGE_SIZE);
#else
                        if (obj->mm.madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        put_page(page);
#endif

                        src += PAGE_SIZE;
                }
                obj->mm.dirty = false;
        }

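        /*
         * Tear down in reverse order of setup: unload the DMA map,
         * free the sg_table, then unmap and free the contiguous
         * segment (the Linux path frees its coherent allocation
         * instead).
         */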
#ifdef __NetBSD__
        bus_dmamap_unload(dmat, pages->sgl->sg_dmamap);
#endif

        sg_free_table(pages);
        kfree(pages);

#ifdef __NetBSD__
        bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
            roundup_pow_of_two(obj->base.size));
        obj->mm.u.phys.kva = NULL;
        bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
#else
        dma_free_coherent(&obj->base.dev->pdev->dev,
                          roundup_pow_of_two(obj->base.size),
                          vaddr, dma);
#endif
}

static void phys_release(struct drm_i915_gem_object *obj)
{
#ifdef __NetBSD__
        /* XXX Who acquires the reference? */
        uao_detach(obj->base.filp);
#else
        fput(obj->base.filp);
#endif
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,

        .release = phys_release,
};

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
        struct sg_table *pages;
        int err;

        if (align > obj->base.size)
                return -EINVAL;

        if (obj->ops == &i915_gem_phys_ops)
                return 0;

        if (obj->ops != &i915_gem_shmem_ops)
                return -EINVAL;

        err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (err)
                return err;

        mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.quirked) {
                err = -EFAULT;
                goto err_unlock;
        }

        if (obj->mm.mapping) {
                err = -EBUSY;
                goto err_unlock;
        }

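        /*
         * Detach the shmem page set, switch the object over to the
         * phys ops, and repopulate it through get_pages.  On failure
         * the original ops and pages are restored below.
         */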
        pages = __i915_gem_object_unset_pages(obj);

        obj->ops = &i915_gem_phys_ops;

        err = ____i915_gem_object_get_pages(obj);
        if (err)
                goto err_xfer;

        /* Perma-pin (until release) the physical set of pages */
        __i915_gem_object_pin_pages(obj);

        if (!IS_ERR_OR_NULL(pages)) {
                i915_gem_shmem_ops.put_pages(obj, pages);
                i915_gem_object_release_memory_region(obj);
        }
        mutex_unlock(&obj->mm.lock);
        return 0;

err_xfer:
        obj->ops = &i915_gem_shmem_ops;
        if (!IS_ERR_OR_NULL(pages)) {
                unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

                __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
        }
err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif