/*	$NetBSD: i915_gem_region.c,v 1.6 2024/01/19 22:23:19 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_region.c,v 1.6 2024/01/19 22:23:19 riastradh Exp $");

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

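/*
 * Return the object's buddy blocks to its memory region and free the
 * scatter/gather table that described them.  On NetBSD, the
 * scatterlist owns a loaded bus_dma map which must be unloaded first.
 */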
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);

	obj->mm.dirty = false;
#ifdef __NetBSD__
	bus_dmamap_unload(obj->base.dev->dmat, pages->sgl->sg_dmamap);
#endif
	sg_free_table(pages);
	kfree(pages);
}

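/*
 * Allocate backing pages for the object from its region's buddy
 * allocator and record them in a scatter/gather table.
 */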
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;
	struct list_head *blocks = &obj->mm.blocks;
	resource_size_t size = obj->base.size;
	resource_size_t prev_end;
	struct i915_buddy_block *block;
	unsigned int flags;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

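	/*
	 * On Linux, preallocate one scatterlist entry per minimum-size
	 * chunk up front; the NetBSD path below instead builds the
	 * table from a bus_dma segment array once the blocks are known.
	 */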
#ifndef __NetBSD__
	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}
#endif

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
	if (ret)
		goto err_free_sg;

	GEM_BUG_ON(list_empty(blocks));

#ifdef __NetBSD__
	__USE(prev_end);
	bus_dma_tag_t dmat = obj->base.dev->dmat;
	bus_dma_segment_t *segs = NULL;
	int i = 0, nsegs = 0;
	bool loaded = false;

	sg = NULL;

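	/*
	 * Count the buddy blocks first, guarding against overflow of
	 * both the segment count and the segment array allocation.
	 */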
	list_for_each_entry(block, blocks, link) {
		if (nsegs >= INT_MAX ||
		    nsegs >= SIZE_MAX/sizeof(segs[0])) {
			ret = -ENOMEM;
			goto err;
		}
		nsegs++;
	}
	segs = kmem_zalloc(nsegs * sizeof(segs[0]), KM_SLEEP);
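	/*
	 * Translate each buddy block into one bus_dma segment at its
	 * physical address within the region.
	 */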
	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size,
		    i915_buddy_block_size(&mem->mm, block));
		offset = i915_buddy_block_offset(block);

		segs[i].ds_addr = mem->region.start + offset;
		segs[i].ds_len = block_size;
		i++;
	}
	KASSERT(i == nsegs);

	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
	    GFP_KERNEL);
	if (ret)
		goto err;
	sg = st->sgl;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
	    &sg->sg_dmamap);
	if (ret) {
		sg->sg_dmamap = NULL;
		goto err;
	}
	sg->sg_dmat = dmat;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, segs, nsegs, size,
	    BUS_DMA_WAITOK);
	if (ret)
		goto err;
	loaded = true;

	kmem_free(segs, nsegs * sizeof(segs[0]));
	segs = NULL;

	sg_page_sizes = i915_sg_page_sizes(sg);
#else
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	prev_end = (resource_size_t)-1;

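	/*
	 * Walk the blocks in order, merging each block that starts
	 * where the previous one ended into the current scatterlist
	 * entry, as long as the entry's length does not overflow.
	 */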
	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size,
		    i915_buddy_block_size(&mem->mm, block));
		offset = i915_buddy_block_offset(block);

		GEM_BUG_ON(overflows_type(block_size, sg->length));

		if (offset != prev_end ||
		    add_overflows_t(typeof(sg->length), sg->length, block_size)) {
			if (st->nents) {
				sg_page_sizes |= sg->length;
				sg = __sg_next(sg);
			}

			sg_dma_address(sg) = mem->region.start + offset;
			sg_dma_len(sg) = block_size;

			sg->length = block_size;

			st->nents++;
		} else {
			sg->length += block_size;
			sg_dma_len(sg) += block_size;
		}

		prev_end = offset + block_size;
	}

	sg_page_sizes |= sg->length;
	sg_mark_end(sg);
	i915_sg_trim(st);
#endif

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

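	/*
	 * Error unwind: on NetBSD, unload and destroy the DMA map,
	 * free the segment array if it is still allocated, and return
	 * the blocks to the region; on both paths, free the sg table.
	 */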
#ifdef __NetBSD__
err:
	if (loaded)
		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
	if (sg && sg->sg_dmamap)
		bus_dmamap_destroy(dmat, sg->sg_dmamap);
	if (segs)
		kmem_free(segs, nsegs * sizeof(segs[0]));
	__intel_memory_region_put_pages_buddy(mem, blocks);
#endif
err_free_sg:
	sg_free_table(st);
	kfree(st);
	return ret;
}

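/*
 * Associate the object with the memory region: take a reference on
 * the region, record the allocation flags, and link the object onto
 * the region's object list (the purgeable list for volatile objects).
 */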
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem,
					unsigned long flags)
{
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = intel_memory_region_get(mem);

	obj->flags |= flags;
	if (obj->base.size <= mem->min_page_size)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

	mutex_lock(&mem->objects.lock);

	if (obj->flags & I915_BO_ALLOC_VOLATILE)
		list_add(&obj->mm.region_link, &mem->objects.purgeable);
	else
		list_add(&obj->mm.region_link, &mem->objects.list);

	mutex_unlock(&mem->objects.lock);
}

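/*
 * Unlink the object from its region's object list and drop the
 * reference taken by i915_gem_object_init_memory_region().
 */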
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);

	intel_memory_region_put(mem);
}

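/*
 * Create a GEM object of at least SIZE bytes backed by the region
 * MEM.  SIZE is rounded up to the region's minimum page size and
 * checked for overflow before delegating to the region's
 * create_object hook.
 */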
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (!mem)
		return ERR_PTR(-ENODEV);

	size = round_up(size, mem->min_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	/*
	 * XXX: There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Let's document
	 * this and catch if we ever need to fix it. In the meantime, if you do
	 * spot such a local variable, please consider fixing!
	 */

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = mem->ops->create_object(mem, size, flags);
	if (!IS_ERR(obj))
		trace_i915_gem_object_create(obj);

	return obj;
}