/*	$NetBSD: intel_memory_region.c,v 1.4 2021/12/19 11:47:48 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_memory_region.c,v 1.4 2021/12/19 11:47:48 riastradh Exp $");

#include "intel_memory_region.h"
#include "i915_drv.h"

#include <linux/nbsd-namespace.h>

/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
	(BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst))

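/*
 * Each entry encodes a region as a (type, instance) pair of bit
 * positions: the memory type lands above INTEL_MEMORY_TYPE_SHIFT and
 * the instance in the low bits, e.g. REGION_MAP(INTEL_MEMORY_SYSTEM, 0)
 * sets bit (INTEL_MEMORY_SYSTEM + INTEL_MEMORY_TYPE_SHIFT) and bit 0.
 */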
const u32 intel_region_map[] = {
	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};

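/*
 * Return the first probed region of the given memory type, or NULL if
 * the device has none.
 */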
struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

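/*
 * Hand every block on @blocks back to the buddy allocator and empty
 * the list.  Returns the number of bytes freed.  The caller must hold
 * mem->mm_lock.
 */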
static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

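/*
 * Free a list of buddy blocks, crediting the freed bytes back to the
 * region's available space.
 */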
void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

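/*
 * Free a single buddy block; block->private records which region it
 * was allocated from.
 */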
void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

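/*
 * Allocate @size bytes from the region as a list of buddy blocks.
 * Allocation is greedy: each iteration takes the largest power-of-two
 * block that fits the remaining request, retrying with smaller orders
 * (down to min_order) when the allocator is too fragmented to supply
 * the big one.  I915_ALLOC_MIN_PAGE_SIZE raises the minimum order to
 * the region's minimum page size; I915_ALLOC_CONTIGUOUS rounds the
 * request up so it is satisfied by a single block.
 */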
int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		/* Largest order that still fits within the remainder. */
		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			/* Fragmented: fall back to a smaller block. */
			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;
	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}

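/*
 * Allocate @size bytes and return the (first) resulting block rather
 * than a list; callers that need the memory in one piece should pass
 * I915_ALLOC_CONTIGUOUS.
 */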
struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

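/*
 * Set up the region's buddy allocator to manage its entire range in
 * PAGE_SIZE chunks.
 */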
int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

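/* Tear down the region's buddy allocator. */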
void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_fini(&mem->mm);
}

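/*
 * Allocate and initialize a new region: @start/@size place it in its
 * address space, @io_start is where it appears for CPU access, and
 * @min_page_size is the smallest allocation granularity the backing
 * hardware supports.  The backend's ops->init hook runs after the
 * common setup; if it fails, the partially constructed region is torn
 * down and an ERR_PTR returned.
 */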
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
	return ERR_PTR(err);
}

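/* printf-style setter for the region's human-readable name. */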
void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

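/*
 * Final unreference: give the backend a chance to release its
 * resources, then free the region itself.
 */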
static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

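/* Acquire an additional reference to the region. */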
struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

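/* Release a reference; the last one destroys the region. */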
void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

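/*
 * Instantiate a region for each type the hardware advertises via
 * HAS_REGION: shmem-backed objects for system memory, the stolen
 * allocator for stolen memory, and (here) fake local memory for LMEM.
 * Any failure unwinds all regions created so far.
 */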
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u32 type;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		case INTEL_MEMORY_LOCAL:
			mem = intel_setup_fake_lmem(i915);
			break;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			DRM_ERROR("Failed to setup region(%d) type=%d\n",
				  err, type);
			goto out_cleanup;
		}

		mem->id = intel_region_map[i];
		mem->type = type;
		mem->instance =
			MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

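/*
 * Drop the driver's reference on every region, clearing each slot
 * first so the region cannot be looked up while it is being released.
 */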
void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif