/*	$NetBSD: nouveau_nvkm_core_mm.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_core_mm.c,v 1.3 2021/12/18 23:45:34 riastradh Exp $");

#include <core/mm.h>

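/*
 * node(root, dir) steps from "root" to the neighbouring entry in the
 * mm-wide node list, yielding NULL at either end of the list.  It
 * expects a local variable named "mm" to be in scope at the expansion
 * site.
 */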
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
	list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)

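/*
 * Log every node and every free-list entry under the given header.
 * Debugging aid; see nvkm_mm_fini() for one caller.
 */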
void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
	struct nvkm_mm_node *node;

	pr_err("nvkm: %s\n", header);
	pr_err("nvkm: node list:\n");
	list_for_each_entry(node, &mm->nodes, nl_entry) {
		pr_err("nvkm: \t%08x %08x %d\n",
		       node->offset, node->length, node->type);
	}
	pr_err("nvkm: free list:\n");
	list_for_each_entry(node, &mm->free, fl_entry) {
		pr_err("nvkm: \t%08x %08x %d\n",
		       node->offset, node->length, node->type);
	}
}

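/*
 * Return the node at *pthis to the free pool: merge it into an
 * adjacent free neighbour where possible, otherwise mark it
 * NVKM_MM_TYPE_NONE and link it into the offset-sorted free list.
 * *pthis is cleared on return.
 */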
void
nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
	struct nvkm_mm_node *this = *pthis;

	if (this) {
		struct nvkm_mm_node *prev = node(this, prev);
		struct nvkm_mm_node *next = node(this, next);

		/* Absorb this node into a free predecessor. */
		if (prev && prev->type == NVKM_MM_TYPE_NONE) {
			prev->length += this->length;
			list_del(&this->nl_entry);
			kfree(this); this = prev;
		}

		/* Extend a free successor downwards over this node. */
		if (next && next->type == NVKM_MM_TYPE_NONE) {
			next->offset  = this->offset;
			next->length += this->length;
			if (this->type == NVKM_MM_TYPE_NONE)
				list_del(&this->fl_entry);
			list_del(&this->nl_entry);
			kfree(this); this = NULL;
		}

		/* No merge happened: insert into the sorted free list. */
		if (this && this->type != NVKM_MM_TYPE_NONE) {
			list_for_each_entry(prev, &mm->free, fl_entry) {
				if (this->offset < prev->offset)
					break;
			}

			list_add_tail(&this->fl_entry, &prev->fl_entry);
			this->type = NVKM_MM_TYPE_NONE;
		}
	}

	*pthis = NULL;
}

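/*
 * Split "size" units off the front of free node "a".  Returns "a"
 * itself on an exact fit, a new node covering the head of "a"
 * otherwise, or NULL if the split node cannot be allocated.
 */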
static struct nvkm_mm_node *
region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
	struct nvkm_mm_node *b;

	if (a->length == size)
		return a;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (unlikely(b == NULL))
		return NULL;

	b->offset = a->offset;
	b->length = size;
	b->heap   = a->heap;
	b->type   = a->type;
	a->offset += size;
	a->length -= size;
	list_add_tail(&b->nl_entry, &a->nl_entry);
	if (b->type == NVKM_MM_TYPE_NONE)
		list_add_tail(&b->fl_entry, &a->fl_entry);

	return b;
}

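/*
 * Allocate from the low end of the first suitable free region: at
 * least size_min and at most size_max units, aligned to "align"
 * (a power of two), optionally restricted to one heap.  Returns 0
 * with the new node in *pnode, -ENOSPC when nothing fits, or
 * -ENOMEM when a split fails.
 *
 * Illustrative call (values are examples only):
 *
 *	struct nvkm_mm_node *node = NULL;
 *	int ret = nvkm_mm_head(mm, NVKM_MM_HEAP_ANY, type, 16, 16, 1,
 *			       &node);
 *	if (ret == 0) {
 *		... use node->offset, node->length ...
 *		nvkm_mm_free(mm, &node);
 *	}
 */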
int
nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
	     u32 align, struct nvkm_mm_node **pnode)
{
	struct nvkm_mm_node *prev, *this, *next;
	u32 mask = align - 1;
	u32 splitoff;
	u32 s, e;

	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry(this, &mm->free, fl_entry) {
		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
			if (this->heap != heap)
				continue;
		}
		e = this->offset + this->length;
		s = this->offset;

		/* Pad to a block boundary against differently-typed
		 * neighbours, then apply the requested alignment. */
		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type)
			e = rounddown(e, mm->block_size);

		s  = (s + mask) & ~mask;
		e &= ~mask;
		if (s > e || e - s < size_min)
			continue;

		/* Carve off any leading slack, then the allocation. */
		splitoff = s - this->offset;
		if (splitoff && !region_head(mm, this, splitoff))
			return -ENOMEM;

		this = region_head(mm, this, min(size_max, e - s));
		if (!this)
			return -ENOMEM;

		this->next = NULL;
		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}

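/*
 * Split "size" units off the back of free node "a"; the mirror image
 * of region_head(), with the same return convention.
 */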
static struct nvkm_mm_node *
region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
	struct nvkm_mm_node *b;

	if (a->length == size)
		return a;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (unlikely(b == NULL))
		return NULL;

	a->length -= size;
	b->offset  = a->offset + a->length;
	b->length  = size;
	b->heap    = a->heap;
	b->type    = a->type;

	list_add(&b->nl_entry, &a->nl_entry);
	if (b->type == NVKM_MM_TYPE_NONE)
		list_add(&b->fl_entry, &a->fl_entry);

	return b;
}

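/*
 * As nvkm_mm_head(), but scans the free list backwards and carves the
 * allocation from the high end of the chosen region, so the result
 * lands at the highest suitable offset.
 */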
int
nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
	     u32 align, struct nvkm_mm_node **pnode)
{
	struct nvkm_mm_node *prev, *this, *next;
	u32 mask = align - 1;

	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
		u32 e = this->offset + this->length;
		u32 s = this->offset;
		u32 c = 0, a;
		if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
			if (this->heap != heap)
				continue;
		}

		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type) {
			e = rounddown(e, mm->block_size);
			c = next->offset - e;
		}

		s = (s + mask) & ~mask;
		a = e - s;
		if (s > e || a < size_min)
			continue;

		/* Place the allocation at the aligned top of the region;
		 * "c" accumulates the trailing slack to split off first. */
		a  = min(a, size_max);
		s  = (e - a) & ~mask;
		c += (e - s) - a;

		if (c && !region_tail(mm, this, c))
			return -ENOMEM;

		this = region_tail(mm, this, a);
		if (!this)
			return -ENOMEM;

		this->next = NULL;
		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}

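/*
 * Register a heap: make [offset, offset + length), rounded inward to
 * "block"-sized units, available as a single free node tagged "heap".
 * On a second and later call the block size must match, and any gap
 * to the previous region is recorded as a hole.
 *
 * Illustrative call (values are examples only):
 *
 *	ret = nvkm_mm_init(&mm, 0, 0, size >> shift, 1);
 */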
int
nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
	struct nvkm_mm_node *node, *prev;
	u32 next;

	if (nvkm_mm_initialised(mm)) {
		/* Appending a heap: record any gap after the previous
		 * region as a hole so offsets stay contiguous. */
		prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
		next = prev->offset + prev->length;
		if (next != offset) {
			BUG_ON(next > offset);
			if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
				return -ENOMEM;
			node->type   = NVKM_MM_TYPE_HOLE;
			node->offset = next;
			node->length = offset - next;
			list_add_tail(&node->nl_entry, &mm->nodes);
		}
		BUG_ON(block != mm->block_size);
	} else {
		INIT_LIST_HEAD(&mm->nodes);
		INIT_LIST_HEAD(&mm->free);
		mm->block_size = block;
		mm->heap_nodes = 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Round the region inward to whole blocks. */
	if (length) {
		node->offset  = roundup(offset, mm->block_size);
		node->length  = rounddown(offset + length, mm->block_size);
		node->length -= node->offset;
	}

	list_add_tail(&node->nl_entry, &mm->nodes);
	list_add_tail(&node->fl_entry, &mm->free);
	node->heap = heap;
	mm->heap_nodes++;
	return 0;
}

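/*
 * Tear the allocator down.  Returns -EBUSY (after dumping both lists)
 * if more non-hole nodes remain than heaps were registered, i.e. some
 * allocation is still live; otherwise frees every node and returns 0.
 */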
int
nvkm_mm_fini(struct nvkm_mm *mm)
{
	struct nvkm_mm_node *node, *temp;
	int nodes = 0;

	if (!nvkm_mm_initialised(mm))
		return 0;

	list_for_each_entry(node, &mm->nodes, nl_entry) {
		if (node->type != NVKM_MM_TYPE_HOLE) {
			if (++nodes > mm->heap_nodes) {
				nvkm_mm_dump(mm, "mm not clean!");
				return -EBUSY;
			}
		}
	}

	list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
		list_del(&node->nl_entry);
		kfree(node);
	}

	mm->heap_nodes = 0;
	return 0;
}