/*	$NetBSD: amdgpu_sa.c,v 1.5 2022/10/08 19:06:30 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated,
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * completed bo, if one exists.  If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * return as soon as any one of those fences completes.
 */
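/*
 * Illustrative usage (a minimal sketch, not part of the driver): a caller
 * owning an amdgpu_sa_manager would drive it roughly as follows.  The
 * "adev" and "fence" variables and the sizes below are assumptions made
 * for the example, not values taken from this file.
 *
 *	struct amdgpu_sa_manager mgr;
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_manager_init(adev, &mgr, 256 * 1024, 256,
 *				      AMDGPU_GEM_DOMAIN_GTT);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_sa_bo_new(&mgr, &sa_bo, 4096, 256);
 *	if (r == 0) {
 *		// fill mgr.cpu_ptr + sa_bo->soffset, point the GPU at
 *		// mgr.gpu_addr + sa_bo->soffset, then submit the work
 *		amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *	}
 *
 *	amdgpu_sa_bo_manager_fini(adev, &mgr);
 */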

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sa.c,v 1.5 2022/10/08 19:06:30 riastradh Exp $");

#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	spin_lock_init(&sa_manager->wq_lock);
	DRM_INIT_WAITQUEUE(&sa_manager->wq, "amdsabom");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				&sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	sa_manager->size = 0;
	DRM_DESTROY_WAITQUEUE(&sa_manager->wq);
	spin_lock_destroy(&sa_manager->wq_lock);
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

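/*
 * amdgpu_sa_bo_hole_soffset - start offset of the current hole: the end
 * offset of the bo that "hole" points at, or 0 when the hole is at the
 * head of the list.
 */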
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

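/*
 * amdgpu_sa_bo_hole_eoffset - end offset of the current hole: the start
 * offset of the next bo in the list, or the manager's size when the hole
 * is the tail of the list.
 */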
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

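/*
 * amdgpu_sa_bo_try_alloc - carve sa_bo out of the current hole if it is
 * large enough for size bytes at the requested alignment.  On success the
 * new bo becomes the hole, so the next attempt starts right behind it.
 */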
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
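	/* bytes needed to round soffset up to the alignment (0 if aligned) */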
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

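/*
 * amdgpu_sa_bo_next_hole - advance the hole past completed allocations.
 * Returns true if the hole moved (either wrapped to the start of the
 * buffer or a signaled bo was reclaimed), false when nothing could be
 * reclaimed; in that case fences[] holds, per ring, the oldest unsignaled
 * fence (or NULL) for the caller to wait on.
 */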
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current last
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		fences[i] = NULL;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;
		/* we knew that this one is signaled,
		   so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

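/**
 * amdgpu_sa_bo_new - allocate size bytes from the sub-allocator
 *
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: where to store the new allocation on success
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Tries the current hole first, then skips over completed allocations,
 * and finally blocks on the oldest fences until space frees up.
 * Returns 0 on success or a negative error code.
 */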
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if (!(*sa_bo))
		return -ENOMEM;
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq_lock);
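	/*
	 * Outer loop: runs until the allocation succeeds or waiting fails.
	 * Inner loop: free what has signaled, retry the hole, and advance
	 * past busy allocations while amdgpu_sa_bo_next_hole() makes
	 * progress.
	 */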
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			tries[i] = 0;

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq_lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

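		/* compact the unsignaled fences and take a reference on each */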
		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq_lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT,
						       NULL);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq_lock);
		} else {
			/* if we have nothing to wait for, block */
			DRM_SPIN_WAIT_UNTIL(r, &sa_manager->wq,
			    &sa_manager->wq_lock,
			    amdgpu_sa_event(sa_manager, size, align));
		}

	} while (!r);

	spin_unlock(&sa_manager->wq_lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

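/**
 * amdgpu_sa_bo_free - release a sub-allocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: the allocation to free; *sa_bo is set to NULL on return
 * @fence: fence after which the backing range may be reused, or NULL
 *
 * If the fence is still pending, the bo is queued on the fence list of
 * the corresponding context; otherwise it is removed immediately.
 * Waiters in amdgpu_sa_bo_new() are woken in either case.
 */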
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq_lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	DRM_SPIN_WAKEUP_ALL(&sa_manager->wq, &sa_manager->wq_lock);
	spin_unlock(&sa_manager->wq_lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq_lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%016llx on context %llu",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq_lock);
}
#endif