/*	$NetBSD: amdgpu_sa.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, whatever comes after the last bo is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * completed bo, if one exists.  If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * return as soon as any of those fences completes.
 */
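/*
 * Illustrative sketch (not a literal memory dump): sa_bos live on "olist"
 * in allocation order, "hole" points at the entry after which the next
 * allocation is attempted, and the per-ring "flist" lists hold sa_bos
 * whose protecting fences have not signaled yet.
 *
 *   olist:   head <-> bo0 <-> bo1 <-> bo2 <-> head
 *                                      ^hole
 *   buffer:  |bo0|bo1|bo2|........... free ...........|
 *                           ^ next allocation starts here
 */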
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sa.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

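/**
 * amdgpu_sa_bo_manager_init - set up a sub-allocation manager
 *
 * @adev: amdgpu device
 * @sa_manager: manager to initialize
 * @size: size in bytes of the backing buffer object
 * @align: maximum alignment callers may request
 * @domain: memory domain for the backing buffer object
 *
 * Initializes the wait queue, hole pointer and per-ring free lists, and
 * creates the backing bo that all sub-allocations are carved out of.
 */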
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

#ifdef __NetBSD__
	spin_lock_init(&sa_manager->wq_lock);
	DRM_INIT_WAITQUEUE(&sa_manager->wq, "amdsabom");
#else
	init_waitqueue_head(&sa_manager->wq);
#endif
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

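/**
 * amdgpu_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * Frees any sa_bos still tracked by the manager (warning if the list was
 * not already empty), drops the backing bo reference and, on NetBSD,
 * destroys the wait queue and its lock.
 */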
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&sa_manager->wq);
	spin_lock_destroy(&sa_manager->wq_lock);
#endif
}

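/**
 * amdgpu_sa_bo_manager_start - make the sub-allocator usable
 *
 * Pins the backing bo into its memory domain to obtain a GPU address and
 * kmaps it for CPU access.  Must be called before handing out allocations.
 */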
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

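/**
 * amdgpu_sa_bo_manager_suspend - quiesce the sub-allocator
 *
 * Unmaps and unpins the backing bo so it can be moved or evicted, e.g.
 * across suspend.
 */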
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

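/*
 * Drop one sa_bo: unlink it from the ordered and free lists, move the
 * hole back if it pointed at this entry, release its fence and free it.
 * Caller must hold the manager's wait-queue lock.
 */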
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}

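/*
 * Walk forward from the hole and reclaim every consecutive sa_bo whose
 * fence has signaled, stopping at the first one that is still in use
 * (no fence yet) or whose fence has not signaled.
 * Caller must hold the manager's wait-queue lock.
 */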
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

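/*
 * Start and end offsets of the current hole: the hole begins at the end
 * of the bo that "hole" points at (or at 0 when it points at the list
 * head) and ends at the start of the next bo (or at the buffer's end).
 */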
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

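/*
 * Try to place an allocation of @size bytes (aligned to @align) in the
 * current hole.  On success the new sa_bo is linked right after the hole,
 * the hole pointer advances to it, and true is returned.
 * Caller must hold the manager's wait-queue lock.
 */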
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;
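	/* e.g. soffset == 10, align == 8: wasted == 6, so the allocation
	 * would start at offset 16; an already aligned soffset wastes 0. */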

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

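/*
 * Try to advance the hole past already-signaled allocations.  If the hole
 * is at the end of the buffer, wrap around to the beginning.  Otherwise,
 * look at the head of every ring's free list: unsignaled fences are
 * collected into @fences for the caller to wait on, and among the
 * signaled ones the sa_bo closest after the current hole is removed and
 * the hole moved to its place.  Returns true if the hole moved and the
 * allocation should be retried.  Caller must hold the wait-queue lock.
 */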
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all the fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

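/**
 * amdgpu_sa_bo_new - allocate from a sub-allocation manager
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: on success, points to the new sub-allocation
 * @size: number of bytes to allocate
 * @align: required alignment (must not exceed the manager's alignment)
 *
 * Tries the current hole first, then skips over signaled allocations; if
 * neither works, it waits on the collected ring fences or, when there is
 * nothing to wait for, until space is freed.  Returns 0 on success or a
 * negative error code (the wait is interruptible on the Linux path).
 *
 * Hypothetical usage sketch (the manager and fence names are made up):
 *
 *	struct amdgpu_sa_bo *mem;
 *	r = amdgpu_sa_bo_new(&example_sa_manager, &mem, 2048, 256);
 *	if (r)
 *		return r;
 *	... submit work that uses the memory ...
 *	amdgpu_sa_bo_free(adev, &mem, example_fence);
 */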
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	unsigned count;
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
#ifdef __NetBSD__
				spin_unlock(&sa_manager->wq_lock);
#else
				spin_unlock(&sa_manager->wq.lock);
#endif
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
			if (fences[i])
				fences[count++] = fence_get(fences[i]);

		if (count) {
#ifdef __NetBSD__
			spin_unlock(&sa_manager->wq_lock);
#else
			spin_unlock(&sa_manager->wq.lock);
#endif
			t = fence_wait_any_timeout(fences, count, false,
						   MAX_SCHEDULE_TIMEOUT);
			for (i = 0; i < count; ++i)
				fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
#ifdef __NetBSD__
			spin_lock(&sa_manager->wq_lock);
#else
			spin_lock(&sa_manager->wq.lock);
#endif
		} else {
			/* if we have nothing to wait for, block */
#ifdef __NetBSD__
			DRM_SPIN_WAIT_UNTIL(r, &sa_manager->wq,
			    &sa_manager->wq_lock,
			    amdgpu_sa_event(sa_manager, size, align));
#else
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
#endif
		}

	} while (!r);

#ifdef __NetBSD__
	spin_unlock(&sa_manager->wq_lock);
#else
	spin_unlock(&sa_manager->wq.lock);
#endif
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

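/**
 * amdgpu_sa_bo_free - release a sub-allocation
 *
 * @adev: amdgpu device
 * @sa_bo: allocation to release; set to NULL on return
 * @fence: fence after which the memory is really reusable, may be NULL
 *
 * If @fence has not signaled yet, the sa_bo is parked on the free list of
 * the fence's ring and reclaimed later; otherwise it is freed right away.
 * Waiters blocked in amdgpu_sa_bo_new() are woken up in either case.
 */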
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
		(*sa_bo)->fence = fence_get(fence);
		idx = amdgpu_ring_from_fence(fence)->idx;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&sa_manager->wq, &sa_manager->wq_lock);
	spin_unlock(&sa_manager->wq_lock);
#else
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
#endif
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

	if (a_fence)
		seq_printf(m, " protected by 0x%016llx on ring %d",
			   a_fence->seq, a_fence->ring->idx);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		seq_printf(m, " protected by 0x%016x on ring %d",
			   s_fence->base.seqno, ring->idx);
	}
}

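/*
 * Dump every tracked sub-allocation to debugfs: GPU address range, size,
 * the fence protecting it (if any) and a ">" marker at the current hole.
 */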
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence)
			amdgpu_sa_bo_dump_fence(i->fence, m);
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif