xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_sync.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 /*
27  * Authors:
28  *    Christian König <christian.koenig@amd.com>
29  */
30 
31 #include <drm/drmP.h>
32 #include "amdgpu.h"
33 #include "amdgpu_trace.h"
34 #include "amdgpu_amdkfd.h"
35 
36 struct amdgpu_sync_entry {
37 	struct hlist_node	node;
38 	struct dma_fence	*fence;
39 	bool	explicit;
40 };
41 
42 static struct pool amdgpu_sync_slab;
43 
44 /**
45  * amdgpu_sync_create - zero init sync object
46  *
47  * @sync: sync object to initialize
48  *
49  * Just clear the sync object for now.
50  */
51 void amdgpu_sync_create(struct amdgpu_sync *sync)
52 {
53 	hash_init(sync->fences);
54 	sync->last_vm_update = NULL;
55 }
56 
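/*
 * Illustrative usage sketch (not taken from an actual caller): the typical
 * lifecycle of a sync object built from the functions in this file.  Here
 * "adev", "resv" and "owner" stand in for whatever context a caller has.
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_resv(adev, &sync, resv, owner, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */
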
57 /**
58  * amdgpu_sync_same_dev - test if a fence belongs to us
59  *
60  * @adev: amdgpu device to use for the test
61  * @f: fence to test
62  *
63  * Test if the fence was issued by us.
64  */
65 static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
66 				 struct dma_fence *f)
67 {
68 	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
69 
70 	if (s_fence) {
71 		struct amdgpu_ring *ring;
72 
73 		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
74 		return ring->adev == adev;
75 	}
76 
77 	return false;
78 }
79 
80 /**
81  * amdgpu_sync_get_owner - extract the owner of a fence
82  *
83  * @f: fence to get the owner from
84  *
85  * Extract who originally created the fence.
86  */
87 static void *amdgpu_sync_get_owner(struct dma_fence *f)
88 {
89 	struct drm_sched_fence *s_fence;
90 	struct amdgpu_amdkfd_fence *kfd_fence;
91 
92 	if (!f)
93 		return AMDGPU_FENCE_OWNER_UNDEFINED;
94 
95 	s_fence = to_drm_sched_fence(f);
96 	if (s_fence)
97 		return s_fence->owner;
98 
99 	kfd_fence = to_amdgpu_amdkfd_fence(f);
100 	if (kfd_fence)
101 		return AMDGPU_FENCE_OWNER_KFD;
102 
103 	return AMDGPU_FENCE_OWNER_UNDEFINED;
104 }
105 
106 /**
107  * amdgpu_sync_keep_later - Keep the later fence
108  *
109  * @keep: existing fence to test
110  * @fence: new fence
111  *
112  * Either keep the existing fence or the new one, depending on which is later.
113  */
114 static void amdgpu_sync_keep_later(struct dma_fence **keep,
115 				   struct dma_fence *fence)
116 {
117 	if (*keep && dma_fence_is_later(*keep, fence))
118 		return;
119 
120 	dma_fence_put(*keep);
121 	*keep = dma_fence_get(fence);
122 }
123 
124 /**
125  * amdgpu_sync_add_later - add the fence to the hash
126  *
127  * @sync: sync object to add the fence to
128  * @f: fence to add
129  * @explicit: whether this fence is an explicit dependency
130  * Tries to add the fence to an existing hash entry. Returns true when an entry
131  * was found, false otherwise.
132  */
133 static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
134 {
135 	struct amdgpu_sync_entry *e;
136 
137 	hash_for_each_possible(sync->fences, e, node, f->context) {
138 		if (unlikely(e->fence->context != f->context))
139 			continue;
140 
141 		amdgpu_sync_keep_later(&e->fence, f);
142 
143 		/* Preserve the explicit flag so we don't lose pipeline sync */
144 		e->explicit |= explicit;
145 
146 		return true;
147 	}
148 	return false;
149 }
150 
151 /**
152  * amdgpu_sync_fence - remember to sync to this fence
153  *
154  * @sync: sync object to add fence to
155  * @f: fence to sync to
156  * @explicit: true if this is an explicit dependency
157  */
158 int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
159 		      struct dma_fence *f, bool explicit)
160 {
161 	struct amdgpu_sync_entry *e;
162 
163 	if (!f)
164 		return 0;
165 	if (amdgpu_sync_same_dev(adev, f) &&
166 	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
167 		amdgpu_sync_keep_later(&sync->last_vm_update, f);
168 
169 	if (amdgpu_sync_add_later(sync, f, explicit))
170 		return 0;
171 
172 #ifdef __linux__
173 	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
174 #else
175 	e = pool_get(&amdgpu_sync_slab, PR_WAITOK);
176 #endif
177 	if (!e)
178 		return -ENOMEM;
179 
180 	e->explicit = explicit;
181 
182 	hash_add(sync->fences, &e->node, f->context);
183 	e->fence = dma_fence_get(f);
184 	return 0;
185 }
186 
187 /**
188  * amdgpu_sync_resv - sync to a reservation object
189  *
190  * @sync: sync object to add fences from reservation object to
191  * @resv: reservation object with embedded fence
192  * @owner: originator of the fences, used to decide which fences to skip
193  * @explicit_sync: true if we should only sync to the exclusive fence
194  * Sync to the fences in the reservation object.
195  */
196 int amdgpu_sync_resv(struct amdgpu_device *adev,
197 		     struct amdgpu_sync *sync,
198 		     struct reservation_object *resv,
199 		     void *owner, bool explicit_sync)
200 {
201 	struct reservation_object_list *flist;
202 	struct dma_fence *f;
203 	void *fence_owner;
204 	unsigned i;
205 	int r = 0;
206 
207 	if (resv == NULL)
208 		return -EINVAL;
209 
210 	/* always sync to the exclusive fence */
211 	f = reservation_object_get_excl(resv);
212 	r = amdgpu_sync_fence(adev, sync, f, false);
213 
214 	flist = reservation_object_get_list(resv);
215 	if (!flist || r)
216 		return r;
217 
218 	for (i = 0; i < flist->shared_count; ++i) {
219 		f = rcu_dereference_protected(flist->shared[i],
220 					      reservation_object_held(resv));
221 		/* We only want to trigger KFD eviction fences on
222 		 * evict or move jobs. Skip KFD fences otherwise.
223 		 */
224 		fence_owner = amdgpu_sync_get_owner(f);
225 		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
226 		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
227 			continue;
228 
229 		if (amdgpu_sync_same_dev(adev, f)) {
230 			/* VM updates are only interesting
231 			 * for other VM updates and moves.
232 			 */
233 			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
234 			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
235 			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
236 			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
237 				continue;
238 
239 			/* Ignore fences from the same owner, and all fences when
240 			 * explicit sync is requested, unless the owner is undefined.
241 			 */
242 			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
243 			    (fence_owner == owner || explicit_sync))
244 				continue;
245 		}
246 
247 		r = amdgpu_sync_fence(adev, sync, f, false);
248 		if (r)
249 			break;
250 	}
251 	return r;
252 }
253 
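/*
 * Illustrative sketch (hypothetical caller, not from the driver): syncing a
 * job to everything attached to a buffer object's reservation object.  "bo"
 * and "job" are assumed names; on this kernel version an amdgpu_bo reaches
 * its reservation object through the embedded TTM buffer object.
 *
 *	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
 *			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 *	if (r)
 *		return r;
 */
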
254 /**
255  * amdgpu_sync_peek_fence - get the next fence not signaled yet
256  *
257  * @sync: the sync object
258  * @ring: optional ring to use for test
259  *
260  * Returns the next fence not signaled yet without removing it from the sync
261  * object.
262  */
263 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
264 					 struct amdgpu_ring *ring)
265 {
266 	struct amdgpu_sync_entry *e;
267 	struct hlist_node *tmp;
268 	int i;
269 
270 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
271 		struct dma_fence *f = e->fence;
272 		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
273 
274 		if (dma_fence_is_signaled(f)) {
275 			hash_del(&e->node);
276 			dma_fence_put(f);
277 #ifdef __linux__
278 			kmem_cache_free(amdgpu_sync_slab, e);
279 #else
280 			pool_put(&amdgpu_sync_slab, e);
281 #endif
282 			continue;
283 		}
284 		if (ring && s_fence) {
285 			/* For fences from the same ring it is sufficient
286 			 * that they have been scheduled.
287 			 */
288 			if (s_fence->sched == &ring->sched) {
289 				if (dma_fence_is_signaled(&s_fence->scheduled))
290 					continue;
291 
292 				return &s_fence->scheduled;
293 			}
294 		}
295 
296 		return f;
297 	}
298 
299 	return NULL;
300 }
301 
302 /**
303  * amdgpu_sync_get_fence - get the next fence from the sync object
304  *
305  * @sync: sync object to use
306  * @explicit: if non-NULL, set to whether the returned fence is an explicit one
307  *
308  * Gets and removes the next unsignaled fence from the sync object.
309  */
310 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
311 {
312 	struct amdgpu_sync_entry *e;
313 	struct hlist_node *tmp;
314 	struct dma_fence *f;
315 	int i;
316 
317 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
318 		f = e->fence;
319 		if (explicit)
320 			*explicit = e->explicit;
321 
322 		hash_del(&e->node);
323 #ifdef __linux__
324 		kmem_cache_free(amdgpu_sync_slab, e);
325 #else
326 		pool_put(&amdgpu_sync_slab, e);
327 #endif
328 
329 		if (!dma_fence_is_signaled(f))
330 			return f;
331 
332 		dma_fence_put(f);
333 	}
334 	return NULL;
335 }
336 
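/*
 * Illustrative sketch (hypothetical caller): draining a sync object with
 * amdgpu_sync_get_fence().  The reference to the returned fence is handed
 * over to the caller, which must drop it with dma_fence_put() when done.
 *
 *	struct dma_fence *fence;
 *	bool explicit;
 *
 *	while ((fence = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... wait for or schedule against "fence" ...
 *		dma_fence_put(fence);
 *	}
 */
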
337 /**
338  * amdgpu_sync_clone - clone a sync object
339  *
340  * @source: sync object to clone
341  * @clone: pointer to destination sync object
342  *
343  * Adds references to all unsignaled fences in @source to @clone. Also
344  * removes signaled fences from @source while at it.
345  */
346 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
347 {
348 	struct amdgpu_sync_entry *e;
349 	struct hlist_node *tmp;
350 	struct dma_fence *f;
351 	int i, r;
352 
353 	hash_for_each_safe(source->fences, i, tmp, e, node) {
354 		f = e->fence;
355 		if (!dma_fence_is_signaled(f)) {
356 			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
357 			if (r)
358 				return r;
359 		} else {
360 			hash_del(&e->node);
361 			dma_fence_put(f);
362 #ifdef __linux__
363 			kmem_cache_free(amdgpu_sync_slab, e);
364 #else
365 			pool_put(&amdgpu_sync_slab, e);
366 #endif
367 		}
368 	}
369 
370 	dma_fence_put(clone->last_vm_update);
371 	clone->last_vm_update = dma_fence_get(source->last_vm_update);
372 
373 	return 0;
374 }
375 
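/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, the wait can be interrupted by a signal
 *
 * Waits for every fence in the sync object to signal, dropping each entry
 * once its fence has signaled. Returns 0 on success or the error code
 * returned by dma_fence_wait().
 */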
376 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
377 {
378 	struct amdgpu_sync_entry *e;
379 	struct hlist_node *tmp;
380 	int i, r;
381 
382 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
383 		r = dma_fence_wait(e->fence, intr);
384 		if (r)
385 			return r;
386 
387 		hash_del(&e->node);
388 		dma_fence_put(e->fence);
389 #ifdef __linux__
390 		kmem_cache_free(amdgpu_sync_slab, e);
391 #else
392 		pool_put(&amdgpu_sync_slab, e);
393 #endif
394 	}
395 
396 	return 0;
397 }
398 
399 /**
400  * amdgpu_sync_free - free the sync object
401  *
402  * @sync: sync object to use
403  *
404  * Free the sync object.
405  */
406 void amdgpu_sync_free(struct amdgpu_sync *sync)
407 {
408 	struct amdgpu_sync_entry *e;
409 	struct hlist_node *tmp;
410 	unsigned i;
411 
412 	hash_for_each_safe(sync->fences, i, tmp, e, node) {
413 		hash_del(&e->node);
414 		dma_fence_put(e->fence);
415 #ifdef __linux__
416 		kmem_cache_free(amdgpu_sync_slab, e);
417 #else
418 		pool_put(&amdgpu_sync_slab, e);
419 #endif
420 	}
421 
422 	dma_fence_put(sync->last_vm_update);
423 }
424 
425 /**
426  * amdgpu_sync_init - init sync object subsystem
427  *
428  * Allocate the slab allocator.
429  */
430 int amdgpu_sync_init(void)
431 {
432 #ifdef __linux__
433 	amdgpu_sync_slab = kmem_cache_create(
434 		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
435 		SLAB_HWCACHE_ALIGN, NULL);
436 	if (!amdgpu_sync_slab)
437 		return -ENOMEM;
438 #else
439 	pool_init(&amdgpu_sync_slab, sizeof(struct amdgpu_sync_entry),
440 	    0, IPL_TTY, 0, "amdgpu_sync", NULL);
441 #endif
442 
443 	return 0;
444 }
445 
446 /**
447  * amdgpu_sync_fini - fini sync object subsystem
448  *
449  * Free the slab allocator.
450  */
451 void amdgpu_sync_fini(void)
452 {
453 #ifdef __linux__
454 	kmem_cache_destroy(amdgpu_sync_slab);
455 #else
456 	pool_destroy(&amdgpu_sync_slab);
457 #endif
458 }
459