/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
	bool	explicit;
};

static struct kmem_cache *amdgpu_sync_slab;

/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
	hash_init(sync->fences);
	sync->last_vm_update = NULL;
}
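
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * adev and fence variables are assumed to exist in the caller): a sync
 * object is created, filled with dependencies, waited on and torn down:
 *
 *	struct amdgpu_sync sync;
 *	int r;
 *
 *	amdgpu_sync_create(&sync);
 *	r = amdgpu_sync_fence(adev, &sync, fence, false);
 *	if (!r)
 *		r = amdgpu_sync_wait(&sync, true);
 *	amdgpu_sync_free(&sync);
 */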

/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 *
 * @adev: amdgpu device to use for the test
 * @f: fence to test
 *
 * Test if the fence was issued by us.
 */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
				 struct dma_fence *f)
{
	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		return ring->adev == adev;
	}

	return false;
}

/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 *
 * @f: fence to get the owner from
 *
 * Extract who originally created the fence.
 */
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
	struct drm_sched_fence *s_fence;
	struct amdgpu_amdkfd_fence *kfd_fence;

	if (!f)
		return AMDGPU_FENCE_OWNER_UNDEFINED;

	s_fence = to_drm_sched_fence(f);
	if (s_fence)
		return s_fence->owner;

	kfd_fence = to_amdgpu_amdkfd_fence(f);
	if (kfd_fence)
		return AMDGPU_FENCE_OWNER_KFD;

	return AMDGPU_FENCE_OWNER_UNDEFINED;
}

/**
 * amdgpu_sync_keep_later - keep the later fence
 *
 * @keep: existing fence to test
 * @fence: new fence
 *
 * Either keep the existing fence or the new one, depending on which one
 * is later.
 */
static void amdgpu_sync_keep_later(struct dma_fence **keep,
				   struct dma_fence *fence)
{
	if (*keep && dma_fence_is_later(*keep, fence))
		return;

	dma_fence_put(*keep);
	*keep = dma_fence_get(fence);
}
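
/*
 * Added note: dma_fence_is_later() compares fence sequence numbers and
 * is only well defined for fences from the same context. For example,
 * given two fences from one timeline with seqnos 5 and 7, passing both
 * through amdgpu_sync_keep_later() leaves *keep holding a reference to
 * the seqno-7 fence.
 */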

/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * @f: fence to add
 * @explicit: whether this is an explicit dependency
 *
 * Tries to add the fence to an existing hash entry. Returns true when an
 * entry was found, false otherwise.
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync,
				  struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (unlikely(e->fence->context != f->context))
			continue;

		amdgpu_sync_keep_later(&e->fence, f);

		/* Preserve the explicit flag so we don't lose the pipeline sync. */
		e->explicit |= explicit;

		return true;
	}
	return false;
}

/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @adev: device the sync object belongs to
 * @sync: sync object to add the fence to
 * @f: fence to sync to
 * @explicit: whether this is an explicit dependency
 *
 * Add the fence as a dependency to the sync object. Returns 0 on success
 * or -ENOMEM if a new hash entry could not be allocated.
 */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct dma_fence *f, bool explicit)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;
	if (amdgpu_sync_same_dev(adev, f) &&
	    amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
		amdgpu_sync_keep_later(&sync->last_vm_update, f);

	if (amdgpu_sync_add_later(sync, f, explicit))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->explicit = explicit;

	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
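
/*
 * Usage sketch (illustrative; the job pointer and dependency fence are
 * hypothetical): command submission typically funnels every dependency
 * through this function before the job is pushed to the scheduler:
 *
 *	r = amdgpu_sync_fence(adev, &job->sync, dependency, true);
 *	if (r)
 *		goto error_free;
 */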

/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add the fences from the reservation object to
 * @resv: reservation object with embedded fences
 * @owner: owner doing the synchronization
 * @explicit_sync: true if we should only sync to the exclusive fence
 *
 * Sync to all fences in the reservation object that we need to wait for.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner, bool explicit_sync)
{
	struct reservation_object_list *flist;
	struct dma_fence *f;
	void *fence_owner;
	unsigned i;
	int r = 0;

	if (resv == NULL)
		return -EINVAL;

	/* always sync to the exclusive fence */
	f = reservation_object_get_excl(resv);
	r = amdgpu_sync_fence(adev, sync, f, false);

	flist = reservation_object_get_list(resv);
	if (!flist || r)
		return r;

	for (i = 0; i < flist->shared_count; ++i) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		/* We only want to trigger KFD eviction fences on
		 * evict or move jobs. Skip KFD fences otherwise.
		 */
		fence_owner = amdgpu_sync_get_owner(f);
		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
			continue;

		if (amdgpu_sync_same_dev(adev, f)) {
			/* VM updates are only interesting
			 * for other VM updates and moves.
			 */
			if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
			    ((owner == AMDGPU_FENCE_OWNER_VM) !=
			     (fence_owner == AMDGPU_FENCE_OWNER_VM)))
				continue;

			/* Ignore fences from the same owner and explicit ones
			 * as long as the owner isn't undefined.
			 */
			if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
			    (fence_owner == owner || explicit_sync))
				continue;
		}

		r = amdgpu_sync_fence(adev, sync, f, false);
		if (r)
			break;
	}
	return r;
}
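
/*
 * Usage sketch (illustrative; bo is a hypothetical buffer object): the
 * reservation object must be held while its shared fence list is walked,
 * so a caller would do something along these lines:
 *
 *	r = reservation_object_lock(bo->tbo.resv, NULL);
 *	if (!r) {
 *		r = amdgpu_sync_resv(adev, &sync, bo->tbo.resv,
 *				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 *		reservation_object_unlock(bo->tbo.resv);
 *	}
 */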

/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * @ring: optional ring to use for the test
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
					 struct amdgpu_ring *ring)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (dma_fence_is_signaled(f)) {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
			continue;
		}
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
			 */
			if (s_fence->sched == &ring->sched) {
				if (dma_fence_is_signaled(&s_fence->scheduled))
					continue;

				return &s_fence->scheduled;
			}
		}

		return f;
	}

	return NULL;
}
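
/*
 * Usage sketch (illustrative; the job pointer is hypothetical): a
 * scheduler dependency callback can poll the sync object without
 * consuming its entries:
 *
 *	struct dma_fence *f = amdgpu_sync_peek_fence(&job->sync, ring);
 *
 *	if (f)
 *		return f;	(the scheduler then waits on f first)
 */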

/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 * @explicit: optional out parameter, set to whether the returned fence
 *	      is an explicit dependency
 *
 * Gets and removes the next fence from the sync object that is not
 * signaled yet.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
					bool *explicit)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (explicit)
			*explicit = e->explicit;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		dma_fence_put(f);
	}
	return NULL;
}
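
/*
 * Usage sketch (illustrative): draining the sync object hands out each
 * unsignaled fence exactly once, with the reference owned by the caller:
 *
 *	struct dma_fence *fence;
 *	bool explicit;
 *
 *	while ((fence = amdgpu_sync_get_fence(&sync, &explicit))) {
 *		... use fence as a dependency ...
 *		dma_fence_put(fence);
 *	}
 */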

/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 *
 * Adds references to all unsignaled fences in @source to @clone. Also
 * removes signaled fences from @source while at it.
 */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
			if (r)
				return r;
		} else {
			hash_del(&e->node);
			dma_fence_put(f);
			kmem_cache_free(amdgpu_sync_slab, e);
		}
	}

	dma_fence_put(clone->last_vm_update);
	clone->last_vm_update = dma_fence_get(source->last_vm_update);

	return 0;
}

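/**
 * amdgpu_sync_wait - wait for all fences in the sync object
 *
 * @sync: sync object to wait on
 * @intr: if true, do an interruptible wait
 *
 * Waits for every fence in the sync object to signal and frees the
 * entries as they complete. Returns 0 on success or the error returned
 * by dma_fence_wait().
 */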
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	return 0;
}

/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	unsigned i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		hash_del(&e->node);
		dma_fence_put(e->fence);
		kmem_cache_free(amdgpu_sync_slab, e);
	}

	dma_fence_put(sync->last_vm_update);
}

/**
 * amdgpu_sync_init - init sync object subsystem
 *
 * Allocate the slab allocator.
 */
int amdgpu_sync_init(void)
{
	amdgpu_sync_slab = kmem_cache_create(
		"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_sync_slab)
		return -ENOMEM;

	return 0;
}
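
/*
 * Usage sketch (illustrative): amdgpu_sync_init() and amdgpu_sync_fini()
 * are paired once at driver load and unload:
 *
 *	r = amdgpu_sync_init();
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_sync_fini();
 */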

/**
 * amdgpu_sync_fini - fini sync object subsystem
 *
 * Free the slab allocator.
 */
void amdgpu_sync_fini(void)
{
	kmem_cache_destroy(amdgpu_sync_slab);
}