1*89400822Sriastradh /* $NetBSD: linux_dma_fence.c,v 1.42 2022/09/01 09:37:06 riastradh Exp $ */
23d8b8481Sriastradh
33d8b8481Sriastradh /*-
43d8b8481Sriastradh * Copyright (c) 2018 The NetBSD Foundation, Inc.
53d8b8481Sriastradh * All rights reserved.
63d8b8481Sriastradh *
73d8b8481Sriastradh * This code is derived from software contributed to The NetBSD Foundation
83d8b8481Sriastradh * by Taylor R. Campbell.
93d8b8481Sriastradh *
103d8b8481Sriastradh * Redistribution and use in source and binary forms, with or without
113d8b8481Sriastradh * modification, are permitted provided that the following conditions
123d8b8481Sriastradh * are met:
133d8b8481Sriastradh * 1. Redistributions of source code must retain the above copyright
143d8b8481Sriastradh * notice, this list of conditions and the following disclaimer.
153d8b8481Sriastradh * 2. Redistributions in binary form must reproduce the above copyright
163d8b8481Sriastradh * notice, this list of conditions and the following disclaimer in the
173d8b8481Sriastradh * documentation and/or other materials provided with the distribution.
183d8b8481Sriastradh *
193d8b8481Sriastradh * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
203d8b8481Sriastradh * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
213d8b8481Sriastradh * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
223d8b8481Sriastradh * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
233d8b8481Sriastradh * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
243d8b8481Sriastradh * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
253d8b8481Sriastradh * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
263d8b8481Sriastradh * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
273d8b8481Sriastradh * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
283d8b8481Sriastradh * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
293d8b8481Sriastradh * POSSIBILITY OF SUCH DAMAGE.
303d8b8481Sriastradh */
313d8b8481Sriastradh
323d8b8481Sriastradh #include <sys/cdefs.h>
33*89400822Sriastradh __KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.42 2022/09/01 09:37:06 riastradh Exp $");
343d8b8481Sriastradh
353d8b8481Sriastradh #include <sys/atomic.h>
363d8b8481Sriastradh #include <sys/condvar.h>
37bd72383dSriastradh #include <sys/lock.h>
383d8b8481Sriastradh #include <sys/queue.h>
3915a96b9bSriastradh #include <sys/sdt.h>
403d8b8481Sriastradh
413d8b8481Sriastradh #include <linux/atomic.h>
42316fc19bSriastradh #include <linux/dma-fence.h>
433d8b8481Sriastradh #include <linux/errno.h>
443d8b8481Sriastradh #include <linux/kref.h>
453d8b8481Sriastradh #include <linux/sched.h>
463d8b8481Sriastradh #include <linux/spinlock.h>
473d8b8481Sriastradh
48f5ba9028Sriastradh #define FENCE_MAGIC_GOOD 0x607ba424048c37e5ULL
49f5ba9028Sriastradh #define FENCE_MAGIC_BAD 0x7641ca721344505fULL
50f5ba9028Sriastradh
5115a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, init,
5215a96b9bSriastradh "struct dma_fence *"/*fence*/);
5315a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, reset,
5415a96b9bSriastradh "struct dma_fence *"/*fence*/);
5515a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, release,
5615a96b9bSriastradh "struct dma_fence *"/*fence*/);
5715a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, free,
5815a96b9bSriastradh "struct dma_fence *"/*fence*/);
5915a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, destroy,
6015a96b9bSriastradh "struct dma_fence *"/*fence*/);
6115a96b9bSriastradh
6215a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, enable_signaling,
6315a96b9bSriastradh "struct dma_fence *"/*fence*/);
6415a96b9bSriastradh SDT_PROBE_DEFINE2(sdt, drm, fence, add_callback,
6515a96b9bSriastradh "struct dma_fence *"/*fence*/,
6615a96b9bSriastradh "struct dma_fence_callback *"/*callback*/);
6715a96b9bSriastradh SDT_PROBE_DEFINE2(sdt, drm, fence, remove_callback,
6815a96b9bSriastradh "struct dma_fence *"/*fence*/,
6915a96b9bSriastradh "struct dma_fence_callback *"/*callback*/);
7015a96b9bSriastradh SDT_PROBE_DEFINE2(sdt, drm, fence, callback,
7115a96b9bSriastradh "struct dma_fence *"/*fence*/,
7215a96b9bSriastradh "struct dma_fence_callback *"/*callback*/);
7315a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, test,
7415a96b9bSriastradh "struct dma_fence *"/*fence*/);
7515a96b9bSriastradh SDT_PROBE_DEFINE2(sdt, drm, fence, set_error,
7615a96b9bSriastradh "struct dma_fence *"/*fence*/,
7715a96b9bSriastradh "int"/*error*/);
7815a96b9bSriastradh SDT_PROBE_DEFINE1(sdt, drm, fence, signal,
7915a96b9bSriastradh "struct dma_fence *"/*fence*/);
8015a96b9bSriastradh
8115a96b9bSriastradh SDT_PROBE_DEFINE3(sdt, drm, fence, wait_start,
8215a96b9bSriastradh "struct dma_fence *"/*fence*/,
8315a96b9bSriastradh "bool"/*intr*/,
8415a96b9bSriastradh "long"/*timeout*/);
8515a96b9bSriastradh SDT_PROBE_DEFINE2(sdt, drm, fence, wait_done,
8615a96b9bSriastradh "struct dma_fence *"/*fence*/,
8715a96b9bSriastradh "long"/*ret*/);
8815a96b9bSriastradh
893d8b8481Sriastradh /*
90316fc19bSriastradh * linux_dma_fence_trace
913d8b8481Sriastradh *
92316fc19bSriastradh * True if we print DMA_FENCE_TRACE messages, false if not. These
93316fc19bSriastradh * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
94316fc19bSriastradh * in boothowto.
953d8b8481Sriastradh */
96316fc19bSriastradh int linux_dma_fence_trace = 0;
973d8b8481Sriastradh
/*
 * Static stub fence, shared by all callers of dma_fence_get_stub.
 * It is initialized and signalled once in linux_dma_fences_init; the
 * lock exists only because dma_fence_init requires one.
 */
static struct {
	spinlock_t lock;
	struct dma_fence fence;
} dma_fence_stub __cacheline_aligned;
102d83a40f3Sriastradh
/*
 * dma_fence_stub_name(f)
 *
 *	Driver/timeline name callback for the stub fence.  Only the
 *	stub fence may be passed here.
 */
static const char *dma_fence_stub_name(struct dma_fence *f)
{

	KASSERT(f == &dma_fence_stub.fence);
	return "stub";
}
109d83a40f3Sriastradh
/*
 * dma_fence_stub_release(f)
 *
 *	Release callback for the stub fence: tear down the fence state
 *	but do not free it -- it lives in static storage.
 */
static void
dma_fence_stub_release(struct dma_fence *f)
{

	KASSERT(f == &dma_fence_stub.fence);
	dma_fence_destroy(f);
}
117d83a40f3Sriastradh
/*
 * Fence ops for the stub fence.  The release callback destroys but
 * never frees, since the stub is statically allocated.
 */
static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_name,
	.get_timeline_name = dma_fence_stub_name,
	.release = dma_fence_stub_release,
};
123d83a40f3Sriastradh
/*
 * linux_dma_fences_init(), linux_dma_fences_fini()
 *
 *	Set up and tear down module state.
 */
void
linux_dma_fences_init(void)
{
	int error __diagused;

	/*
	 * Initialize the stub fence and signal it up front, so
	 * dma_fence_get_stub always returns an already-signalled
	 * fence.  Order matters: the lock must exist before
	 * dma_fence_init, which must precede dma_fence_signal.
	 */
	spin_lock_init(&dma_fence_stub.lock);
	dma_fence_init(&dma_fence_stub.fence, &dma_fence_stub_ops,
	    &dma_fence_stub.lock, /*context*/0, /*seqno*/0);
	error = dma_fence_signal(&dma_fence_stub.fence);
	KASSERTMSG(error == 0, "error=%d", error);
}
140d83a40f3Sriastradh
void
linux_dma_fences_fini(void)
{

	/*
	 * Drop the reference taken in linux_dma_fences_init; the last
	 * put runs dma_fence_stub_release, then we can free the lock.
	 */
	dma_fence_put(&dma_fence_stub.fence);
	spin_lock_destroy(&dma_fence_stub.lock);
}
148d83a40f3Sriastradh
/*
 * dma_fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count.  True after
 *	dma_fence_init; after the last dma_fence_put, this becomes
 *	false.  The fence must have been initialized and must not have
 *	been destroyed.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	/* Catch use of destroyed or never-initialized fences.  */
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	return kref_referenced_p(&fence->refcount);
}
1663d8b8481Sriastradh
/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence.  Caller should call dma_fence_destroy when
 *	done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, uint64_t context, uint64_t seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");

#ifdef DIAGNOSTIC
	/* Mark the fence live for the magic-number sanity checks.  */
	fence->f_magic = FENCE_MAGIC_GOOD;
#endif

	SDT_PROBE1(sdt, drm, fence, init, fence);
}
1943d8b8481Sriastradh
1953d8b8481Sriastradh /*
19616f1ba6cSriastradh * dma_fence_reset(fence)
19716f1ba6cSriastradh *
19816f1ba6cSriastradh * Ensure fence is in a quiescent state. Allowed either for newly
19916f1ba6cSriastradh * initialized or freed fences, but not fences with more than one
20016f1ba6cSriastradh * reference.
20116f1ba6cSriastradh *
20216f1ba6cSriastradh * XXX extension to Linux API
20316f1ba6cSriastradh */
20416f1ba6cSriastradh void
dma_fence_reset(struct dma_fence * fence,const struct dma_fence_ops * ops,spinlock_t * lock,uint64_t context,uint64_t seqno)20516f1ba6cSriastradh dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
206bd72383dSriastradh spinlock_t *lock, uint64_t context, uint64_t seqno)
20716f1ba6cSriastradh {
20816f1ba6cSriastradh
209f5ba9028Sriastradh KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
210f5ba9028Sriastradh KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
21116f1ba6cSriastradh KASSERT(kref_read(&fence->refcount) == 0 ||
21216f1ba6cSriastradh kref_read(&fence->refcount) == 1);
21316f1ba6cSriastradh KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
21416f1ba6cSriastradh KASSERT(fence->lock == lock);
21516f1ba6cSriastradh KASSERT(fence->ops == ops);
21616f1ba6cSriastradh
21716f1ba6cSriastradh kref_init(&fence->refcount);
21816f1ba6cSriastradh fence->flags = 0;
21916f1ba6cSriastradh fence->context = context;
22016f1ba6cSriastradh fence->seqno = seqno;
22116f1ba6cSriastradh fence->error = 0;
22215a96b9bSriastradh
22315a96b9bSriastradh SDT_PROBE1(sdt, drm, fence, reset, fence);
22416f1ba6cSriastradh }
22516f1ba6cSriastradh
/*
 * dma_fence_destroy(fence)
 *
 *	Clean up memory initialized with dma_fence_init.  This is meant
 *	to be used after a fence release callback.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	SDT_PROBE1(sdt, drm, fence, destroy, fence);

#ifdef DIAGNOSTIC
	/* Poison the magic so later use of this fence is caught.  */
	fence->f_magic = FENCE_MAGIC_BAD;
#endif

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}
2493d8b8481Sriastradh
/*
 * dma_fence_free_cb(rcu)
 *
 *	RCU callback: destroy and kfree the fence once pending read
 *	sections have completed.  Scheduled by dma_fence_free.
 */
static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}
2603d8b8481Sriastradh
/*
 * dma_fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released.  This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: Callers assume kfree will be used.  We don't even use
 *	kmalloc to allocate these -- caller is expected to allocate
 *	memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	SDT_PROBE1(sdt, drm, fence, free, fence);

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}
2833d8b8481Sriastradh
/*
 * dma_fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of unique
 *	identifiers, at least until the system wraps around.
 */
uint64_t
dma_fence_context_alloc(unsigned n)
{
	static struct {
		volatile unsigned lock;
		uint64_t context;
	} S;
	uint64_t c;

	/*
	 * Hand-rolled spin lock: atomic_swap_uint both tests and
	 * acquires the lock; membar_acquire pairs with the
	 * atomic_store_release below so updates to S.context are
	 * properly ordered between owners.
	 */
	while (__predict_false(atomic_swap_uint(&S.lock, 1)))
		SPINLOCK_BACKOFF_HOOK;
	membar_acquire();
	c = S.context;
	S.context += n;
	atomic_store_release(&S.lock, 0);

	return c;
}
308bd72383dSriastradh
309bd72383dSriastradh /*
310bd72383dSriastradh * __dma_fence_is_later(a, b, ops)
311bd72383dSriastradh *
312bd72383dSriastradh * True if sequence number a is later than sequence number b,
313bd72383dSriastradh * according to the given fence ops.
314bd72383dSriastradh *
315bd72383dSriastradh * - For fence ops with 64-bit sequence numbers, this is simply
316bd72383dSriastradh * defined to be a > b as unsigned 64-bit integers.
317bd72383dSriastradh *
318bd72383dSriastradh * - For fence ops with 32-bit sequence numbers, this is defined
319bd72383dSriastradh * to mean that the 32-bit unsigned difference a - b is less
320bd72383dSriastradh * than INT_MAX.
321bd72383dSriastradh */
322bd72383dSriastradh bool
__dma_fence_is_later(uint64_t a,uint64_t b,const struct dma_fence_ops * ops)323bd72383dSriastradh __dma_fence_is_later(uint64_t a, uint64_t b, const struct dma_fence_ops *ops)
324bd72383dSriastradh {
325bd72383dSriastradh
326bd72383dSriastradh if (ops->use_64bit_seqno)
327bd72383dSriastradh return a > b;
328bd72383dSriastradh else
329bd72383dSriastradh return (unsigned)a - (unsigned)b < INT_MAX;
3303d8b8481Sriastradh }
3313d8b8481Sriastradh
/*
 * dma_fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b.  Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is no more than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.  Whether sequence
 *	numbers are 32-bit is determined by a.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %"PRIu64" @ %p =/= %"PRIu64" @ %p",
	    a->context, a, b->context, b);

	return __dma_fence_is_later(a->seqno, b->seqno, a->ops);
}
3583d8b8481Sriastradh
/*
 * dma_fence_get_stub()
 *
 *	Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{

	/* The stub is signalled once at linux_dma_fences_init time.  */
	return dma_fence_get(&dma_fence_stub.fence);
}
370a735ce21Sriastradh
371a735ce21Sriastradh /*
372316fc19bSriastradh * dma_fence_get(fence)
3733d8b8481Sriastradh *
374c6010e2aSriastradh * Acquire a reference to fence and return it, or return NULL if
375c6010e2aSriastradh * fence is NULL. The fence, if nonnull, must not be being
376c6010e2aSriastradh * destroyed.
3773d8b8481Sriastradh */
378316fc19bSriastradh struct dma_fence *
dma_fence_get(struct dma_fence * fence)379316fc19bSriastradh dma_fence_get(struct dma_fence *fence)
3803d8b8481Sriastradh {
3813d8b8481Sriastradh
382c6010e2aSriastradh if (fence == NULL)
383c6010e2aSriastradh return NULL;
384c6010e2aSriastradh
385f5ba9028Sriastradh KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
386f5ba9028Sriastradh KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
387f5ba9028Sriastradh
3883d8b8481Sriastradh kref_get(&fence->refcount);
3893d8b8481Sriastradh return fence;
3903d8b8481Sriastradh }
3913d8b8481Sriastradh
/*
 * dma_fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during a read section.  Return the fence on
 *	success, or NULL on failure.  The fence must be nonnull.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	/*
	 * Compiler barrier: prevent reordering/caching of the fence
	 * contents around this point within the read section.
	 */
	__insn_barrier();
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}
4103d8b8481Sriastradh
/*
 * dma_fence_get_rcu_safe(fencep)
 *
 *	Attempt to acquire a reference to the fence *fencep, which may
 *	be about to be destroyed, during a read section.  If the value
 *	of *fencep changes after we read *fencep but before we
 *	increment its reference count, retry.  Return *fencep on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence;

retry:
	/*
	 * Load the fence, ensuring we observe the fully initialized
	 * content.
	 */
	if ((fence = atomic_load_consume(fencep)) == NULL)
		return NULL;

	/* Try to acquire a reference.  If we can't, try again.  */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence.  If not, release it
	 * and retry.
	 */
	if (fence != atomic_load_relaxed(fencep)) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success!  */
	KASSERT(dma_fence_referenced_p(fence));
	return fence;
}
450e99c299dSriastradh
4513d8b8481Sriastradh static void
dma_fence_release(struct kref * refcount)452316fc19bSriastradh dma_fence_release(struct kref *refcount)
4533d8b8481Sriastradh {
454316fc19bSriastradh struct dma_fence *fence = container_of(refcount, struct dma_fence,
455316fc19bSriastradh refcount);
4563d8b8481Sriastradh
45751bebbffSriastradh KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
45851bebbffSriastradh "fence %p has pending callbacks", fence);
459316fc19bSriastradh KASSERT(!dma_fence_referenced_p(fence));
4603d8b8481Sriastradh
46115a96b9bSriastradh SDT_PROBE1(sdt, drm, fence, release, fence);
46215a96b9bSriastradh
4633d8b8481Sriastradh if (fence->ops->release)
4643d8b8481Sriastradh (*fence->ops->release)(fence);
4653d8b8481Sriastradh else
466316fc19bSriastradh dma_fence_free(fence);
4673d8b8481Sriastradh }
4683d8b8481Sriastradh
4693d8b8481Sriastradh /*
470316fc19bSriastradh * dma_fence_put(fence)
4713d8b8481Sriastradh *
4723d8b8481Sriastradh * Release a reference to fence. If this was the last one, call
4733d8b8481Sriastradh * the fence's release callback.
4743d8b8481Sriastradh */
4753d8b8481Sriastradh void
dma_fence_put(struct dma_fence * fence)476316fc19bSriastradh dma_fence_put(struct dma_fence *fence)
4773d8b8481Sriastradh {
4783d8b8481Sriastradh
4793d8b8481Sriastradh if (fence == NULL)
4803d8b8481Sriastradh return;
481316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
482316fc19bSriastradh kref_put(&fence->refcount, &dma_fence_release);
4833d8b8481Sriastradh }
4843d8b8481Sriastradh
/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine.  If the fence was already signalled,
 *	return -ENOENT.  Otherwise, if the enable signalling callback
 *	has not been called yet, call it.  If it fails, signal the
 *	fence and return -ENOENT.  If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it.  */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling.
	 */
	if (!already_enabled && fence->ops->enable_signaling) {
		SDT_PROBE1(sdt, drm, fence, enable_signaling, fence);
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT.  */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success!  */
	return 0;
}
5283d8b8481Sriastradh
/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT.  If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses memory allocated by the caller in fcb from the
 *	time of dma_fence_add_callback either to the time of
 *	dma_fence_remove_callback, or just before calling fn.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (atomic_load_relaxed(&fence->flags) &
	    (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't.  */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback.  */
	SDT_PROBE2(sdt, drm, fence, add_callback, fence, fcb);
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done.  */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		/* On failure, leave fcb in a quiescent, reusable state.  */
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
5793d8b8481Sriastradh
5803d8b8481Sriastradh /*
581316fc19bSriastradh * dma_fence_remove_callback(fence, fcb)
5823d8b8481Sriastradh *
5833d8b8481Sriastradh * Remove the callback fcb from fence. Return true if it was
5843d8b8481Sriastradh * removed from the list, or false if it had already run and so
5853d8b8481Sriastradh * was no longer queued anyway. Caller must have already called
586316fc19bSriastradh * dma_fence_add_callback(fence, fcb).
5873d8b8481Sriastradh */
5883d8b8481Sriastradh bool
dma_fence_remove_callback(struct dma_fence * fence,struct dma_fence_cb * fcb)589316fc19bSriastradh dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
5903d8b8481Sriastradh {
5913d8b8481Sriastradh bool onqueue;
5923d8b8481Sriastradh
593316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
5943d8b8481Sriastradh
5953d8b8481Sriastradh spin_lock(fence->lock);
5963d8b8481Sriastradh onqueue = fcb->fcb_onqueue;
5973d8b8481Sriastradh if (onqueue) {
59815a96b9bSriastradh SDT_PROBE2(sdt, drm, fence, remove_callback, fence, fcb);
5993d8b8481Sriastradh TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
6003d8b8481Sriastradh fcb->fcb_onqueue = false;
6013d8b8481Sriastradh }
6023d8b8481Sriastradh spin_unlock(fence->lock);
6033d8b8481Sriastradh
6043d8b8481Sriastradh return onqueue;
6053d8b8481Sriastradh }
6063d8b8481Sriastradh
6073d8b8481Sriastradh /*
608316fc19bSriastradh * dma_fence_enable_sw_signaling(fence)
6093d8b8481Sriastradh *
6103d8b8481Sriastradh * If it hasn't been called yet and the fence hasn't been
6113d8b8481Sriastradh * signalled yet, call the fence's enable_sw_signaling callback.
6123d8b8481Sriastradh * If when that happens, the callback indicates failure by
6133d8b8481Sriastradh * returning false, signal the fence.
6143d8b8481Sriastradh */
6153d8b8481Sriastradh void
dma_fence_enable_sw_signaling(struct dma_fence * fence)616316fc19bSriastradh dma_fence_enable_sw_signaling(struct dma_fence *fence)
6173d8b8481Sriastradh {
6183d8b8481Sriastradh
619316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
6203d8b8481Sriastradh
6213d8b8481Sriastradh spin_lock(fence->lock);
6229c4c6364Sriastradh if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
623316fc19bSriastradh (void)dma_fence_ensure_signal_enabled(fence);
6243d8b8481Sriastradh spin_unlock(fence->lock);
6253d8b8481Sriastradh }
6263d8b8481Sriastradh
6273d8b8481Sriastradh /*
628316fc19bSriastradh * dma_fence_is_signaled(fence)
6293d8b8481Sriastradh *
6303d8b8481Sriastradh * Test whether the fence has been signalled. If it has been
631316fc19bSriastradh * signalled by dma_fence_signal(_locked), return true. If the
6323d8b8481Sriastradh * signalled callback returns true indicating that some implicit
6333d8b8481Sriastradh * external condition has changed, call the callbacks as if with
634316fc19bSriastradh * dma_fence_signal.
6353d8b8481Sriastradh */
6363d8b8481Sriastradh bool
dma_fence_is_signaled(struct dma_fence * fence)637316fc19bSriastradh dma_fence_is_signaled(struct dma_fence *fence)
6383d8b8481Sriastradh {
6393d8b8481Sriastradh bool signaled;
6403d8b8481Sriastradh
641316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
6423d8b8481Sriastradh
6433d8b8481Sriastradh spin_lock(fence->lock);
644316fc19bSriastradh signaled = dma_fence_is_signaled_locked(fence);
6453d8b8481Sriastradh spin_unlock(fence->lock);
6463d8b8481Sriastradh
6473d8b8481Sriastradh return signaled;
6483d8b8481Sriastradh }
6493d8b8481Sriastradh
6503d8b8481Sriastradh /*
651316fc19bSriastradh * dma_fence_is_signaled_locked(fence)
6523d8b8481Sriastradh *
6533d8b8481Sriastradh * Test whether the fence has been signalled. Like
654316fc19bSriastradh * dma_fence_is_signaleed, but caller already holds the fence's lock.
6553d8b8481Sriastradh */
6563d8b8481Sriastradh bool
dma_fence_is_signaled_locked(struct dma_fence * fence)657316fc19bSriastradh dma_fence_is_signaled_locked(struct dma_fence *fence)
6583d8b8481Sriastradh {
6593d8b8481Sriastradh
660316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
6613d8b8481Sriastradh KASSERT(spin_is_locked(fence->lock));
6623d8b8481Sriastradh
6633d8b8481Sriastradh /* Check whether we already set the signalled bit. */
664316fc19bSriastradh if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
6653d8b8481Sriastradh return true;
6663d8b8481Sriastradh
6673d8b8481Sriastradh /* If there's a signalled callback, test it. */
6683d8b8481Sriastradh if (fence->ops->signaled) {
66915a96b9bSriastradh SDT_PROBE1(sdt, drm, fence, test, fence);
6703d8b8481Sriastradh if ((*fence->ops->signaled)(fence)) {
6713d8b8481Sriastradh /*
6723d8b8481Sriastradh * It's been signalled implicitly by some
6733d8b8481Sriastradh * external phenomonen. Act as though someone
674316fc19bSriastradh * has called dma_fence_signal.
6753d8b8481Sriastradh */
676316fc19bSriastradh dma_fence_signal_locked(fence);
6773d8b8481Sriastradh return true;
6783d8b8481Sriastradh }
6793d8b8481Sriastradh }
6803d8b8481Sriastradh
6813d8b8481Sriastradh return false;
6823d8b8481Sriastradh }
6833d8b8481Sriastradh
/*
 * dma_fence_set_error(fence, error)
 *
 *	Set an error code prior to dma_fence_signal for use by a
 *	waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	/* Magic-number checks catch use of destroyed or garbage fences.  */
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	/* The error must be set before the fence is signalled.  */
	KASSERT((atomic_load_relaxed(&fence->flags) &
		(1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0);
	/* Must be a negative errno value in range: -ELAST <= error < 0.  */
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	SDT_PROBE2(sdt, drm, fence, set_error,  fence, error);
	fence->error = error;
}
7044473eaf4Sriastradh
7054473eaf4Sriastradh /*
70608d0f26fSriastradh * dma_fence_get_status(fence)
70708d0f26fSriastradh *
70808d0f26fSriastradh * Return 0 if fence has yet to be signalled, 1 if it has been
70908d0f26fSriastradh * signalled without error, or negative error code if
71008d0f26fSriastradh * dma_fence_set_error was used.
71108d0f26fSriastradh */
71208d0f26fSriastradh int
dma_fence_get_status(struct dma_fence * fence)71308d0f26fSriastradh dma_fence_get_status(struct dma_fence *fence)
71408d0f26fSriastradh {
71508d0f26fSriastradh int ret;
71608d0f26fSriastradh
717f5ba9028Sriastradh KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
718f5ba9028Sriastradh KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
719f5ba9028Sriastradh
72008d0f26fSriastradh spin_lock(fence->lock);
72108d0f26fSriastradh if (!dma_fence_is_signaled_locked(fence)) {
72208d0f26fSriastradh ret = 0;
72308d0f26fSriastradh } else if (fence->error) {
72408d0f26fSriastradh ret = fence->error;
72508d0f26fSriastradh KASSERTMSG(ret < 0, "%d", ret);
72608d0f26fSriastradh } else {
72708d0f26fSriastradh ret = 1;
72808d0f26fSriastradh }
72908d0f26fSriastradh spin_unlock(fence->lock);
73008d0f26fSriastradh
73108d0f26fSriastradh return ret;
73208d0f26fSriastradh }
73308d0f26fSriastradh
73408d0f26fSriastradh /*
735316fc19bSriastradh * dma_fence_signal(fence)
7363d8b8481Sriastradh *
7373d8b8481Sriastradh * Signal the fence. If it has already been signalled, return
7383d8b8481Sriastradh * -EINVAL. If it has not been signalled, call the enable
7393d8b8481Sriastradh * signalling callback if it hasn't been called yet, and remove
7403d8b8481Sriastradh * each registered callback from the queue and call it; then
7413d8b8481Sriastradh * return 0.
7423d8b8481Sriastradh */
7433d8b8481Sriastradh int
dma_fence_signal(struct dma_fence * fence)744316fc19bSriastradh dma_fence_signal(struct dma_fence *fence)
7453d8b8481Sriastradh {
7463d8b8481Sriastradh int ret;
7473d8b8481Sriastradh
748316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
7493d8b8481Sriastradh
7503d8b8481Sriastradh spin_lock(fence->lock);
751316fc19bSriastradh ret = dma_fence_signal_locked(fence);
7523d8b8481Sriastradh spin_unlock(fence->lock);
7533d8b8481Sriastradh
7543d8b8481Sriastradh return ret;
7553d8b8481Sriastradh }
7563d8b8481Sriastradh
7573d8b8481Sriastradh /*
758316fc19bSriastradh * dma_fence_signal_locked(fence)
7593d8b8481Sriastradh *
760316fc19bSriastradh * Signal the fence. Like dma_fence_signal, but caller already
761316fc19bSriastradh * holds the fence's lock.
7623d8b8481Sriastradh */
7633d8b8481Sriastradh int
dma_fence_signal_locked(struct dma_fence * fence)764316fc19bSriastradh dma_fence_signal_locked(struct dma_fence *fence)
7653d8b8481Sriastradh {
766316fc19bSriastradh struct dma_fence_cb *fcb, *next;
7673d8b8481Sriastradh
768316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
7693d8b8481Sriastradh KASSERT(spin_is_locked(fence->lock));
7703d8b8481Sriastradh
7713d8b8481Sriastradh /* If it's been signalled, fail; otherwise set the signalled bit. */
772316fc19bSriastradh if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
7733d8b8481Sriastradh return -EINVAL;
7743d8b8481Sriastradh
77515a96b9bSriastradh SDT_PROBE1(sdt, drm, fence, signal, fence);
77615a96b9bSriastradh
777b724d402Sriastradh /* Set the timestamp. */
778b724d402Sriastradh fence->timestamp = ktime_get();
779b724d402Sriastradh set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
780b724d402Sriastradh
7813d8b8481Sriastradh /* Wake waiters. */
7823d8b8481Sriastradh cv_broadcast(&fence->f_cv);
7833d8b8481Sriastradh
7843d8b8481Sriastradh /* Remove and call the callbacks. */
7853d8b8481Sriastradh TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
78615a96b9bSriastradh SDT_PROBE2(sdt, drm, fence, callback, fence, fcb);
7873d8b8481Sriastradh TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
7883d8b8481Sriastradh fcb->fcb_onqueue = false;
78930870510Sriastradh (*fcb->func)(fence, fcb);
7903d8b8481Sriastradh }
7913d8b8481Sriastradh
7923d8b8481Sriastradh /* Success! */
7933d8b8481Sriastradh return 0;
7943d8b8481Sriastradh }
7953d8b8481Sriastradh
/*
 * struct wait_any
 *
 *	Per-fence callback record for dma_fence_wait_any_timeout.  One
 *	record is registered on each fence; all records point at a
 *	single shared struct wait_any1 on the waiter's stack.
 */
struct wait_any {
	struct dma_fence_cb fcb;	/* callback registered on a fence */
	struct wait_any1 {
		kmutex_t lock;		/* serializes done and cv */
		kcondvar_t cv;		/* broadcast when done is set */
		struct wait_any *cb;	/* base of the callback array */
		bool done;		/* true once any fence signalled */
	} *common;			/* state shared by all records */
};
8053d8b8481Sriastradh
8063d8b8481Sriastradh static void
wait_any_cb(struct dma_fence * fence,struct dma_fence_cb * fcb)807316fc19bSriastradh wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
8083d8b8481Sriastradh {
8093d8b8481Sriastradh struct wait_any *cb = container_of(fcb, struct wait_any, fcb);
8103d8b8481Sriastradh
811316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
8123d8b8481Sriastradh
8133d8b8481Sriastradh mutex_enter(&cb->common->lock);
8143d8b8481Sriastradh cb->common->done = true;
8153d8b8481Sriastradh cv_broadcast(&cb->common->cv);
8163d8b8481Sriastradh mutex_exit(&cb->common->lock);
8173d8b8481Sriastradh }
8183d8b8481Sriastradh
8193d8b8481Sriastradh /*
82004d31dcdSriastradh * dma_fence_wait_any_timeout(fence, nfences, intr, timeout, ip)
8213d8b8481Sriastradh *
8223d8b8481Sriastradh * Wait for any of fences[0], fences[1], fences[2], ...,
823ec2508dcSriastradh * fences[nfences-1] to be signalled. If ip is nonnull, set *ip
824ec2508dcSriastradh * to the index of the first one.
825ae93c501Sriastradh *
826ae93c501Sriastradh * Return -ERESTARTSYS if interrupted, 0 on timeout, or time
827ae93c501Sriastradh * remaining (at least 1) on success.
8283d8b8481Sriastradh */
8293d8b8481Sriastradh long
dma_fence_wait_any_timeout(struct dma_fence ** fences,uint32_t nfences,bool intr,long timeout,uint32_t * ip)830316fc19bSriastradh dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
83104d31dcdSriastradh bool intr, long timeout, uint32_t *ip)
8323d8b8481Sriastradh {
8333d8b8481Sriastradh struct wait_any1 common;
8343d8b8481Sriastradh struct wait_any *cb;
8353d8b8481Sriastradh uint32_t i, j;
8363d8b8481Sriastradh int start, end;
8373d8b8481Sriastradh long ret = 0;
8383d8b8481Sriastradh
83905cdf8b4Sriastradh KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
84005cdf8b4Sriastradh KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
84105cdf8b4Sriastradh
842ae93c501Sriastradh /* Optimistically check whether any are signalled. */
843ae93c501Sriastradh for (i = 0; i < nfences; i++) {
84405cdf8b4Sriastradh KASSERT(dma_fence_referenced_p(fences[i]));
845ae93c501Sriastradh if (dma_fence_is_signaled(fences[i])) {
846ae93c501Sriastradh if (ip)
847ae93c501Sriastradh *ip = i;
848ae93c501Sriastradh return MAX(1, timeout);
849ae93c501Sriastradh }
850ae93c501Sriastradh }
851ae93c501Sriastradh
852ae93c501Sriastradh /*
853ae93c501Sriastradh * If timeout is zero, we're just polling, so stop here as if
854ae93c501Sriastradh * we timed out instantly.
855ae93c501Sriastradh */
856ae93c501Sriastradh if (timeout == 0)
857ae93c501Sriastradh return 0;
858ae93c501Sriastradh
8593d8b8481Sriastradh /* Allocate an array of callback records. */
8603d8b8481Sriastradh cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
86105cdf8b4Sriastradh if (cb == NULL)
86205cdf8b4Sriastradh return -ENOMEM;
8633d8b8481Sriastradh
8643d8b8481Sriastradh /* Initialize a mutex and condvar for the common wait. */
8653d8b8481Sriastradh mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
8663d8b8481Sriastradh cv_init(&common.cv, "fence");
86704d31dcdSriastradh common.cb = cb;
868ae93c501Sriastradh common.done = false;
8693d8b8481Sriastradh
870ae93c501Sriastradh /*
871ae93c501Sriastradh * Add a callback to each of the fences, or stop if already
872ae93c501Sriastradh * signalled.
873ae93c501Sriastradh */
8743d8b8481Sriastradh for (i = 0; i < nfences; i++) {
8753d8b8481Sriastradh cb[i].common = &common;
876316fc19bSriastradh KASSERT(dma_fence_referenced_p(fences[i]));
877316fc19bSriastradh ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
878316fc19bSriastradh &wait_any_cb);
879ae93c501Sriastradh if (ret) {
880ae93c501Sriastradh KASSERT(ret == -ENOENT);
88104d31dcdSriastradh if (ip)
882ae93c501Sriastradh *ip = i;
883ae93c501Sriastradh ret = MAX(1, timeout);
88405cdf8b4Sriastradh goto out;
8853d8b8481Sriastradh }
88604d31dcdSriastradh }
8873d8b8481Sriastradh
8883d8b8481Sriastradh /*
8893d8b8481Sriastradh * None of them was ready immediately. Wait for one of the
8903d8b8481Sriastradh * callbacks to notify us when it is done.
8913d8b8481Sriastradh */
8923d8b8481Sriastradh mutex_enter(&common.lock);
89305cdf8b4Sriastradh while (!common.done) {
89405cdf8b4Sriastradh /* Wait for the time remaining. */
8953d8b8481Sriastradh start = getticks();
8963d8b8481Sriastradh if (intr) {
8973d8b8481Sriastradh if (timeout != MAX_SCHEDULE_TIMEOUT) {
8983d8b8481Sriastradh ret = -cv_timedwait_sig(&common.cv,
8993d8b8481Sriastradh &common.lock, MIN(timeout, /* paranoia */
9003d8b8481Sriastradh MAX_SCHEDULE_TIMEOUT));
9013d8b8481Sriastradh } else {
9023d8b8481Sriastradh ret = -cv_wait_sig(&common.cv, &common.lock);
9033d8b8481Sriastradh }
9043d8b8481Sriastradh } else {
9053d8b8481Sriastradh if (timeout != MAX_SCHEDULE_TIMEOUT) {
9063d8b8481Sriastradh ret = -cv_timedwait(&common.cv,
9073d8b8481Sriastradh &common.lock, MIN(timeout, /* paranoia */
9083d8b8481Sriastradh MAX_SCHEDULE_TIMEOUT));
9093d8b8481Sriastradh } else {
9103d8b8481Sriastradh cv_wait(&common.cv, &common.lock);
9113d8b8481Sriastradh ret = 0;
9123d8b8481Sriastradh }
9133d8b8481Sriastradh }
9143d8b8481Sriastradh end = getticks();
91505cdf8b4Sriastradh
91605cdf8b4Sriastradh /* Deduct from time remaining. If none left, time out. */
91705cdf8b4Sriastradh if (timeout != MAX_SCHEDULE_TIMEOUT) {
91805cdf8b4Sriastradh timeout -= MIN(timeout,
91905cdf8b4Sriastradh (unsigned)end - (unsigned)start);
92005cdf8b4Sriastradh if (timeout == 0)
92105cdf8b4Sriastradh ret = -EWOULDBLOCK;
92205cdf8b4Sriastradh }
92305cdf8b4Sriastradh
92405cdf8b4Sriastradh /* If the wait failed, give up. */
925ae93c501Sriastradh if (ret)
9263d8b8481Sriastradh break;
9273d8b8481Sriastradh }
9283d8b8481Sriastradh mutex_exit(&common.lock);
9293d8b8481Sriastradh
9303d8b8481Sriastradh /*
93105cdf8b4Sriastradh * Massage the return code if nonzero:
93205cdf8b4Sriastradh * - if we were interrupted, return -ERESTARTSYS;
93305cdf8b4Sriastradh * - if we timed out, return 0.
93405cdf8b4Sriastradh * No other failure is possible. On success, ret=0 but we
93505cdf8b4Sriastradh * check again below to verify anyway.
93605cdf8b4Sriastradh */
93705cdf8b4Sriastradh if (ret) {
93805cdf8b4Sriastradh KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
93905cdf8b4Sriastradh ret == -EWOULDBLOCK), "ret=%ld", ret);
94005cdf8b4Sriastradh if (ret == -EINTR || ret == -ERESTART) {
94105cdf8b4Sriastradh ret = -ERESTARTSYS;
94205cdf8b4Sriastradh } else if (ret == -EWOULDBLOCK) {
94305cdf8b4Sriastradh KASSERT(timeout != MAX_SCHEDULE_TIMEOUT);
94405cdf8b4Sriastradh ret = 0; /* timed out */
94505cdf8b4Sriastradh }
94605cdf8b4Sriastradh }
94705cdf8b4Sriastradh
94805cdf8b4Sriastradh KASSERT(ret != -ERESTART); /* would be confused with time left */
94905cdf8b4Sriastradh
95005cdf8b4Sriastradh /*
951ae93c501Sriastradh * Test whether any of the fences has been signalled. If they
952ae93c501Sriastradh * have, return success.
953ae93c501Sriastradh */
954ae93c501Sriastradh for (j = 0; j < nfences; j++) {
955ae93c501Sriastradh if (dma_fence_is_signaled(fences[i])) {
956ae93c501Sriastradh if (ip)
957ae93c501Sriastradh *ip = j;
958ae93c501Sriastradh ret = MAX(1, timeout);
95905cdf8b4Sriastradh goto out;
960ae93c501Sriastradh }
961ae93c501Sriastradh }
962ae93c501Sriastradh
963ae93c501Sriastradh /*
96405cdf8b4Sriastradh * If user passed MAX_SCHEDULE_TIMEOUT, we can't return 0
96505cdf8b4Sriastradh * meaning timed out because we're supposed to wait forever.
9663d8b8481Sriastradh */
96705cdf8b4Sriastradh KASSERT(timeout == MAX_SCHEDULE_TIMEOUT ? ret != 0 : 1);
9683d8b8481Sriastradh
96905cdf8b4Sriastradh out: while (i --> 0)
970316fc19bSriastradh (void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
9713d8b8481Sriastradh cv_destroy(&common.cv);
9723d8b8481Sriastradh mutex_destroy(&common.lock);
9733d8b8481Sriastradh kfree(cb);
97405cdf8b4Sriastradh return ret;
9753d8b8481Sriastradh }
9763d8b8481Sriastradh
9773d8b8481Sriastradh /*
978316fc19bSriastradh * dma_fence_wait_timeout(fence, intr, timeout)
9793d8b8481Sriastradh *
9803d8b8481Sriastradh * Wait until fence is signalled; or until interrupt, if intr is
9813d8b8481Sriastradh * true; or until timeout, if positive. Return -ERESTARTSYS if
9823d8b8481Sriastradh * interrupted, negative error code on any other error, zero on
9833d8b8481Sriastradh * timeout, or positive number of ticks remaining if the fence is
9843d8b8481Sriastradh * signalled before the timeout. Works by calling the fence wait
9853d8b8481Sriastradh * callback.
9863d8b8481Sriastradh *
987e8433fb6Sriastradh * The timeout must be nonnegative and at most
988e8433fb6Sriastradh * MAX_SCHEDULE_TIMEOUT, which means wait indefinitely.
9893d8b8481Sriastradh */
9903d8b8481Sriastradh long
dma_fence_wait_timeout(struct dma_fence * fence,bool intr,long timeout)991316fc19bSriastradh dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
9923d8b8481Sriastradh {
99315a96b9bSriastradh long ret;
9943d8b8481Sriastradh
995316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
996307b9e56Sriastradh KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
997e8433fb6Sriastradh KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
9983d8b8481Sriastradh
99915a96b9bSriastradh SDT_PROBE3(sdt, drm, fence, wait_start, fence, intr, timeout);
1000539b0db9Sriastradh if (fence->ops->wait)
100115a96b9bSriastradh ret = (*fence->ops->wait)(fence, intr, timeout);
1002539b0db9Sriastradh else
100315a96b9bSriastradh ret = dma_fence_default_wait(fence, intr, timeout);
100415a96b9bSriastradh SDT_PROBE2(sdt, drm, fence, wait_done, fence, ret);
100515a96b9bSriastradh
100615a96b9bSriastradh return ret;
10073d8b8481Sriastradh }
10083d8b8481Sriastradh
10093d8b8481Sriastradh /*
1010316fc19bSriastradh * dma_fence_wait(fence, intr)
10113d8b8481Sriastradh *
10123d8b8481Sriastradh * Wait until fence is signalled; or until interrupt, if intr is
10133d8b8481Sriastradh * true. Return -ERESTARTSYS if interrupted, negative error code
10143d8b8481Sriastradh * on any other error, zero on sucess. Works by calling the fence
10153d8b8481Sriastradh * wait callback with MAX_SCHEDULE_TIMEOUT.
10163d8b8481Sriastradh */
10173d8b8481Sriastradh long
dma_fence_wait(struct dma_fence * fence,bool intr)1018316fc19bSriastradh dma_fence_wait(struct dma_fence *fence, bool intr)
10193d8b8481Sriastradh {
10203d8b8481Sriastradh long ret;
10213d8b8481Sriastradh
1022316fc19bSriastradh KASSERT(dma_fence_referenced_p(fence));
10233d8b8481Sriastradh
102419857f54Sriastradh ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
10253d8b8481Sriastradh KASSERT(ret != 0);
102695ce5684Sriastradh KASSERTMSG(ret == -ERESTARTSYS || ret == MAX_SCHEDULE_TIMEOUT,
102795ce5684Sriastradh "ret=%ld", ret);
10283d8b8481Sriastradh
10293d8b8481Sriastradh return (ret < 0 ? ret : 0);
10303d8b8481Sriastradh }
10313d8b8481Sriastradh
/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable.  If the fence is already signalled, return timeout,
 *	or 1 if timeout is zero meaning poll.  If the enable signalling
 *	callback hasn't been called, call it, and if it fails, act as
 *	if the fence had been signalled.  Otherwise, wait on the
 *	internal condvar.  If timeout is MAX_SCHEDULE_TIMEOUT, wait
 *	indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	/* Raw kmutex under the spinlock, needed by the cv_*wait calls.  */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (atomic_load_relaxed(&fence->flags) &
	    (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return MAX(1, timeout);

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed.  */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		ret = MAX(1, timeout);
		goto out;
	}

	/* If merely polling, stop here.  */
	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	/* Find out what our deadline is so we can handle spurious wakeup.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set.  */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			if (deadline <= now) {
				ret = -EWOULDBLOCK;
				break;
			}
		}

		/* Wait for the time remaining.  */
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				/* cv_wait cannot fail; ret stays 0.  */
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}

		/* If the wait failed, give up.  */
		if (ret)
			break;
	}

	/*
	 * Massage the return code if nonzero:
	 * - if we were interrupted, return -ERESTARTSYS;
	 * - if we timed out, return 0.
	 * No other failure is possible.  On success, ret=0 but we
	 * check again below to verify anyway.
	 */
	if (ret) {
		KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
			ret == -EWOULDBLOCK), "ret=%ld", ret);
		if (ret == -EINTR || ret == -ERESTART) {
			ret = -ERESTARTSYS;
		} else if (ret == -EWOULDBLOCK) {
			KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
			ret = 0;	/* timed out */
		}
	}

	KASSERT(ret != -ERESTART);	/* would be confused with time left */

	/* Check again in case it was signalled after a wait.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		if (timeout < MAX_SCHEDULE_TIMEOUT)
			ret = MAX(1, deadline - now);
		else
			ret = MAX_SCHEDULE_TIMEOUT;
	}

out:	/* All done.  Release the lock.  */
	spin_unlock(fence->lock);
	return ret;
}
11504726870eSriastradh
11514726870eSriastradh /*
11524726870eSriastradh * __dma_fence_signal(fence)
11534726870eSriastradh *
11544726870eSriastradh * Set fence's signalled bit, without waking waiters yet. Return
11554726870eSriastradh * true if it was newly set, false if it was already set.
11564726870eSriastradh */
11574726870eSriastradh bool
__dma_fence_signal(struct dma_fence * fence)11584726870eSriastradh __dma_fence_signal(struct dma_fence *fence)
11594726870eSriastradh {
11604726870eSriastradh
1161f5ba9028Sriastradh KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
1162f5ba9028Sriastradh KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
1163f5ba9028Sriastradh
11644726870eSriastradh if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
11654726870eSriastradh return false;
11664726870eSriastradh
11674726870eSriastradh return true;
11684726870eSriastradh }
11694726870eSriastradh
11704726870eSriastradh /*
11714726870eSriastradh * __dma_fence_signal_wake(fence)
11724726870eSriastradh *
1173b724d402Sriastradh * Set fence's timestamp and wake fence's waiters. Caller must
1174b724d402Sriastradh * have previously called __dma_fence_signal and it must have
1175b724d402Sriastradh * previously returned true.
11764726870eSriastradh */
11774726870eSriastradh void
__dma_fence_signal_wake(struct dma_fence * fence,ktime_t timestamp)11784726870eSriastradh __dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
11794726870eSriastradh {
11804726870eSriastradh struct dma_fence_cb *fcb, *next;
11814726870eSriastradh
1182f5ba9028Sriastradh KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
1183f5ba9028Sriastradh KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
1184f5ba9028Sriastradh
11854726870eSriastradh spin_lock(fence->lock);
11864726870eSriastradh
11874726870eSriastradh KASSERT(fence->flags & DMA_FENCE_FLAG_SIGNALED_BIT);
11884726870eSriastradh
118915a96b9bSriastradh SDT_PROBE1(sdt, drm, fence, signal, fence);
119015a96b9bSriastradh
1191b724d402Sriastradh /* Set the timestamp. */
1192b724d402Sriastradh fence->timestamp = timestamp;
1193b724d402Sriastradh set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
1194b724d402Sriastradh
11954726870eSriastradh /* Wake waiters. */
11964726870eSriastradh cv_broadcast(&fence->f_cv);
11974726870eSriastradh
11984726870eSriastradh /* Remove and call the callbacks. */
11994726870eSriastradh TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
12004726870eSriastradh TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
12014726870eSriastradh fcb->fcb_onqueue = false;
12024726870eSriastradh (*fcb->func)(fence, fcb);
12034726870eSriastradh }
12044726870eSriastradh
12054726870eSriastradh spin_unlock(fence->lock);
12064726870eSriastradh }
1207