/*	$NetBSD: linux_dma_fence_chain.c,v 1.4 2022/04/09 23:44:44 riastradh Exp $	*/

/*-
 * Copyright (c) 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence_chain.c,v 1.4 2022/04/09 23:44:44 riastradh Exp $");

#include <sys/types.h>

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>
#include <linux/spinlock.h>

static void dma_fence_chain_irq_work(struct irq_work *);
static bool dma_fence_chain_enable_signaling(struct dma_fence *);

static const struct dma_fence_ops dma_fence_chain_ops;
/*
 * dma_fence_chain_init(chain, prev, fence, seqno)
 *
 *	Initialize a fence chain node.  If prev was already a chain,
 *	extend it; otherwise, create a new chain context.
 */
void
dma_fence_chain_init(struct dma_fence_chain *chain, struct dma_fence *prev,
    struct dma_fence *fence, uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->dfc_lock);
	chain->dfc_prev = prev;		/* consume caller's reference */
	chain->dfc_fence = fence;	/* consume caller's reference */
	init_irq_work(&chain->dfc_irq_work, &dma_fence_chain_irq_work);

	if (prev_chain == NULL ||
	    !__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = dma_fence_context_alloc(1);
		if (prev_chain)
			seqno = MAX(prev->seqno, seqno);
		chain->prev_seqno = 0;
	} else {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops, &chain->dfc_lock,
	    context, seqno);
}
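
/*
 * Illustrative usage sketch (hypothetical driver code, not part of
 * this file): append a fence to a timeline kept as a chain of
 * dma_fence_chain nodes.  dma_fence_chain_init consumes the
 * references passed in for prev and fence, so the caller takes them
 * explicitly here.  The timeline structure and its head/seqno
 * members are made up for the example:
 *
 *	struct dma_fence_chain *link;
 *
 *	link = kzalloc(sizeof(*link), GFP_KERNEL);
 *	if (link == NULL)
 *		return -ENOMEM;
 *	dma_fence_chain_init(link, dma_fence_get(timeline->head),
 *	    dma_fence_get(fence), ++timeline->seqno);
 *	timeline->head = &link->base;
 */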

static const char *
dma_fence_chain_driver_name(struct dma_fence *fence)
{

	return "dma_fence_chain";
}

static const char *
dma_fence_chain_timeline_name(struct dma_fence *fence)
{

	return "unbound";
}

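/*
 * dma_fence_chain_irq_work(work)
 *
 *	Deferred continuation of dma_fence_chain_callback: try to
 *	re-enable callbacks on the remainder of the chain; if there is
 *	nothing left to wait for, signal the chain node.  Either way,
 *	release the reference to the chain node that was held while
 *	the callback was pending.
 */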
static void
dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain = container_of(work,
	    struct dma_fence_chain, dfc_irq_work);

	if (!dma_fence_chain_enable_signaling(&chain->base))
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

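/*
 * dma_fence_chain_callback(fence, cb)
 *
 *	Called when the fence a chain node is currently waiting for
 *	has signalled.  Defer the rest of the work to
 *	dma_fence_chain_irq_work, and release the reference to fence
 *	that was taken when the callback was registered.
 */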
static void
dma_fence_chain_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain = container_of(cb,
	    struct dma_fence_chain, dfc_callback);

	irq_work_queue(&chain->dfc_irq_work);
	dma_fence_put(fence);
}

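/*
 * dma_fence_chain_enable_signaling(fence)
 *
 *	Walk the chain, culling signalled links, and register a
 *	callback on the first unsignalled fence found.  Return true if
 *	a callback was registered and the fence will signal later, or
 *	false if everything in the chain has already signalled.
 */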
static bool
dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence_chain *chain1;
	struct dma_fence *f, *f1;

	KASSERT(chain);

	dma_fence_get(&chain->base);
	dma_fence_chain_for_each(f, &chain->base) {
		f1 = (chain1 = to_dma_fence_chain(f)) ? chain1->dfc_fence : f;

		/*
		 * Register the callback on the underlying fence f1,
		 * not on the chain node f.  dma_fence_add_callback
		 * returns 0 on success, i.e. when f1 has not yet
		 * signalled; the reference to f1 is then released by
		 * dma_fence_chain_callback when it fires.
		 */
		dma_fence_get(f1);
		if (dma_fence_add_callback(f1, &chain->dfc_callback,
			dma_fence_chain_callback) == 0) {
			dma_fence_put(f);
			return true;
		}
		dma_fence_put(f1);
	}
	dma_fence_put(&chain->base);

	return false;
}

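/*
 * dma_fence_chain_signaled(fence)
 *
 *	Return true if every fence in the chain has signalled, or
 *	false if any fence in the chain has yet to signal.
 */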
static bool
dma_fence_chain_signaled(struct dma_fence *fence)
{
	struct dma_fence_chain *chain1;
	struct dma_fence *f, *f1;

	dma_fence_chain_for_each(f, fence) {
		f1 = (chain1 = to_dma_fence_chain(f)) ? chain1->dfc_fence : f;

		if (!dma_fence_is_signaled(f1)) {
			dma_fence_put(f);
			return false;
		}
	}

	return true;
}

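/*
 * dma_fence_chain_release(fence)
 *
 *	Called when the last reference to a chain node is released.
 *	Unlink as much of the chain as possible iteratively, to avoid
 *	unbounded recursion, then free the node.
 */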
static void
dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence_chain *prev_chain;
	struct dma_fence *prev;

	KASSERT(chain);

	/*
	 * Release the previous pointer, carefully.  Caller has
	 * exclusive access to chain, so no need for atomics here.
	 */
	while ((prev = chain->dfc_prev) != NULL) {
		/*
		 * If anyone else still holds a reference to the
		 * previous fence, or if it's not a chain, stop here.
		 */
		if (kref_read(&prev->refcount) > 1)
			break;
		if ((prev_chain = to_dma_fence_chain(prev)) == NULL)
			break;

		/*
		 * Cut it out and free it.  We have exclusive access
		 * to prev so this is safe.  This dma_fence_put
		 * triggers recursion into dma_fence_chain_release,
		 * but the recursion is bounded to one level because
		 * prev's dfc_prev link has already been nulled out.
		 */
		chain->dfc_prev = prev_chain->dfc_prev;
		prev_chain->dfc_prev = NULL;
		dma_fence_put(prev);
	}
	dma_fence_put(prev);

	dma_fence_put(chain->dfc_fence);
	spin_lock_destroy(&chain->dfc_lock);
	dma_fence_free(&chain->base);
}

static const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_driver_name,
	.get_timeline_name = dma_fence_chain_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};

/*
 * to_dma_fence_chain(fence)
 *
 *	If fence is nonnull and is a chain node, return the chain
 *	node.  Otherwise return NULL.
 */
struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{

	if (fence == NULL || fence->ops != &dma_fence_chain_ops)
		return NULL;
	return container_of(fence, struct dma_fence_chain, base);
}

/*
 * get_prev(chain)
 *
 *	Get the previous fence of the chain and add a reference, if
 *	possible; return NULL otherwise.
 */
static struct dma_fence *
get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->dfc_prev);
	rcu_read_unlock();

	return prev;
}

/*
 * dma_fence_chain_walk(fence)
 *
 *	Find the first unsignalled fence in the chain, or NULL if fence
 *	is not a chain node or the chain's fences are all signalled.
 *	While searching, cull signalled fences.
 */
struct dma_fence *
dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *splice;

	if ((chain = to_dma_fence_chain(fence)) == NULL) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = get_prev(chain)) != NULL) {
		if ((prev_chain = to_dma_fence_chain(prev)) != NULL) {
			if (!dma_fence_is_signaled(prev_chain->dfc_fence))
				break;
			splice = get_prev(prev_chain);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;
			splice = NULL;
		}
		membar_release();	/* pairs with dma_fence_get_rcu_safe */
		if (atomic_cas_ptr(&chain->dfc_prev, prev, splice) == prev)
			dma_fence_put(prev);	/* transferred to splice */
		else
			dma_fence_put(splice);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
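
/*
 * A note on iteration: dma_fence_chain_for_each, used throughout this
 * file, is provided by <linux/dma-fence-chain.h> and is believed to
 * expand along these lines (sketch for exposition; consult the header
 * for the authoritative definition):
 *
 *	#define	dma_fence_chain_for_each(iter, head)		       \
 *		for ((iter) = dma_fence_get(head); (iter) != NULL;     \
 *		     (iter) = dma_fence_chain_walk(iter))
 *
 * Each step consumes the reference on the current node and returns
 * the next unsignalled node with a new reference, which is why
 * callers that break out of the loop early must dma_fence_put the
 * iterator themselves.
 */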

/*
 * dma_fence_chain_find_seqno(&fence, seqno)
 *
 *	If seqno is zero, do nothing and succeed.
 *
 *	Otherwise, if fence is not on a chain or if its sequence
 *	number has not yet reached seqno, fail with -EINVAL.
 *
 *	Otherwise, set fence to the first fence in the chain which
 *	will signal this sequence number.
 */
int
dma_fence_chain_find_seqno(struct dma_fence **fencep, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (seqno == 0)
		return 0;

	chain = to_dma_fence_chain(*fencep);
	if (chain == NULL || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*fencep, &chain->base) {
		if ((*fencep)->context != chain->base.context ||
		    to_dma_fence_chain(*fencep)->prev_seqno < seqno)
			break;
	}

	/*
	 * Drop the caller's reference to the head of the chain;
	 * *fencep now holds its own reference to the node found.
	 */
	dma_fence_put(&chain->base);

	return 0;
}
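
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): wait only for the part of a timeline up to a given sequence
 * number, rather than for the whole chain.  The timeline structure is
 * made up for the example; note that on success *fencep may come back
 * NULL if everything up to seqno has already signalled, and that
 * dma_fence_put tolerates NULL:
 *
 *	struct dma_fence *fence = dma_fence_get(timeline->head);
 *	int error;
 *
 *	error = dma_fence_chain_find_seqno(&fence, seqno);
 *	if (error == 0 && fence != NULL)
 *		(void)dma_fence_wait(fence, true);
 *	dma_fence_put(fence);
 */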