/*	$NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $");

#include <sys/param.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/seqlock.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);

static struct dma_resv_list *
objlist_tryalloc(uint32_t n)
{
        struct dma_resv_list *list;

        list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
        if (list == NULL)
                return NULL;
        list->shared_max = n;

        return list;
}

static struct dma_resv_list *
objlist_alloc(uint32_t n)
{
        struct dma_resv_list *list;

        list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_SLEEP);
        list->shared_max = n;

        return list;
}

static void
objlist_free(struct dma_resv_list *list)
{
        uint32_t n = list->shared_max;

        kmem_free(list, offsetof(typeof(*list), shared[n]));
}

static void
objlist_free_cb(struct rcu_head *rcu)
{
        struct dma_resv_list *list = container_of(rcu,
            struct dma_resv_list, rol_rcu);

        objlist_free(list);
}

static void
objlist_defer_free(struct dma_resv_list *list)
{

        call_rcu(&list->rol_rcu, objlist_free_cb);
}

/*
 * dma_resv_init(robj)
 *
 * Initialize a reservation object.  Caller must later destroy it
 * with dma_resv_fini.
 */
void
dma_resv_init(struct dma_resv *robj)
{

        ww_mutex_init(&robj->lock, &reservation_ww_class);
        seqcount_init(&robj->seq);
        robj->fence_excl = NULL;
        robj->fence = NULL;
        robj->robj_prealloc = NULL;
}

/*
 * dma_resv_fini(robj)
 *
 * Destroy a reservation object, freeing any memory that had been
 * allocated for it.  Caller must have exclusive access to it.
 */
void
dma_resv_fini(struct dma_resv *robj)
{
        unsigned i;

        if (robj->robj_prealloc) {
                objlist_free(robj->robj_prealloc);
                robj->robj_prealloc = NULL; /* paranoia */
        }
        if (robj->fence) {
                for (i = 0; i < robj->fence->shared_count; i++) {
                        dma_fence_put(robj->fence->shared[i]);
                        robj->fence->shared[i] = NULL; /* paranoia */
                }
                objlist_free(robj->fence);
                robj->fence = NULL; /* paranoia */
        }
        if (robj->fence_excl) {
                dma_fence_put(robj->fence_excl);
                robj->fence_excl = NULL; /* paranoia */
        }
        ww_mutex_destroy(&robj->lock);
}
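
/*
 * Illustrative sketch (not part of the original file): typical lifetime
 * of a reservation object embedded in a driver's buffer object.  struct
 * my_bo is hypothetical; only dma_resv_init/dma_resv_fini come from this
 * file.
 *
 *      struct my_bo {
 *              struct dma_resv resv;
 *              // ... driver-specific fields ...
 *      } *bo;
 *
 *      dma_resv_init(&bo->resv);
 *      // ... attach fences, wait for them, etc. ...
 *      dma_resv_fini(&bo->resv);       // drops any remaining fence references
 */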

/*
 * dma_resv_lock(robj, ctx)
 *
 * Acquire a reservation object's lock.  Return 0 on success,
 * -EALREADY if caller already holds it, -EDEADLK if a
 * higher-priority owner holds it and the caller must back out and
 * retry.
 */
int
dma_resv_lock(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

        return ww_mutex_lock(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow(robj, ctx)
 *
 * Acquire a reservation object's lock.  Caller must not hold
 * this lock or any others -- this is to be used in slow paths
 * after dma_resv_lock or dma_resv_lock_interruptible has failed
 * and the caller has backed out all other locks.
 */
void
dma_resv_lock_slow(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

        ww_mutex_lock_slow(&robj->lock, ctx);
}

/*
 * dma_resv_lock_interruptible(robj, ctx)
 *
 * Acquire a reservation object's lock.  Return 0 on success,
 * -EALREADY if caller already holds it, -EDEADLK if a
 * higher-priority owner holds it and the caller must back out and
 * retry, -EINTR if interrupted.
 */
int
dma_resv_lock_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

        return ww_mutex_lock_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow_interruptible(robj, ctx)
 *
 * Acquire a reservation object's lock.  Caller must not hold
 * this lock or any others -- this is to be used in slow paths
 * after dma_resv_lock or dma_resv_lock_interruptible has failed
 * and the caller has backed out all other locks.  Return 0 on
 * success, -EINTR if interrupted.
 */
int
dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

        return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_trylock(robj)
 *
 * Try to acquire a reservation object's lock without blocking.
 * Return true on success, false on failure.
 */
bool
dma_resv_trylock(struct dma_resv *robj)
{

        return ww_mutex_trylock(&robj->lock);
}

/*
 * dma_resv_locking_ctx(robj)
 *
 * Return a pointer to the ww_acquire_ctx used by the owner of
 * the reservation object's lock, or NULL if it is either not
 * owned or if it is locked without context.
 */
struct ww_acquire_ctx *
dma_resv_locking_ctx(struct dma_resv *robj)
{

        return ww_mutex_locking_ctx(&robj->lock);
}

/*
 * dma_resv_unlock(robj)
 *
 * Release a reservation object's lock.
 */
void
dma_resv_unlock(struct dma_resv *robj)
{

        return ww_mutex_unlock(&robj->lock);
}
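
/*
 * Illustrative sketch (not part of the original file): the usual
 * wound/wait acquire-and-backoff dance for locking several reservation
 * objects together.  ww_acquire_init/ww_acquire_fini come from
 * <linux/ww_mutex.h>; the loop structure and the "unlock everything"
 * steps are pseudocode for whatever bookkeeping the caller uses.
 *
 *      struct ww_acquire_ctx ctx;
 *      struct dma_resv *contended = NULL;
 *      int ret;
 *
 *      ww_acquire_init(&ctx, &reservation_ww_class);
 * retry:
 *      if (contended != NULL)
 *              dma_resv_lock_slow(contended, &ctx);
 *      for (each robj in the transaction, skipping contended) {
 *              ret = dma_resv_lock(robj, &ctx);
 *              if (ret == -EDEADLK) {
 *                      (unlock everything locked so far)
 *                      contended = robj;
 *                      goto retry;
 *              }
 *      }
 *      // ... do the work, e.g. add fences ...
 *      (unlock everything)
 *      ww_acquire_fini(&ctx);
 */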

/*
 * dma_resv_is_locked(robj)
 *
 * True if robj is locked.
 */
bool
dma_resv_is_locked(struct dma_resv *robj)
{

        return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_held(robj)
 *
 * True if robj is locked.
 */
bool
dma_resv_held(struct dma_resv *robj)
{

        return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_assert_held(robj)
 *
 * Panic if robj is not held, in DIAGNOSTIC builds.
 */
void
dma_resv_assert_held(struct dma_resv *robj)
{

        KASSERT(dma_resv_held(robj));
}

/*
 * dma_resv_get_excl(robj)
 *
 * Return a pointer to the exclusive fence of the reservation
 * object robj.
 *
 * Caller must have robj locked.
 */
struct dma_fence *
dma_resv_get_excl(struct dma_resv *robj)
{

        KASSERT(dma_resv_held(robj));
        return robj->fence_excl;
}

/*
 * dma_resv_get_list(robj)
 *
 * Return a pointer to the shared fence list of the reservation
 * object robj.
 *
 * Caller must have robj locked.
 */
struct dma_resv_list *
dma_resv_get_list(struct dma_resv *robj)
{

        KASSERT(dma_resv_held(robj));
        return robj->fence;
}

/*
 * dma_resv_reserve_shared(robj, num_fences)
 *
 * Reserve space in robj to add num_fences shared fences.  To be
 * used only once before calling dma_resv_add_shared_fence.
 *
 * Caller must have robj locked.
 *
 * Internally, we start with room for four entries and double if
 * we don't have enough.  This is not guaranteed.
 */
int
dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
{
        struct dma_resv_list *list, *prealloc;
        uint32_t n, nalloc;

        KASSERT(dma_resv_held(robj));

        list = robj->fence;
        prealloc = robj->robj_prealloc;

        /* If there's an existing list, check it for space. */
        if (list) {
                /* If there are too many already, give up. */
                if (list->shared_count > UINT32_MAX - num_fences)
                        return -ENOMEM;

                /* Add some more. */
                n = list->shared_count + num_fences;

                /* If there's enough for one more, we're done. */
                if (n <= list->shared_max)
                        return 0;
        } else {
                /* No list already.  We need space for num_fences. */
                n = num_fences;
        }

        /* If not, maybe there's a preallocated list ready. */
        if (prealloc != NULL) {
                /* If there's enough room in it, stop here. */
                if (n <= prealloc->shared_max)
                        return 0;

                /* Try to double its capacity. */
                nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
                prealloc = objlist_alloc(nalloc);

                /* Swap the new preallocated list and free the old one. */
                objlist_free(robj->robj_prealloc);
                robj->robj_prealloc = prealloc;
        } else {
                /* Start with some spare. */
                nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
                prealloc = objlist_alloc(nalloc);

                /* Save the new preallocated list. */
                robj->robj_prealloc = prealloc;
        }

        /* Success! */
        return 0;
}
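
/*
 * Illustrative sketch (not part of the original file): the
 * reserve-then-add protocol for publishing a new shared (read) fence.
 * bo and fence are hypothetical; locking with a NULL ww_acquire context
 * is the single-object case.
 *
 *      ret = dma_resv_lock(&bo->resv, NULL);
 *      if (ret)
 *              return ret;
 *      ret = dma_resv_reserve_shared(&bo->resv, 1);
 *      if (ret) {
 *              dma_resv_unlock(&bo->resv);
 *              return ret;             // -ENOMEM
 *      }
 *      dma_resv_add_shared_fence(&bo->resv, fence);
 *      dma_resv_unlock(&bo->resv);
 */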

struct dma_resv_write_ticket {
};

/*
 * dma_resv_write_begin(robj, ticket)
 *
 * Begin an atomic batch of writes to robj, and initialize opaque
 * ticket for it.  The ticket must be passed to
 * dma_resv_write_commit to commit the writes.
 *
 * Caller must have robj locked.
 *
 * Implies membar_producer, i.e. store-before-store barrier.  Does
 * NOT serve as an acquire operation, however.
 */
static void
dma_resv_write_begin(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

        KASSERT(dma_resv_held(robj));

        write_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_write_commit(robj, ticket)
 *
 * Commit an atomic batch of writes to robj begun with the call to
 * dma_resv_write_begin that returned ticket.
 *
 * Caller must have robj locked.
 *
 * Implies membar_producer, i.e. store-before-store barrier.  Does
 * NOT serve as a release operation, however.
 */
static void
dma_resv_write_commit(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

        KASSERT(dma_resv_held(robj));

        write_seqcount_end(&robj->seq);
}

struct dma_resv_read_ticket {
        unsigned version;
};

/*
 * dma_resv_read_begin(robj, ticket)
 *
 * Begin a read section, and initialize opaque ticket for it.  The
 * ticket must be passed to dma_resv_read_valid, and the
 * caller must be prepared to retry reading if it fails.
 */
static void
dma_resv_read_begin(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

        ticket->version = read_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_read_valid(robj, ticket)
 *
 * Test whether the read sections are valid.  Return true on
 * success, or false on failure if the read ticket has been
 * invalidated.
 */
static bool
dma_resv_read_valid(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

        return !read_seqcount_retry(&robj->seq, ticket->version);
}
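
/*
 * Illustrative sketch (not part of the original file): the read-ticket
 * pattern that the RCU readers below all follow.  Schematic only; the
 * "..." steps stand for the per-caller loads and reference-taking.
 *
 *      struct dma_resv_read_ticket ticket;
 *
 * top: rcu_read_lock();
 *      dma_resv_read_begin(robj, &ticket);
 *      // ... load fence pointers with atomic_load_consume/_relaxed ...
 *      if (!dma_resv_read_valid(robj, &ticket)) {
 *              rcu_read_unlock();
 *              goto top;               // a writer intervened; start over
 *      }
 *      // ... take references with dma_fence_get_rcu before use ...
 *      rcu_read_unlock();
 */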

/*
 * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket)
 *
 * Set *listp and *shared_countp to a snapshot of the pointer to
 * and length of the shared fence list of robj and return true, or
 * set them to NULL/0 and return false if a writer intervened so
 * the caller must start over.
 *
 * Both *listp and *shared_countp are unconditionally initialized
 * on return.  They may be NULL/0 even on success, if there is no
 * shared list at the moment.  Does not take any fence references.
 */
static bool
dma_resv_get_shared_reader(const struct dma_resv *robj,
    const struct dma_resv_list **listp, unsigned *shared_countp,
    struct dma_resv_read_ticket *ticket)
{
        struct dma_resv_list *list;
        unsigned shared_count = 0;

        /*
         * Get the list and, if it is present, its length.  If the list
         * is present, it has a valid length.  The atomic_load_consume
         * pairs with the membar_producer in dma_resv_write_begin.
         */
        list = atomic_load_consume(&robj->fence);
        shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0;

        /*
         * We are done reading from robj and list.  Validate our
         * parking ticket.  If it's invalid, do not pass go and do not
         * collect $200.
         */
        if (!dma_resv_read_valid(robj, ticket))
                goto fail;

        /* Success! */
        *listp = list;
        *shared_countp = shared_count;
        return true;

fail:   *listp = NULL;
        *shared_countp = 0;
        return false;
}

/*
 * dma_resv_get_excl_reader(robj, fencep, ticket)
 *
 * Set *fencep to the exclusive fence of robj and return true, or
 * set it to NULL and return false if either
 * (a) a writer intervened, or
 * (b) the fence is scheduled to be destroyed after this RCU grace
 * period,
 * in either case meaning the caller must restart.
 *
 * The value of *fencep is unconditionally initialized on return.
 * It may be NULL, if there is no exclusive fence at the moment.
 * If nonnull, *fencep is referenced; caller must dma_fence_put.
 */
static bool
dma_resv_get_excl_reader(const struct dma_resv *robj,
    struct dma_fence **fencep,
    struct dma_resv_read_ticket *ticket)
{
        struct dma_fence *fence;

        /*
         * Get the candidate fence pointer.  The atomic_load_consume
         * pairs with the membar_producer in dma_resv_write_begin.
         */
        fence = atomic_load_consume(&robj->fence_excl);

        /*
         * The load of robj->fence_excl is atomic, but the caller may
         * have previously loaded the shared fence list and should
         * restart if its view of the entire dma_resv object is not a
         * consistent snapshot.
         */
        if (!dma_resv_read_valid(robj, ticket))
                goto fail;

        /*
         * If the fence is already scheduled to go away after this RCU
         * read section, give up.  Otherwise, take a reference so it
         * won't go away until after dma_fence_put.
         */
        if (fence != NULL &&
            (fence = dma_fence_get_rcu(fence)) == NULL)
                goto fail;

        /* Success! */
        *fencep = fence;
        return true;

fail:   *fencep = NULL;
        return false;
}

/*
 * dma_resv_add_excl_fence(robj, fence)
 *
 * Empty and release all of robj's shared fences, and clear and
 * release its exclusive fence.  If fence is nonnull, acquire a
 * reference to it and save it as robj's exclusive fence.
 *
 * Caller must have robj locked.
 */
void
dma_resv_add_excl_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
        struct dma_fence *old_fence = robj->fence_excl;
        struct dma_resv_list *old_list = robj->fence;
        uint32_t old_shared_count;
        struct dma_resv_write_ticket ticket;

        KASSERT(dma_resv_held(robj));

        /*
         * If we are setting rather than just removing a fence, acquire
         * a reference for ourselves.
         */
        if (fence)
                (void)dma_fence_get(fence);

        /* If there are any shared fences, remember how many. */
        if (old_list)
                old_shared_count = old_list->shared_count;

        /* Begin an update.  Implies membar_producer for fence. */
        dma_resv_write_begin(robj, &ticket);

        /* Replace the fence and zero the shared count. */
        atomic_store_relaxed(&robj->fence_excl, fence);
        if (old_list)
                old_list->shared_count = 0;

        /* Commit the update. */
        dma_resv_write_commit(robj, &ticket);

        /* Release the old exclusive fence, if any. */
        if (old_fence) {
                dma_fence_put(old_fence);
                old_fence = NULL; /* paranoia */
        }

        /* Release any old shared fences. */
        if (old_list) {
                while (old_shared_count--) {
                        dma_fence_put(old_list->shared[old_shared_count]);
                        /* paranoia */
                        old_list->shared[old_shared_count] = NULL;
                }
        }
}
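
/*
 * Illustrative sketch (not part of the original file): installing a
 * single exclusive (write) fence, which also drops any shared fences.
 * No dma_resv_reserve_shared call is needed on this path.  bo and fence
 * are hypothetical.
 *
 *      ret = dma_resv_lock(&bo->resv, NULL);
 *      if (ret == 0) {
 *              dma_resv_add_excl_fence(&bo->resv, fence);
 *              dma_resv_unlock(&bo->resv);
 *      }
 */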

/*
 * dma_resv_add_shared_fence(robj, fence)
 *
 * Acquire a reference to fence and add it to robj's shared list.
 * If any fence was already added with the same context number,
 * release it and replace it by this one.
 *
 * Caller must have robj locked, and must have preceded with a
 * call to dma_resv_reserve_shared for each shared fence
 * added.
 */
void
dma_resv_add_shared_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
        struct dma_resv_list *list = robj->fence;
        struct dma_resv_list *prealloc = robj->robj_prealloc;
        struct dma_resv_write_ticket ticket;
        struct dma_fence *replace = NULL;
        uint32_t i;

        KASSERT(dma_resv_held(robj));

        /* Acquire a reference to the fence. */
        KASSERT(fence != NULL);
        (void)dma_fence_get(fence);

        /* Check for a preallocated replacement list. */
        if (prealloc == NULL) {
                /*
                 * If there is no preallocated replacement list, then
                 * there must be room in the current list.
                 */
                KASSERT(list != NULL);
                KASSERT(list->shared_count < list->shared_max);

                /* Begin an update.  Implies membar_producer for fence. */
                dma_resv_write_begin(robj, &ticket);

                /* Find a fence with the same context number. */
                for (i = 0; i < list->shared_count; i++) {
                        if (list->shared[i]->context == fence->context) {
                                replace = list->shared[i];
                                atomic_store_relaxed(&list->shared[i], fence);
                                break;
                        }
                }

                /* If we didn't find one, add it at the end. */
                if (i == list->shared_count) {
                        atomic_store_relaxed(&list->shared[list->shared_count],
                            fence);
                        atomic_store_relaxed(&list->shared_count,
                            list->shared_count + 1);
                }

                /* Commit the update. */
                dma_resv_write_commit(robj, &ticket);
        } else {
                /*
                 * There is a preallocated replacement list.  There may
                 * not be a current list.  If not, treat it as a zero-
                 * length list.
                 */
                uint32_t shared_count = (list == NULL? 0 : list->shared_count);

                /* There had better be room in the preallocated list. */
                KASSERT(shared_count < prealloc->shared_max);

                /*
                 * Copy the fences over, but replace if we find one
                 * with the same context number.
                 */
                for (i = 0; i < shared_count; i++) {
                        if (replace == NULL &&
                            list->shared[i]->context == fence->context) {
                                replace = list->shared[i];
                                prealloc->shared[i] = fence;
                        } else {
                                prealloc->shared[i] = list->shared[i];
                        }
                }
                prealloc->shared_count = shared_count;

                /* If we didn't find one, add it at the end. */
                if (replace == NULL) {
                        KASSERT(prealloc->shared_count < prealloc->shared_max);
                        prealloc->shared[prealloc->shared_count++] = fence;
                }

                /*
                 * Now ready to replace the list.  Begin an update.
                 * Implies membar_producer for fence and prealloc.
                 */
                dma_resv_write_begin(robj, &ticket);

                /* Replace the list. */
                atomic_store_relaxed(&robj->fence, prealloc);
                robj->robj_prealloc = NULL;

                /* Commit the update. */
                dma_resv_write_commit(robj, &ticket);

                /*
                 * If there is an old list, free it when convenient.
                 * (We are not in a position at this point to sleep
                 * waiting for activity on all CPUs.)
                 */
                if (list)
                        objlist_defer_free(list);
        }

        /* Release a fence if we replaced it. */
        if (replace) {
                dma_fence_put(replace);
                replace = NULL; /* paranoia */
        }
}

/*
 * dma_resv_get_excl_rcu(robj)
 *
 * Note: Caller need not call this from an RCU read section.
 */
struct dma_fence *
dma_resv_get_excl_rcu(const struct dma_resv *robj)
{
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_fence_get_rcu_safe(&robj->fence_excl);
        rcu_read_unlock();

        return fence;
}

/*
 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
 *
 * Get a snapshot of the exclusive and shared fences of robj.  The
 * shared fences are returned as a pointer *sharedp to an array,
 * to be freed by the caller with kfree, of *nsharedp elements.
 * If fencep is null, then add the exclusive fence, if any, at the
 * end of the array instead.
 *
 * Returns zero on success, negative (Linux-style) error code on
 * failure.  On failure, *fencep, *nsharedp, and *sharedp are
 * untouched.
 */
int
dma_resv_get_fences_rcu(const struct dma_resv *robj,
    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
{
        const struct dma_resv_list *list = NULL;
        struct dma_fence *fence = NULL;
        struct dma_fence **shared = NULL;
        unsigned shared_alloc = 0, shared_count, i;
        struct dma_resv_read_ticket ticket;

top:    KASSERT(fence == NULL);

        /* Enter an RCU read section and get a read ticket. */
        rcu_read_lock();
        dma_resv_read_begin(robj, &ticket);

        /* If there is a shared list, grab it. */
        if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket))
                goto restart;
        if (list != NULL) {

                /*
                 * Avoid arithmetic overflow with `+ 1' below.
                 * Strictly speaking we don't need this if the caller
                 * specified fencep or if there is no exclusive fence,
                 * but it is simpler to not have to consider those
                 * cases.
                 */
                KASSERT(shared_count <= list->shared_max);
                if (list->shared_max == UINT_MAX)
                        return -ENOMEM;

                /* Check whether we have a buffer. */
                if (shared == NULL) {
                        /*
                         * We don't have a buffer yet.  Try to allocate
                         * one without waiting.
                         */
                        shared_alloc = list->shared_max + 1;
                        shared = kcalloc(shared_alloc, sizeof(shared[0]),
                            GFP_NOWAIT);
                        if (shared == NULL) {
                                /*
                                 * Couldn't do it immediately.  Back
                                 * out of RCU and allocate one with
                                 * waiting.
                                 */
                                rcu_read_unlock();
                                shared = kcalloc(shared_alloc,
                                    sizeof(shared[0]), GFP_KERNEL);
                                if (shared == NULL)
                                        return -ENOMEM;
                                goto top;
                        }
                } else if (shared_alloc < list->shared_max + 1) {
                        /*
                         * We have a buffer but it's too small.  We're
                         * already racing in this case, so just back
                         * out and wait to allocate a bigger one.
                         */
                        shared_alloc = list->shared_max + 1;
                        rcu_read_unlock();
                        kfree(shared);
                        shared = kcalloc(shared_alloc, sizeof(shared[0]),
                            GFP_KERNEL);
                        if (shared == NULL)
                                return -ENOMEM;
                        goto top;
                }

                /*
                 * We got a buffer large enough.  Copy into the buffer
                 * and record the number of elements.  Could safely use
                 * memcpy here, because even if we race with a writer
                 * it'll invalidate the read ticket and we'll start
                 * over, but atomic_load in a loop will pacify kcsan.
                 */
                for (i = 0; i < shared_count; i++)
                        shared[i] = atomic_load_relaxed(&list->shared[i]);

                /* If anything changed while we were copying, restart. */
                if (!dma_resv_read_valid(robj, &ticket))
                        goto restart;
        }

        /* If there is an exclusive fence, grab it. */
        KASSERT(fence == NULL);
        if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
                goto restart;

        /*
         * Try to get a reference to all of the shared fences.
         */
        for (i = 0; i < shared_count; i++) {
                if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
                        goto put_restart;
        }

        /* Success! */
        rcu_read_unlock();
        KASSERT(shared_count <= shared_alloc);
        KASSERT(shared_alloc == 0 || shared_count < shared_alloc);
        KASSERT(shared_alloc <= UINT_MAX);
        if (fencep) {
                *fencep = fence;
        } else if (fence) {
                if (shared_count) {
                        shared[shared_count++] = fence;
                } else {
                        shared = kmalloc(sizeof(shared[0]), GFP_KERNEL);
                        shared[0] = fence;
                        shared_count = 1;
                }
        }
        *nsharedp = shared_count;
        *sharedp = shared;
        return 0;

put_restart:
        /* Back out. */
        while (i --> 0) {
                dma_fence_put(shared[i]);
                shared[i] = NULL; /* paranoia */
        }
        if (fence) {
                dma_fence_put(fence);
                fence = NULL;
        }

restart:
        KASSERT(fence == NULL);
        rcu_read_unlock();
        goto top;
}
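
/*
 * Illustrative sketch (not part of the original file): taking a
 * lock-free snapshot of all fences and then releasing it.  The caller
 * owns one reference per returned fence and the shared array itself;
 * bo is a hypothetical buffer object embedding a struct dma_resv.
 *
 *      struct dma_fence *excl, **shared;
 *      unsigned nshared, i;
 *      int ret;
 *
 *      ret = dma_resv_get_fences_rcu(&bo->resv, &excl, &nshared, &shared);
 *      if (ret)
 *              return ret;
 *      // ... inspect excl (may be NULL) and shared[0..nshared-1] ...
 *      for (i = 0; i < nshared; i++)
 *              dma_fence_put(shared[i]);
 *      kfree(shared);
 *      if (excl)
 *              dma_fence_put(excl);
 */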

/*
 * dma_resv_copy_fences(dst, src)
 *
 * Copy the exclusive fence and all the shared fences from src to
 * dst.
 *
 * Caller must have dst locked.
 */
int
dma_resv_copy_fences(struct dma_resv *dst_robj,
    const struct dma_resv *src_robj)
{
        const struct dma_resv_list *src_list;
        struct dma_resv_list *dst_list = NULL;
        struct dma_resv_list *old_list;
        struct dma_fence *fence = NULL;
        struct dma_fence *old_fence;
        uint32_t shared_count, i;
        struct dma_resv_read_ticket read_ticket;
        struct dma_resv_write_ticket write_ticket;

        KASSERT(dma_resv_held(dst_robj));

top:    KASSERT(fence == NULL);

        /* Enter an RCU read section and get a read ticket. */
        rcu_read_lock();
        dma_resv_read_begin(src_robj, &read_ticket);

        /* Get the shared list. */
        if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count,
                &read_ticket))
                goto restart;
        if (src_list) {
                /* Allocate a new list, if necessary. */
                if (dst_list == NULL)
                        dst_list = objlist_tryalloc(shared_count);
                if (dst_list == NULL || dst_list->shared_max < shared_count) {
                        rcu_read_unlock();
                        if (dst_list) {
                                objlist_free(dst_list);
                                dst_list = NULL;
                        }
                        dst_list = objlist_alloc(shared_count);
                        dst_list->shared_count = 0; /* paranoia */
                        goto top;
                }

                /* Copy over all fences that are not yet signalled. */
                dst_list->shared_count = 0;
                for (i = 0; i < shared_count; i++) {
                        KASSERT(fence == NULL);
                        fence = atomic_load_relaxed(&src_list->shared[i]);
                        if ((fence = dma_fence_get_rcu(fence)) == NULL)
                                goto restart;
                        if (dma_fence_is_signaled(fence)) {
                                dma_fence_put(fence);
                                fence = NULL;
                                continue;
                        }
                        dst_list->shared[dst_list->shared_count++] = fence;
                        fence = NULL;
                }

                /* If anything changed while we were copying, restart. */
                if (!dma_resv_read_valid(src_robj, &read_ticket))
                        goto restart;
        }

        /* Get the exclusive fence. */
        KASSERT(fence == NULL);
        if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket))
                goto restart;

        /* All done with src; exit the RCU read section. */
        rcu_read_unlock();

        /*
         * We now have a snapshot of the shared and exclusive fences of
         * src_robj and we have acquired references to them so they
         * won't go away.  Transfer them over to dst_robj, releasing
         * references to any that were there.
         */

        /* Get the old shared and exclusive fences, if any. */
        old_list = dst_robj->fence;
        old_fence = dst_robj->fence_excl;

        /*
         * Begin an update.  Implies membar_producer for dst_list and
         * fence.
         */
        dma_resv_write_begin(dst_robj, &write_ticket);

        /* Replace the fences. */
        atomic_store_relaxed(&dst_robj->fence, dst_list);
        atomic_store_relaxed(&dst_robj->fence_excl, fence);

        /* Commit the update. */
        dma_resv_write_commit(dst_robj, &write_ticket);

        /* Release the old exclusive fence, if any. */
        if (old_fence) {
                dma_fence_put(old_fence);
                old_fence = NULL; /* paranoia */
        }

        /* Release any old shared fences. */
        if (old_list) {
                for (i = old_list->shared_count; i --> 0;) {
                        dma_fence_put(old_list->shared[i]);
                        old_list->shared[i] = NULL; /* paranoia */
                }
                objlist_free(old_list);
                old_list = NULL; /* paranoia */
        }

        /* Success! */
        return 0;

restart:
        KASSERT(fence == NULL);
        rcu_read_unlock();
        if (dst_list) {
                for (i = dst_list->shared_count; i --> 0;) {
                        dma_fence_put(dst_list->shared[i]);
                        dst_list->shared[i] = NULL; /* paranoia */
                }
                /* reuse dst_list allocation for the next attempt */
        }
        goto top;
}
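
/*
 * Illustrative sketch (not part of the original file): cloning fence
 * state when one buffer object takes over another's reservation, e.g.
 * during eviction.  Only the destination needs to be locked; the source
 * is read under RCU.  The bo names and ctx are hypothetical.
 *
 *      ret = dma_resv_lock(&dst_bo->resv, &ctx);
 *      if (ret == 0) {
 *              ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *              dma_resv_unlock(&dst_bo->resv);
 *      }
 */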

/*
 * dma_resv_test_signaled_rcu(robj, shared)
 *
 * If shared is true, test whether all of the shared fences are
 * signalled, or if there are none, test whether the exclusive
 * fence is signalled.  If shared is false, test only whether the
 * exclusive fence is signalled.
 *
 * XXX Why does this _not_ test the exclusive fence if shared is
 * true only if there are no shared fences?  This makes no sense.
 */
bool
dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    bool shared)
{
        struct dma_resv_read_ticket ticket;
        const struct dma_resv_list *list;
        struct dma_fence *fence = NULL;
        uint32_t i, shared_count;
        bool signaled = true;

top:    KASSERT(fence == NULL);

        /* Enter an RCU read section and get a read ticket. */
        rcu_read_lock();
        dma_resv_read_begin(robj, &ticket);

        /* If shared is requested and there is a shared list, test it. */
        if (shared) {
                if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
                        &ticket))
                        goto restart;
        } else {
                list = NULL;
                shared_count = 0;
        }
        if (list != NULL) {
                /*
                 * For each fence, if it is going away, restart.
                 * Otherwise, acquire a reference to it to test whether
                 * it is signalled.  Stop if we find any that is not
                 * signalled.
                 */
                for (i = 0; i < shared_count; i++) {
                        KASSERT(fence == NULL);
                        fence = atomic_load_relaxed(&list->shared[i]);
                        if ((fence = dma_fence_get_rcu(fence)) == NULL)
                                goto restart;
                        signaled &= dma_fence_is_signaled(fence);
                        dma_fence_put(fence);
                        fence = NULL;
                        if (!signaled)
                                goto out;
                }

                /* If anything changed while we were testing, restart. */
                if (!dma_resv_read_valid(robj, &ticket))
                        goto restart;
        }
        if (shared_count)
                goto out;

        /* If there is an exclusive fence, test it. */
        KASSERT(fence == NULL);
        if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
                goto restart;
        if (fence != NULL) {
                /* Test whether it is signalled.  If no, stop. */
                signaled &= dma_fence_is_signaled(fence);
                dma_fence_put(fence);
                fence = NULL;
                if (!signaled)
                        goto out;
        }

out:    KASSERT(fence == NULL);
        rcu_read_unlock();
        return signaled;

restart:
        KASSERT(fence == NULL);
        rcu_read_unlock();
        goto top;
}
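
/*
 * Illustrative sketch (not part of the original file): a non-blocking
 * "is this buffer idle?" check over both shared and exclusive fences,
 * as a busy-ioctl or poll backend might do.  bo is a hypothetical
 * buffer object embedding a struct dma_resv.
 *
 *      bool idle = dma_resv_test_signaled_rcu(&bo->resv, true);
 */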
1116f3058635Sriastradh
1117f3058635Sriastradh /*
1118f3058635Sriastradh * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
1119f3058635Sriastradh *
1120f3058635Sriastradh * If shared is true, wait for all of the shared fences to be
1121f3058635Sriastradh * signalled, or if there are none, wait for the exclusive fence
1122f3058635Sriastradh * to be signalled. If shared is false, wait only for the
1123f3058635Sriastradh * exclusive fence to be signalled. If timeout is zero, don't
1124f3058635Sriastradh * wait, only test.
1125f3058635Sriastradh *
1126f3058635Sriastradh * XXX Why does this _not_ wait for the exclusive fence if shared
1127f3058635Sriastradh * is true only if there are no shared fences? This makes no
1128f3058635Sriastradh * sense.
1129f3058635Sriastradh */
1130f3058635Sriastradh long
dma_resv_wait_timeout_rcu(const struct dma_resv * robj,bool shared,bool intr,unsigned long timeout)1131f3058635Sriastradh dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
1132f3058635Sriastradh bool shared, bool intr, unsigned long timeout)
1133f3058635Sriastradh {
1134f3058635Sriastradh struct dma_resv_read_ticket ticket;
11356a54306eSriastradh const struct dma_resv_list *list;
1136cbc804a9Sriastradh struct dma_fence *fence = NULL;
1137f3058635Sriastradh uint32_t i, shared_count;
1138f3058635Sriastradh long ret;
1139f3058635Sriastradh
1140f3058635Sriastradh if (timeout == 0)
1141f3058635Sriastradh return dma_resv_test_signaled_rcu(robj, shared);
1142f3058635Sriastradh
1143cbc804a9Sriastradh top: KASSERT(fence == NULL);
1144cbc804a9Sriastradh
1145f3058635Sriastradh /* Enter an RCU read section and get a read ticket. */
1146f3058635Sriastradh rcu_read_lock();
1147f3058635Sriastradh dma_resv_read_begin(robj, &ticket);
1148f3058635Sriastradh
1149f3058635Sriastradh /* If shared is requested and there is a shared list, wait on it. */
11506a54306eSriastradh if (shared) {
11516a54306eSriastradh if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
11526a54306eSriastradh &ticket))
1153f3058635Sriastradh goto restart;
11546a54306eSriastradh } else {
11556a54306eSriastradh list = NULL;
11566a54306eSriastradh shared_count = 0;
11576a54306eSriastradh }
11586a54306eSriastradh if (list != NULL) {
1159f3058635Sriastradh /*
1160f3058635Sriastradh * For each fence, if it is going away, restart.
1161f3058635Sriastradh * Otherwise, acquire a reference to it to test whether
1162f3058635Sriastradh * it is signalled. Stop and wait if we find any that
1163f3058635Sriastradh * is not signalled.
1164f3058635Sriastradh */
1165f3058635Sriastradh for (i = 0; i < shared_count; i++) {
1166cbc804a9Sriastradh KASSERT(fence == NULL);
1167f03762a4Sriastradh fence = atomic_load_relaxed(&list->shared[i]);
1168cbc804a9Sriastradh if ((fence = dma_fence_get_rcu(fence)) == NULL)
1169f3058635Sriastradh goto restart;
1170f3058635Sriastradh if (!dma_fence_is_signaled(fence))
1171f3058635Sriastradh goto wait;
1172f3058635Sriastradh dma_fence_put(fence);
1173cbc804a9Sriastradh fence = NULL;
1174f3058635Sriastradh }
11752d7da004Sriastradh
11762d7da004Sriastradh /* If anything changed while we were testing, restart. */
11772d7da004Sriastradh if (!dma_resv_read_valid(robj, &ticket))
11782d7da004Sriastradh goto restart;
1179f3058635Sriastradh }
11800a4684f7Sriastradh if (shared_count)
11810a4684f7Sriastradh goto out;
1182f3058635Sriastradh
1183f3058635Sriastradh /* If there is an exclusive fence, test it. */
1184cbc804a9Sriastradh KASSERT(fence == NULL);
11856a54306eSriastradh if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1186f3058635Sriastradh goto restart;
11876a54306eSriastradh if (fence != NULL) {
11886a54306eSriastradh /* Test whether it is signalled. If not, wait. */
1189f3058635Sriastradh if (!dma_fence_is_signaled(fence))
1190f3058635Sriastradh goto wait;
1191f3058635Sriastradh dma_fence_put(fence);
1192cbc804a9Sriastradh fence = NULL;
1193f3058635Sriastradh }
1194f3058635Sriastradh
11950a4684f7Sriastradh out: /* Success! Return the number of ticks left. */
1196f3058635Sriastradh rcu_read_unlock();
1197cbc804a9Sriastradh KASSERT(fence == NULL);
1198f3058635Sriastradh return timeout;
1199f3058635Sriastradh
1200f3058635Sriastradh restart:
1201cbc804a9Sriastradh KASSERT(fence == NULL);
1202f3058635Sriastradh rcu_read_unlock();
1203f3058635Sriastradh goto top;
1204f3058635Sriastradh
1205f3058635Sriastradh wait:
1206f3058635Sriastradh /*
12071944e9c4Sriastradh * Exit the RCU read section, wait for the fence, and release
12081944e9c4Sriastradh * it when we're done. If we time out or fail, bail.
12091944e9c4Sriastradh * Otherwise, go back to the top.
1210f3058635Sriastradh */
1211f3058635Sriastradh KASSERT(fence != NULL);
1212f3058635Sriastradh rcu_read_unlock();
1213f3058635Sriastradh ret = dma_fence_wait_timeout(fence, intr, timeout);
1214f3058635Sriastradh dma_fence_put(fence);
1215cbc804a9Sriastradh fence = NULL;
1216f3058635Sriastradh if (ret <= 0)
1217f3058635Sriastradh return ret;
1218f3058635Sriastradh KASSERT(ret <= timeout);
1219f3058635Sriastradh timeout = ret;
1220f3058635Sriastradh goto top;
1221f3058635Sriastradh }
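
/*
 * Example (editor's illustrative sketch, not part of the original
 * file; the buffer object and field names below are hypothetical):
 * a caller that must not touch a buffer until all prior GPU access
 * has drained might wait, interruptibly, with a finite timeout in
 * ticks, and interpret the three possible outcomes like so:
 *
 *	long ret;
 *
 *	// wait for shared and exclusive fences, allow signals
 *	ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, timeout);
 *	if (ret < 0)
 *		return ret;		// interrupted or error
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// hypothetical caller policy
 *	// ret > 0: all fences signalled, ret ticks of budget remain
 */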
1222f3058635Sriastradh
1223f3058635Sriastradh /*
1224f3058635Sriastradh * dma_resv_poll_init(rpoll)
1225f3058635Sriastradh *
1226f3058635Sriastradh * Initialize reservation poll state.
1227f3058635Sriastradh */
1228f3058635Sriastradh void
1229f3058635Sriastradh dma_resv_poll_init(struct dma_resv_poll *rpoll)
1230f3058635Sriastradh {
1231f3058635Sriastradh
1232f3058635Sriastradh mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
1233f3058635Sriastradh selinit(&rpoll->rp_selq);
1234f3058635Sriastradh rpoll->rp_claimed = 0;
1235f3058635Sriastradh }
1236f3058635Sriastradh
1237f3058635Sriastradh /*
1238f3058635Sriastradh * dma_resv_poll_fini(rpoll)
1239f3058635Sriastradh *
1240f3058635Sriastradh * Release any resources associated with the reservation poll state.
1241f3058635Sriastradh */
1242f3058635Sriastradh void
1243f3058635Sriastradh dma_resv_poll_fini(struct dma_resv_poll *rpoll)
1244f3058635Sriastradh {
1245f3058635Sriastradh
1246f3058635Sriastradh KASSERT(rpoll->rp_claimed == 0);
1247f3058635Sriastradh seldestroy(&rpoll->rp_selq);
1248f3058635Sriastradh mutex_destroy(&rpoll->rp_lock);
1249f3058635Sriastradh }
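
/*
 * Editor's illustrative sketch (not part of the original file): the
 * poll state is meant to be embedded in some driver-owned structure,
 * initialized before any poller can reach it, and destroyed only
 * after the last poller is gone. All names below are hypothetical.
 *
 *	struct mydev_softc {
 *		struct dma_resv_poll	sc_rpoll;
 *		...
 *	};
 *
 *	// attach
 *	dma_resv_poll_init(&sc->sc_rpoll);
 *
 *	// detach, once no thread can be polling any more
 *	dma_resv_poll_fini(&sc->sc_rpoll);
 */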
1250f3058635Sriastradh
1251f3058635Sriastradh /*
1252f3058635Sriastradh * dma_resv_poll_cb(fence, fcb)
1253f3058635Sriastradh *
1254f3058635Sriastradh * Callback to notify a reservation poll that a fence has
1255f3058635Sriastradh * completed. Notify any waiters and allow the next poller to
1256f3058635Sriastradh * claim the callback.
1257f3058635Sriastradh *
1258f3058635Sriastradh * If a thread is waiting for the exclusive fence only, and we
1259f3058635Sriastradh * spuriously notify it about a shared fence, tough.
1260f3058635Sriastradh */
1261f3058635Sriastradh static void
1262f3058635Sriastradh dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
1263f3058635Sriastradh {
1264f3058635Sriastradh struct dma_resv_poll *rpoll = container_of(fcb,
1265f3058635Sriastradh struct dma_resv_poll, rp_fcb);
1266f3058635Sriastradh
1267f3058635Sriastradh mutex_enter(&rpoll->rp_lock);
1268f3058635Sriastradh selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
1269f3058635Sriastradh rpoll->rp_claimed = 0;
1270f3058635Sriastradh mutex_exit(&rpoll->rp_lock);
1271f3058635Sriastradh }
1272f3058635Sriastradh
1273f3058635Sriastradh /*
1274f3058635Sriastradh * dma_resv_do_poll(robj, events, rpoll)
1275f3058635Sriastradh *
1276f3058635Sriastradh * Poll for reservation object events using the reservation poll
1277f3058635Sriastradh * state in rpoll:
1278f3058635Sriastradh *
1279f3058635Sriastradh * - POLLOUT	wait for all fences, shared and exclusive
1280f3058635Sriastradh * - POLLIN	wait for the exclusive fence
1281f3058635Sriastradh *
1282f3058635Sriastradh * Return the subset of events in events that are ready. If any
1283f3058635Sriastradh * are requested but not ready, arrange to be notified with
1284f3058635Sriastradh * selnotify when they are.
1285f3058635Sriastradh */
1286f3058635Sriastradh int
1287f3058635Sriastradh dma_resv_do_poll(const struct dma_resv *robj, int events,
1288f3058635Sriastradh struct dma_resv_poll *rpoll)
1289f3058635Sriastradh {
1290f3058635Sriastradh struct dma_resv_read_ticket ticket;
12916a54306eSriastradh const struct dma_resv_list *list;
1292cbc804a9Sriastradh struct dma_fence *fence = NULL;
1293f3058635Sriastradh uint32_t i, shared_count;
1294f3058635Sriastradh int revents;
1295f3058635Sriastradh bool recorded = false; /* curlwp is on the selq */
1296f3058635Sriastradh bool claimed = false; /* we claimed the callback */
1297f3058635Sriastradh bool callback = false; /* we requested a callback */
1298f3058635Sriastradh
1299f3058635Sriastradh /*
1300f3058635Sriastradh * Start with the maximal set of events that could be ready.
1301f3058635Sriastradh * We will eliminate the events that are definitely not ready
1302f3058635Sriastradh * as we go, and at the same time add callbacks to notify us
1303f3058635Sriastradh * when they may become ready.
1304f3058635Sriastradh */
1305f3058635Sriastradh revents = events & (POLLIN|POLLOUT);
1306f3058635Sriastradh if (revents == 0)
1307f3058635Sriastradh return 0;
1308f3058635Sriastradh
1309cbc804a9Sriastradh top: KASSERT(fence == NULL);
1310cbc804a9Sriastradh
1311f3058635Sriastradh /* Enter an RCU read section and get a read ticket. */
1312f3058635Sriastradh rcu_read_lock();
1313f3058635Sriastradh dma_resv_read_begin(robj, &ticket);
1314f3058635Sriastradh
1315f3058635Sriastradh /* If we want to wait for all fences, get the shared list. */
13166a54306eSriastradh if (events & POLLOUT) {
13176a54306eSriastradh if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
13186a54306eSriastradh &ticket))
1319f3058635Sriastradh goto restart;
13206a54306eSriastradh } else {
13216a54306eSriastradh list = NULL;
13226a54306eSriastradh shared_count = 0;
13236a54306eSriastradh }
13246a54306eSriastradh if (list != NULL) do {
1325f3058635Sriastradh /*
1326f3058635Sriastradh * For each fence, if it is going away, restart.
1327f3058635Sriastradh * Otherwise, acquire a reference to it to test whether
1328f3058635Sriastradh * it is signalled. Stop and request a callback if we
1329f3058635Sriastradh * find any that is not signalled.
1330f3058635Sriastradh */
1331f3058635Sriastradh for (i = 0; i < shared_count; i++) {
1332cbc804a9Sriastradh KASSERT(fence == NULL);
1333f03762a4Sriastradh fence = atomic_load_relaxed(&list->shared[i]);
1334cbc804a9Sriastradh if ((fence = dma_fence_get_rcu(fence)) == NULL)
1335f3058635Sriastradh goto restart;
1336f3058635Sriastradh if (!dma_fence_is_signaled(fence)) {
1337f3058635Sriastradh dma_fence_put(fence);
1338cbc804a9Sriastradh fence = NULL;
1339f3058635Sriastradh break;
1340f3058635Sriastradh }
1341f3058635Sriastradh dma_fence_put(fence);
1342cbc804a9Sriastradh fence = NULL;
1343f3058635Sriastradh }
1344f3058635Sriastradh
1345f3058635Sriastradh /* If all shared fences have been signalled, move on. */
1346f3058635Sriastradh if (i == shared_count)
1347f3058635Sriastradh break;
1348f3058635Sriastradh
1349f3058635Sriastradh /* Put ourselves on the selq if we haven't already. */
1350f3058635Sriastradh if (!recorded)
1351f3058635Sriastradh goto record;
1352f3058635Sriastradh
1353f3058635Sriastradh /*
1354f3058635Sriastradh * If someone else claimed the callback, or we already
1355f3058635Sriastradh * requested it, we're guaranteed to be notified, so
1356f3058635Sriastradh * assume the event is not ready.
1357f3058635Sriastradh */
1358f3058635Sriastradh if (!claimed || callback) {
1359f3058635Sriastradh revents &= ~POLLOUT;
1360f3058635Sriastradh break;
1361f3058635Sriastradh }
1362f3058635Sriastradh
1363f3058635Sriastradh /*
1364f3058635Sriastradh * Otherwise, find the first fence that is not
1365f3058635Sriastradh * signalled, request the callback, and clear POLLOUT
1366f3058635Sriastradh * from the possible ready events. If they are all
1367f3058635Sriastradh * signalled, leave POLLOUT set; we will simulate the
1368f3058635Sriastradh * callback later.
1369f3058635Sriastradh */
1370f3058635Sriastradh for (i = 0; i < shared_count; i++) {
1371cbc804a9Sriastradh KASSERT(fence == NULL);
1372f03762a4Sriastradh fence = atomic_load_relaxed(&list->shared[i]);
1373cbc804a9Sriastradh if ((fence = dma_fence_get_rcu(fence)) == NULL)
1374f3058635Sriastradh goto restart;
1375f3058635Sriastradh if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
1376f3058635Sriastradh dma_resv_poll_cb)) {
1377f3058635Sriastradh dma_fence_put(fence);
1378cbc804a9Sriastradh fence = NULL;
1379f3058635Sriastradh revents &= ~POLLOUT;
1380f3058635Sriastradh callback = true;
1381f3058635Sriastradh break;
1382f3058635Sriastradh }
1383f3058635Sriastradh dma_fence_put(fence);
1384cbc804a9Sriastradh fence = NULL;
1385f3058635Sriastradh }
1386f3058635Sriastradh } while (0);
1387f3058635Sriastradh
1388f3058635Sriastradh /* We always wait for at least the exclusive fence, so get it. */
1389cbc804a9Sriastradh KASSERT(fence == NULL);
13906a54306eSriastradh if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1391f3058635Sriastradh goto restart;
13926a54306eSriastradh if (fence != NULL) do {
1393f3058635Sriastradh /*
13946a54306eSriastradh * Test whether it is signalled. If not, stop and
13956a54306eSriastradh * request a callback.
1396f3058635Sriastradh */
13976673ca35Sriastradh if (dma_fence_is_signaled(fence))
1398f3058635Sriastradh break;
1399f3058635Sriastradh
1400f3058635Sriastradh /* Put ourselves on the selq if we haven't already. */
1401f3058635Sriastradh if (!recorded) {
1402f3058635Sriastradh dma_fence_put(fence);
1403cbc804a9Sriastradh fence = NULL;
1404f3058635Sriastradh goto record;
1405f3058635Sriastradh }
1406f3058635Sriastradh
1407f3058635Sriastradh /*
1408f3058635Sriastradh * If someone else claimed the callback, or we already
1409f3058635Sriastradh * requested it, we're guaranteed to be notified, so
1410f3058635Sriastradh * assume the event is not ready.
1411f3058635Sriastradh */
1412f3058635Sriastradh if (!claimed || callback) {
1413f3058635Sriastradh revents = 0;
1414f3058635Sriastradh break;
1415f3058635Sriastradh }
1416f3058635Sriastradh
1417f3058635Sriastradh /*
1418f3058635Sriastradh * Otherwise, try to request the callback, and clear
1419f3058635Sriastradh * all possible ready events. If the fence has been
1420f3058635Sriastradh * signalled in the interim, leave the events set; we
1421f3058635Sriastradh * will simulate the callback later.
1422f3058635Sriastradh */
1423f3058635Sriastradh if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
1424f3058635Sriastradh dma_resv_poll_cb)) {
1425f3058635Sriastradh revents = 0;
1426f3058635Sriastradh callback = true;
1427f3058635Sriastradh break;
1428f3058635Sriastradh }
14296673ca35Sriastradh } while (0);
14306673ca35Sriastradh if (fence != NULL) {
1431f3058635Sriastradh dma_fence_put(fence);
1432cbc804a9Sriastradh fence = NULL;
14336673ca35Sriastradh }
1434f3058635Sriastradh
1435f3058635Sriastradh /* All done reading the fences. */
1436f3058635Sriastradh rcu_read_unlock();
1437f3058635Sriastradh
1438f3058635Sriastradh if (claimed && !callback) {
1439f3058635Sriastradh /*
1440f3058635Sriastradh * We claimed the callback but we didn't actually
1441f3058635Sriastradh * request it because a fence was signalled while we
1442f3058635Sriastradh * were claiming it. Call it ourselves now. The
1443f3058635Sriastradh * callback neither uses the fence nor relies on holding
1444f3058635Sriastradh * any of the fence locks, so this is safe.
1445f3058635Sriastradh */
1446f3058635Sriastradh dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
1447f3058635Sriastradh }
1448f3058635Sriastradh return revents;
1449f3058635Sriastradh
1450f3058635Sriastradh restart:
1451cbc804a9Sriastradh KASSERT(fence == NULL);
1452f3058635Sriastradh rcu_read_unlock();
1453f3058635Sriastradh goto top;
1454f3058635Sriastradh
1455f3058635Sriastradh record:
1456cbc804a9Sriastradh KASSERT(fence == NULL);
1457f3058635Sriastradh rcu_read_unlock();
1458f3058635Sriastradh mutex_enter(&rpoll->rp_lock);
1459f3058635Sriastradh selrecord(curlwp, &rpoll->rp_selq);
1460f3058635Sriastradh if (!rpoll->rp_claimed)
1461f3058635Sriastradh claimed = rpoll->rp_claimed = true;
1462f3058635Sriastradh mutex_exit(&rpoll->rp_lock);
1463f3058635Sriastradh recorded = true;
1464f3058635Sriastradh goto top;
1465f3058635Sriastradh }
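
/*
 * Editor's illustrative sketch (not part of the original file): a
 * driver's poll method would typically just forward to
 * dma_resv_do_poll with the object's reservation and its embedded
 * poll state. The softc layout and lookup below are hypothetical.
 *
 *	static int
 *	mydev_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *		struct mydev_softc *sc = mydev_lookup(dev);
 *
 *		return dma_resv_do_poll(sc->sc_obj->resv, events,
 *		    &sc->sc_rpoll);
 *	}
 */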
1466f3058635Sriastradh
1467f3058635Sriastradh /*
1468f3058635Sriastradh * dma_resv_kqfilter(robj, kn, rpoll)
1469f3058635Sriastradh *
1470f3058635Sriastradh * Kqueue filter for reservation objects. Currently not
1471f3058635Sriastradh * implemented because the logic to implement it is nontrivial,
1472f3058635Sriastradh * and userland will presumably never use it, so it would be
1473f3058635Sriastradh * dangerous to add never-tested complex code paths to the kernel.
1474f3058635Sriastradh */
1475f3058635Sriastradh int
1476f3058635Sriastradh dma_resv_kqfilter(const struct dma_resv *robj,
1477f3058635Sriastradh struct knote *kn, struct dma_resv_poll *rpoll)
1478f3058635Sriastradh {
1479f3058635Sriastradh
1480f3058635Sriastradh return EINVAL;
1481f3058635Sriastradh }