xref: /openbsd-src/sys/dev/pci/drm/drm_syncobj.c (revision 9fbeb06e0aa7e69c7fd009423489de69c53976b1)
17f4dd379Sjsg /*
27f4dd379Sjsg  * Copyright 2017 Red Hat
37f4dd379Sjsg  * Parts ported from amdgpu (fence wait code).
47f4dd379Sjsg  * Copyright 2016 Advanced Micro Devices, Inc.
57f4dd379Sjsg  *
67f4dd379Sjsg  * Permission is hereby granted, free of charge, to any person obtaining a
77f4dd379Sjsg  * copy of this software and associated documentation files (the "Software"),
87f4dd379Sjsg  * to deal in the Software without restriction, including without limitation
97f4dd379Sjsg  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
107f4dd379Sjsg  * and/or sell copies of the Software, and to permit persons to whom the
117f4dd379Sjsg  * Software is furnished to do so, subject to the following conditions:
127f4dd379Sjsg  *
137f4dd379Sjsg  * The above copyright notice and this permission notice (including the next
147f4dd379Sjsg  * paragraph) shall be included in all copies or substantial portions of the
157f4dd379Sjsg  * Software.
167f4dd379Sjsg  *
177f4dd379Sjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
187f4dd379Sjsg  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
197f4dd379Sjsg  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
207f4dd379Sjsg  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
217f4dd379Sjsg  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
227f4dd379Sjsg  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
237f4dd379Sjsg  * IN THE SOFTWARE.
247f4dd379Sjsg  *
257f4dd379Sjsg  * Authors:
267f4dd379Sjsg  *
277f4dd379Sjsg  */
287f4dd379Sjsg 
297f4dd379Sjsg /**
307f4dd379Sjsg  * DOC: Overview
317f4dd379Sjsg  *
32c349dbc7Sjsg  * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
33c349dbc7Sjsg  * container for a synchronization primitive which can be used by userspace
34c349dbc7Sjsg  * to explicitly synchronize GPU commands, can be shared between userspace
35c349dbc7Sjsg  * processes, and can be shared between different DRM drivers.
367f4dd379Sjsg  * Their primary use-case is to implement Vulkan fences and semaphores.
37c349dbc7Sjsg  * The syncobj userspace API provides ioctls for several operations:
387f4dd379Sjsg  *
39c349dbc7Sjsg  *  - Creation and destruction of syncobjs
40c349dbc7Sjsg  *  - Import and export of syncobjs to/from a syncobj file descriptor
41c349dbc7Sjsg  *  - Import and export a syncobj's underlying fence to/from a sync file
42c349dbc7Sjsg  *  - Reset a syncobj (set its fence to NULL)
43c349dbc7Sjsg  *  - Signal a syncobj (set a trivially signaled fence)
44c349dbc7Sjsg  *  - Wait for a syncobj's fence to appear and be signaled
45c349dbc7Sjsg  *
46c349dbc7Sjsg  * The syncobj userspace API also provides operations to manipulate a syncobj
47c349dbc7Sjsg  * in terms of a timeline of struct &dma_fence_chain rather than a single
48c349dbc7Sjsg  * struct &dma_fence, through the following operations:
49c349dbc7Sjsg  *
50c349dbc7Sjsg  *   - Signal a given point on the timeline
51c349dbc7Sjsg  *   - Wait for a given point to appear and/or be signaled
52c349dbc7Sjsg  *   - Import and export from/to a given point of a timeline
53c349dbc7Sjsg  *
54c349dbc7Sjsg  * At its core, a syncobj is simply a wrapper around a pointer to a struct
55c349dbc7Sjsg  * &dma_fence which may be NULL.
56c349dbc7Sjsg  * When a syncobj is first created, its pointer is either NULL or a pointer
57c349dbc7Sjsg  * to an already signaled fence depending on whether the
58c349dbc7Sjsg  * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
59c349dbc7Sjsg  * &DRM_IOCTL_SYNCOBJ_CREATE.
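 *
 * As an illustrative userspace sketch only (assuming an open DRM fd and a
 * libdrm-style drmIoctl() wrapper), creating an already signaled binary
 * syncobj could look like::
 *
 *     struct drm_syncobj_create create = {
 *             .flags = DRM_SYNCOBJ_CREATE_SIGNALED,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
 *             return -errno;
 *
 * On success, create.handle names the new syncobj.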
60c349dbc7Sjsg  *
61c349dbc7Sjsg  * If the syncobj is considered as a binary (its state is either signaled or
62c349dbc7Sjsg  * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
63c349dbc7Sjsg  * the syncobj, the syncobj's fence is replaced with a fence which will be
64c349dbc7Sjsg  * signaled by the completion of that work.
65c349dbc7Sjsg  * If the syncobj is considered as a timeline primitive, when GPU work is
66c349dbc7Sjsg  * enqueued in a DRM driver to signal a given point of the syncobj, a new
67c349dbc7Sjsg  * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
68c349dbc7Sjsg  * also to the previous fence that was in the syncobj. The new struct
69c349dbc7Sjsg  * &dma_fence_chain fence replaces the syncobj's fence and will be signaled
70c349dbc7Sjsg  * by completion of the DRM driver's work and also of any work associated
71c349dbc7Sjsg  * with the fence previously in the syncobj.
72c349dbc7Sjsg  *
73c349dbc7Sjsg  * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
74c349dbc7Sjsg  * time the work is enqueued, it waits on the syncobj's fence before
75c349dbc7Sjsg  * submitting the work to hardware. That fence is either:
76c349dbc7Sjsg  *
77c349dbc7Sjsg  *    - The syncobj's current fence if the syncobj is considered as a binary
78c349dbc7Sjsg  *      primitive.
79c349dbc7Sjsg  *    - The struct &dma_fence associated with a given point if the syncobj is
80c349dbc7Sjsg  *      considered as a timeline primitive.
81c349dbc7Sjsg  *
82c349dbc7Sjsg  * If the syncobj's fence is NULL or not present in the syncobj's timeline,
83c349dbc7Sjsg  * the enqueue operation is expected to fail.
84c349dbc7Sjsg  *
85c349dbc7Sjsg  * With a binary syncobj, all manipulation of the syncobj's fence happens in
86c349dbc7Sjsg  * terms of the current fence at the time the ioctl is called by userspace,
87c349dbc7Sjsg  * regardless of whether that operation is an immediate host-side operation
88c349dbc7Sjsg  * (signal or reset) or an operation which is enqueued in some driver
89c349dbc7Sjsg  * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
90c349dbc7Sjsg  * to manipulate a syncobj from the host by resetting its pointer to NULL or
91c349dbc7Sjsg  * setting its pointer to a fence which is already signaled.
92c349dbc7Sjsg  *
93c349dbc7Sjsg  * With a timeline syncobj, all manipulation of the syncobj's fence happens
94c349dbc7Sjsg  * in terms of a u64 value referring to a point in the timeline. See
95c349dbc7Sjsg  * dma_fence_chain_find_seqno() to see how a given point is found in the
96c349dbc7Sjsg  * timeline.
97c349dbc7Sjsg  *
98c349dbc7Sjsg  * Note that applications should be careful to always use the timeline set
99c349dbc7Sjsg  * of ioctls when dealing with a syncobj considered as a timeline. Using the
100c349dbc7Sjsg  * binary set of ioctls with a syncobj considered as a timeline could result
101c349dbc7Sjsg  * in incorrect synchronization. Binary syncobj usage is supported through
102c349dbc7Sjsg  * the timeline set of ioctls by using a point value of 0; this reproduces
103c349dbc7Sjsg  * the behavior of the binary set of ioctls (for example, replacing the
104c349dbc7Sjsg  * syncobj's fence when signaling).
105c349dbc7Sjsg  *
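 * As an illustrative sketch only (assuming an open DRM fd, a libdrm-style
 * drmIoctl() wrapper and a handle, syncobj_handle, naming an existing
 * timeline syncobj), signaling point 3 from the host could look like::
 *
 *     uint32_t handle = syncobj_handle;
 *     uint64_t point = 3;
 *     struct drm_syncobj_timeline_array args = {
 *             .handles = (uintptr_t)&handle,
 *             .points = (uintptr_t)&point,
 *             .count_handles = 1,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args))
 *             return -errno;
 *
 * Passing a point value of 0 instead reproduces the binary signal behavior
 * described above.
 *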
106c349dbc7Sjsg  *
107c349dbc7Sjsg  * Host-side wait on syncobjs
108c349dbc7Sjsg  * --------------------------
109c349dbc7Sjsg  *
110c349dbc7Sjsg  * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
111c349dbc7Sjsg  * host-side wait on all of the syncobj fences simultaneously.
112c349dbc7Sjsg  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
113c349dbc7Sjsg  * all of the syncobj fences to be signaled before it returns.
114c349dbc7Sjsg  * Otherwise, it returns once at least one syncobj fence has been signaled
115c349dbc7Sjsg  * and the index of a signaled fence is written back to the client.
116c349dbc7Sjsg  *
117c349dbc7Sjsg  * Unlike the enqueued GPU work dependencies which fail if they see a NULL
118c349dbc7Sjsg  * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
119c349dbc7Sjsg  * the host-side wait will first wait for the syncobj to receive a non-NULL
120c349dbc7Sjsg  * fence and then wait on that fence.
121c349dbc7Sjsg  * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
122c349dbc7Sjsg  * syncobjs in the array has a NULL fence, -EINVAL will be returned.
123c349dbc7Sjsg  * Assuming the syncobj starts off with a NULL fence, this allows a client
124c349dbc7Sjsg  * to do a host wait in one thread (or process) which waits on GPU work
125c349dbc7Sjsg  * submitted in another thread (or process) without having to manually
126c349dbc7Sjsg  * synchronize between the two.
127c349dbc7Sjsg  * This requirement is inherited from the Vulkan fence API.
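 *
 * As an illustrative userspace sketch only (assuming an open DRM fd, a
 * libdrm-style drmIoctl() wrapper, two existing syncobj handles and an
 * absolute timeout computed by the caller), waiting for either of the two
 * syncobjs could look like::
 *
 *     uint32_t handles[2] = { handle_a, handle_b };
 *     struct drm_syncobj_wait wait = {
 *             .handles = (uintptr_t)handles,
 *             .timeout_nsec = abs_timeout_ns,
 *             .count_handles = 2,
 *             .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
 *             return -errno;
 *
 * On success, wait.first_signaled holds the index of a signaled syncobj.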
128c349dbc7Sjsg  *
129c349dbc7Sjsg  * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
130c349dbc7Sjsg  * handles as well as an array of u64 points and does a host-side wait on all
131c349dbc7Sjsg  * of the syncobj fences at the given points simultaneously.
132c349dbc7Sjsg  *
133c349dbc7Sjsg  * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
134c349dbc7Sjsg  * fence to materialize on the timeline without waiting for the fence to be
135c349dbc7Sjsg  * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
136c349dbc7Sjsg  * requirement is inherited from the wait-before-signal behavior required by
137c349dbc7Sjsg  * the Vulkan timeline semaphore API.
138c349dbc7Sjsg  *
139f005ef32Sjsg  * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
140f005ef32Sjsg  * blocking: an eventfd will be signaled when the syncobj is. This is useful to
141f005ef32Sjsg  * integrate the wait in an event loop.
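 *
 * A minimal sketch only (assuming an open DRM fd, a libdrm-style drmIoctl()
 * wrapper and an eventfd, event_fd, created by the caller), registering the
 * eventfd for a timeline point could look like::
 *
 *     struct drm_syncobj_eventfd ev = {
 *             .handle = syncobj_handle,
 *             .point = point,
 *             .fd = event_fd,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &ev))
 *             return -errno;
 *
 * The eventfd can then be polled from an event loop alongside other fds.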
142f005ef32Sjsg  *
143c349dbc7Sjsg  *
144c349dbc7Sjsg  * Import/export of syncobjs
145c349dbc7Sjsg  * -------------------------
146c349dbc7Sjsg  *
147c349dbc7Sjsg  * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
148c349dbc7Sjsg  * provide two mechanisms for import/export of syncobjs.
149c349dbc7Sjsg  *
150c349dbc7Sjsg  * The first lets the client import or export an entire syncobj to a file
151c349dbc7Sjsg  * descriptor.
152c349dbc7Sjsg  * These fds are opaque and have no other use case except passing the
153c349dbc7Sjsg  * syncobj between processes.
154c349dbc7Sjsg  * All exported file descriptors and any syncobj handles created as a
155c349dbc7Sjsg  * result of importing those file descriptors own a reference to the
156c349dbc7Sjsg  * same underlying struct &drm_syncobj and the syncobj can be used
157c349dbc7Sjsg  * persistently across all the processes with which it is shared.
158c349dbc7Sjsg  * The syncobj is freed only once the last reference is dropped.
159c349dbc7Sjsg  * Unlike dma-buf, importing a syncobj creates a new handle (with its own
160c349dbc7Sjsg  * reference) for every import instead of de-duplicating.
161c349dbc7Sjsg  * The primary use-case of this persistent import/export is for shared
162c349dbc7Sjsg  * Vulkan fences and semaphores.
163c349dbc7Sjsg  *
164c349dbc7Sjsg  * The second import/export mechanism, which is indicated by
165c349dbc7Sjsg  * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
166c349dbc7Sjsg  * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE lets the client
167c349dbc7Sjsg  * import/export the syncobj's current fence from/to a &sync_file.
168c349dbc7Sjsg  * When a syncobj is exported to a sync file, that sync file wraps the
169c349dbc7Sjsg  * syncobj's fence at the time of export and any later signal or reset
170c349dbc7Sjsg  * operations on the syncobj will not affect the exported sync file.
171c349dbc7Sjsg  * When a sync file is imported into a syncobj, the syncobj's fence is set
172c349dbc7Sjsg  * to the fence wrapped by that sync file.
173c349dbc7Sjsg  * Because sync files are immutable, resetting or signaling the syncobj
174c349dbc7Sjsg  * will not affect any sync files whose fences have been imported into the
175c349dbc7Sjsg  * syncobj.
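 *
 * An illustrative sketch only (assuming an open DRM fd and a libdrm-style
 * drmIoctl() wrapper) of exporting a syncobj's current fence as a sync file
 * could look like::
 *
 *     struct drm_syncobj_handle args = {
 *             .handle = syncobj_handle,
 *             .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *             .fd = -1,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args))
 *             return -errno;
 *
 * On success, args.fd is a sync file wrapping the fence at the time of
 * export.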
176c349dbc7Sjsg  *
177c349dbc7Sjsg  *
178c349dbc7Sjsg  * Import/export of timeline points in timeline syncobjs
179c349dbc7Sjsg  * -----------------------------------------------------
180c349dbc7Sjsg  *
181c349dbc7Sjsg  * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
182c349dbc7Sjsg  * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
183c349dbc7Sjsg  * into another syncobj.
184c349dbc7Sjsg  *
185c349dbc7Sjsg  * Note that if you want to transfer a struct &dma_fence_chain at a given
186c349dbc7Sjsg  * point on a timeline syncobj from/into a binary syncobj, you can use the
187c349dbc7Sjsg  * point 0 to mean take/replace the fence in the syncobj.
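 *
 * As an illustrative sketch only (assuming an open DRM fd, a libdrm-style
 * drmIoctl() wrapper and existing timeline/binary handles), copying the
 * fence at point 5 of a timeline syncobj into a binary syncobj could look
 * like::
 *
 *     struct drm_syncobj_transfer xfer = {
 *             .src_handle = timeline_handle,
 *             .dst_handle = binary_handle,
 *             .src_point = 5,
 *             .dst_point = 0,
 *     };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TRANSFER, &xfer))
 *             return -errno;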
1887f4dd379Sjsg  */
1897f4dd379Sjsg 
190c349dbc7Sjsg #include <linux/anon_inodes.h>
191c74c9f56Sjsg #include <linux/dma-fence-unwrap.h>
192f005ef32Sjsg #include <linux/eventfd.h>
1937f4dd379Sjsg #include <linux/file.h>
1947f4dd379Sjsg #include <linux/fs.h>
1957f4dd379Sjsg #include <linux/sched/signal.h>
196c349dbc7Sjsg #include <linux/sync_file.h>
197c349dbc7Sjsg #include <linux/uaccess.h>
198c349dbc7Sjsg 
199c349dbc7Sjsg #include <drm/drm.h>
200c349dbc7Sjsg #include <drm/drm_drv.h>
201c349dbc7Sjsg #include <drm/drm_file.h>
202c349dbc7Sjsg #include <drm/drm_gem.h>
203c349dbc7Sjsg #include <drm/drm_print.h>
204c349dbc7Sjsg #include <drm/drm_syncobj.h>
205c349dbc7Sjsg #include <drm/drm_utils.h>
2067f4dd379Sjsg 
2077f4dd379Sjsg #include "drm_internal.h"
208c349dbc7Sjsg 
209c349dbc7Sjsg struct syncobj_wait_entry {
210c349dbc7Sjsg 	struct list_head node;
211c349dbc7Sjsg #ifdef __linux__
212c349dbc7Sjsg 	struct task_struct *task;
213c349dbc7Sjsg #else
214c349dbc7Sjsg 	struct proc *task;
215c349dbc7Sjsg #endif
216c349dbc7Sjsg 	struct dma_fence *fence;
217c349dbc7Sjsg 	struct dma_fence_cb fence_cb;
218c349dbc7Sjsg 	u64    point;
219c349dbc7Sjsg };
220c349dbc7Sjsg 
221c349dbc7Sjsg static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
222c349dbc7Sjsg 				      struct syncobj_wait_entry *wait);
2237f4dd379Sjsg 
224f005ef32Sjsg struct syncobj_eventfd_entry {
225f005ef32Sjsg 	struct list_head node;
226f005ef32Sjsg 	struct dma_fence *fence;
227f005ef32Sjsg 	struct dma_fence_cb fence_cb;
228f005ef32Sjsg 	struct drm_syncobj *syncobj;
229f005ef32Sjsg 	struct eventfd_ctx *ev_fd_ctx;
230f005ef32Sjsg 	u64 point;
231f005ef32Sjsg 	u32 flags;
232f005ef32Sjsg };
233f005ef32Sjsg 
234f005ef32Sjsg static void
235f005ef32Sjsg syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
236f005ef32Sjsg 			   struct syncobj_eventfd_entry *entry);
237f005ef32Sjsg 
2387f4dd379Sjsg /**
2397f4dd379Sjsg  * drm_syncobj_find - lookup and reference a sync object.
2407f4dd379Sjsg  * @file_private: drm file private pointer
2417f4dd379Sjsg  * @handle: sync object handle to lookup.
2427f4dd379Sjsg  *
2437f4dd379Sjsg  * Returns a reference to the syncobj pointed to by handle or NULL. The
2447f4dd379Sjsg  * reference must be released by calling drm_syncobj_put().
2457f4dd379Sjsg  */
2467f4dd379Sjsg struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
2477f4dd379Sjsg 				     u32 handle)
2487f4dd379Sjsg {
2497f4dd379Sjsg 	struct drm_syncobj *syncobj;
2507f4dd379Sjsg 
2517f4dd379Sjsg 	spin_lock(&file_private->syncobj_table_lock);
2527f4dd379Sjsg 
2537f4dd379Sjsg 	/* Check if we currently have a reference on the object */
2547f4dd379Sjsg 	syncobj = idr_find(&file_private->syncobj_idr, handle);
2557f4dd379Sjsg 	if (syncobj)
2567f4dd379Sjsg 		drm_syncobj_get(syncobj);
2577f4dd379Sjsg 
2587f4dd379Sjsg 	spin_unlock(&file_private->syncobj_table_lock);
2597f4dd379Sjsg 
2607f4dd379Sjsg 	return syncobj;
2617f4dd379Sjsg }
2627f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_find);
2637f4dd379Sjsg 
264c349dbc7Sjsg static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
265c349dbc7Sjsg 				       struct syncobj_wait_entry *wait)
2667f4dd379Sjsg {
267c349dbc7Sjsg 	struct dma_fence *fence;
2687f4dd379Sjsg 
269c349dbc7Sjsg 	if (wait->fence)
270c349dbc7Sjsg 		return;
2717f4dd379Sjsg 
2727f4dd379Sjsg 	spin_lock(&syncobj->lock);
2737f4dd379Sjsg 	/* We've already tried once to get a fence and failed.  Now that we
2747f4dd379Sjsg 	 * have the lock, try one more time just to be sure we don't add a
2757f4dd379Sjsg 	 * callback when a fence has already been set.
2767f4dd379Sjsg 	 */
277c349dbc7Sjsg 	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
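	/* Note that dma_fence_chain_find_seqno() may succeed yet set fence to
	 * NULL when the requested point is already signaled; the !fence check
	 * below then falls back to an already signaled stub fence.
	 */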
278c349dbc7Sjsg 	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
279c349dbc7Sjsg 		dma_fence_put(fence);
280c349dbc7Sjsg 		list_add_tail(&wait->node, &syncobj->cb_list);
281c349dbc7Sjsg 	} else if (!fence) {
282c349dbc7Sjsg 		wait->fence = dma_fence_get_stub();
2837f4dd379Sjsg 	} else {
284c349dbc7Sjsg 		wait->fence = fence;
2857f4dd379Sjsg 	}
2867f4dd379Sjsg 	spin_unlock(&syncobj->lock);
2877f4dd379Sjsg }
2887f4dd379Sjsg 
289c349dbc7Sjsg static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
290c349dbc7Sjsg 				    struct syncobj_wait_entry *wait)
291c349dbc7Sjsg {
292c349dbc7Sjsg 	if (!wait->node.next)
293c349dbc7Sjsg 		return;
294c349dbc7Sjsg 
295c349dbc7Sjsg 	spin_lock(&syncobj->lock);
296c349dbc7Sjsg 	list_del_init(&wait->node);
297c349dbc7Sjsg 	spin_unlock(&syncobj->lock);
298c349dbc7Sjsg }
299c349dbc7Sjsg 
300f005ef32Sjsg static void
301f005ef32Sjsg syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
302f005ef32Sjsg {
303f005ef32Sjsg 	eventfd_ctx_put(entry->ev_fd_ctx);
304f005ef32Sjsg 	dma_fence_put(entry->fence);
305f005ef32Sjsg 	/* This happens either inside the syncobj lock, or after the node has
306f005ef32Sjsg 	 * already been removed from the list.
307f005ef32Sjsg 	 */
308f005ef32Sjsg 	list_del(&entry->node);
309f005ef32Sjsg 	kfree(entry);
310f005ef32Sjsg }
311f005ef32Sjsg 
312f005ef32Sjsg #ifdef notyet
313f005ef32Sjsg static void
314f005ef32Sjsg drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
315f005ef32Sjsg 			struct syncobj_eventfd_entry *entry)
316f005ef32Sjsg {
317f005ef32Sjsg 	spin_lock(&syncobj->lock);
318f005ef32Sjsg 	list_add_tail(&entry->node, &syncobj->ev_fd_list);
319f005ef32Sjsg 	syncobj_eventfd_entry_func(syncobj, entry);
320f005ef32Sjsg 	spin_unlock(&syncobj->lock);
321f005ef32Sjsg }
322f005ef32Sjsg #endif
323f005ef32Sjsg 
3247f4dd379Sjsg /**
325c349dbc7Sjsg  * drm_syncobj_add_point - add new timeline point to the syncobj
326c349dbc7Sjsg  * @syncobj: sync object to add timeline point do
327c349dbc7Sjsg  * @chain: chain node to use to add the point
328c349dbc7Sjsg  * @fence: fence to encapsulate in the chain node
329c349dbc7Sjsg  * @point: sequence number to use for the point
3307f4dd379Sjsg  *
331c349dbc7Sjsg  * Add the chain node as new timeline point to the syncobj.
3327f4dd379Sjsg  */
333c349dbc7Sjsg void drm_syncobj_add_point(struct drm_syncobj *syncobj,
334c349dbc7Sjsg 			   struct dma_fence_chain *chain,
335c349dbc7Sjsg 			   struct dma_fence *fence,
336c349dbc7Sjsg 			   uint64_t point)
3377f4dd379Sjsg {
338f005ef32Sjsg 	struct syncobj_wait_entry *wait_cur, *wait_tmp;
339f005ef32Sjsg 	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
340c349dbc7Sjsg 	struct dma_fence *prev;
3417f4dd379Sjsg 
342c349dbc7Sjsg 	dma_fence_get(fence);
343c349dbc7Sjsg 
3447f4dd379Sjsg 	spin_lock(&syncobj->lock);
345c349dbc7Sjsg 
346c349dbc7Sjsg 	prev = drm_syncobj_fence_get(syncobj);
347c349dbc7Sjsg 	/* Adding an unordered point to the timeline could cause the payload returned from the query ioctl to be 0! */
348c349dbc7Sjsg 	if (prev && prev->seqno >= point)
349ad8b1aafSjsg 		DRM_DEBUG("You are adding an unordered point to the timeline!\n");
350c349dbc7Sjsg 	dma_fence_chain_init(chain, prev, fence, point);
351c349dbc7Sjsg 	rcu_assign_pointer(syncobj->fence, &chain->base);
352c349dbc7Sjsg 
353f005ef32Sjsg 	list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
354f005ef32Sjsg 		syncobj_wait_syncobj_func(syncobj, wait_cur);
355f005ef32Sjsg 	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
356f005ef32Sjsg 		syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
3577f4dd379Sjsg 	spin_unlock(&syncobj->lock);
358c349dbc7Sjsg 
359c349dbc7Sjsg 	/* Walk the chain once to trigger garbage collection */
360c349dbc7Sjsg 	dma_fence_chain_for_each(fence, prev);
361c349dbc7Sjsg 	dma_fence_put(prev);
3627f4dd379Sjsg }
363c349dbc7Sjsg EXPORT_SYMBOL(drm_syncobj_add_point);
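
/*
 * A minimal driver-side sketch of adding a timeline point, mirroring what
 * drm_syncobj_transfer_to_timeline() below does: the chain node is allocated
 * up front since the allocation may fail, and the caller drops its own fence
 * reference afterwards because drm_syncobj_add_point() takes its own:
 *
 *	chain = dma_fence_chain_alloc();
 *	if (!chain)
 *		return -ENOMEM;
 *	drm_syncobj_add_point(syncobj, chain, fence, point);
 *	dma_fence_put(fence);
 */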
3647f4dd379Sjsg 
3657f4dd379Sjsg /**
3667f4dd379Sjsg  * drm_syncobj_replace_fence - replace fence in a sync object.
3677f4dd379Sjsg  * @syncobj: Sync object to replace fence in
3687f4dd379Sjsg  * @fence: fence to install in sync file.
3697f4dd379Sjsg  *
3707f4dd379Sjsg  * This replaces the fence on a sync object.
3717f4dd379Sjsg  */
3727f4dd379Sjsg void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
3737f4dd379Sjsg 			       struct dma_fence *fence)
3747f4dd379Sjsg {
3757f4dd379Sjsg 	struct dma_fence *old_fence;
376f005ef32Sjsg 	struct syncobj_wait_entry *wait_cur, *wait_tmp;
377f005ef32Sjsg 	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
3787f4dd379Sjsg 
3797f4dd379Sjsg 	if (fence)
3807f4dd379Sjsg 		dma_fence_get(fence);
3817f4dd379Sjsg 
3827f4dd379Sjsg 	spin_lock(&syncobj->lock);
3837f4dd379Sjsg 
3847f4dd379Sjsg 	old_fence = rcu_dereference_protected(syncobj->fence,
3857f4dd379Sjsg 					      lockdep_is_held(&syncobj->lock));
3867f4dd379Sjsg 	rcu_assign_pointer(syncobj->fence, fence);
3877f4dd379Sjsg 
3887f4dd379Sjsg 	if (fence != old_fence) {
389f005ef32Sjsg 		list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
390f005ef32Sjsg 			syncobj_wait_syncobj_func(syncobj, wait_cur);
391f005ef32Sjsg 		list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
392f005ef32Sjsg 			syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
3937f4dd379Sjsg 	}
3947f4dd379Sjsg 
3957f4dd379Sjsg 	spin_unlock(&syncobj->lock);
3967f4dd379Sjsg 
3977f4dd379Sjsg 	dma_fence_put(old_fence);
3987f4dd379Sjsg }
3997f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_replace_fence);
4007f4dd379Sjsg 
401c349dbc7Sjsg /**
402c349dbc7Sjsg  * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
403c349dbc7Sjsg  * @syncobj: sync object to assign the fence on
404c349dbc7Sjsg  *
405c349dbc7Sjsg  * Assign an already signaled stub fence to the sync object.
406c349dbc7Sjsg  */
4075ca02815Sjsg static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
4087f4dd379Sjsg {
40952ffd059Sjsg 	struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());
4105ca02815Sjsg 
411f69d21f3Sjsg 	if (!fence)
412f69d21f3Sjsg 		return -ENOMEM;
413c349dbc7Sjsg 
414c349dbc7Sjsg 	drm_syncobj_replace_fence(syncobj, fence);
415c349dbc7Sjsg 	dma_fence_put(fence);
4165ca02815Sjsg 	return 0;
4177f4dd379Sjsg }
4187f4dd379Sjsg 
419c349dbc7Sjsg /* 5s default for wait submission */
420c349dbc7Sjsg #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
4217f4dd379Sjsg /**
4227f4dd379Sjsg  * drm_syncobj_find_fence - lookup and reference the fence in a sync object
4237f4dd379Sjsg  * @file_private: drm file private pointer
4247f4dd379Sjsg  * @handle: sync object handle to lookup.
425c349dbc7Sjsg  * @point: timeline point
426c349dbc7Sjsg  * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
4277f4dd379Sjsg  * @fence: out parameter for the fence
4287f4dd379Sjsg  *
4297f4dd379Sjsg  * This is just a convenience function that combines drm_syncobj_find() and
4307f4dd379Sjsg  * drm_syncobj_fence_get().
4317f4dd379Sjsg  *
4327f4dd379Sjsg  * Returns 0 on success or a negative error value on failure. On success @fence
4337f4dd379Sjsg  * contains a reference to the fence, which must be released by calling
4347f4dd379Sjsg  * dma_fence_put().
4357f4dd379Sjsg  */
4367f4dd379Sjsg int drm_syncobj_find_fence(struct drm_file *file_private,
437c349dbc7Sjsg 			   u32 handle, u64 point, u64 flags,
4387f4dd379Sjsg 			   struct dma_fence **fence)
4397f4dd379Sjsg {
4407f4dd379Sjsg 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
441c349dbc7Sjsg 	struct syncobj_wait_entry wait;
442c349dbc7Sjsg 	u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
443c349dbc7Sjsg 	int ret;
4447f4dd379Sjsg 
4457f4dd379Sjsg 	if (!syncobj)
4467f4dd379Sjsg 		return -ENOENT;
4477f4dd379Sjsg 
4485ca02815Sjsg 	/* Waiting for userspace with locks held is illegal because that can
4495ca02815Sjsg 	 * trivially deadlock with page faults, for example. Make lockdep
4505ca02815Sjsg 	 * complain about it early on.
4515ca02815Sjsg 	 */
4525ca02815Sjsg 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
4535ca02815Sjsg 		might_sleep();
4545ca02815Sjsg 		lockdep_assert_none_held_once();
4555ca02815Sjsg 	}
4565ca02815Sjsg 
4577f4dd379Sjsg 	*fence = drm_syncobj_fence_get(syncobj);
458c349dbc7Sjsg 
459c349dbc7Sjsg 	if (*fence) {
460c349dbc7Sjsg 		ret = dma_fence_chain_find_seqno(fence, point);
4616115a8aaSjsg 		if (!ret) {
4626115a8aaSjsg 			/* If the requested seqno is already signaled,
4636115a8aaSjsg 			 * drm_syncobj_find_fence may return a NULL
4646115a8aaSjsg 			 * fence. To make sure the recipient gets
4656115a8aaSjsg 			 * signalled, use a new fence instead.
4666115a8aaSjsg 			 */
4676115a8aaSjsg 			if (!*fence)
4686115a8aaSjsg 				*fence = dma_fence_get_stub();
4696115a8aaSjsg 
470ad8b1aafSjsg 			goto out;
4716115a8aaSjsg 		}
472c349dbc7Sjsg 		dma_fence_put(*fence);
473c349dbc7Sjsg 	} else {
4747f4dd379Sjsg 		ret = -EINVAL;
4757f4dd379Sjsg 	}
476c349dbc7Sjsg 
477c349dbc7Sjsg 	if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
478ad8b1aafSjsg 		goto out;
479c349dbc7Sjsg 
480c349dbc7Sjsg 	memset(&wait, 0, sizeof(wait));
48154e346a1Sjsg #ifdef __linux__
482c349dbc7Sjsg 	wait.task = current;
48354e346a1Sjsg #else
48454e346a1Sjsg 	wait.task = curproc;
48554e346a1Sjsg #endif
486c349dbc7Sjsg 	wait.point = point;
487c349dbc7Sjsg 	drm_syncobj_fence_add_wait(syncobj, &wait);
488c349dbc7Sjsg 
489c349dbc7Sjsg 	do {
490c349dbc7Sjsg 		set_current_state(TASK_INTERRUPTIBLE);
491c349dbc7Sjsg 		if (wait.fence) {
492c349dbc7Sjsg 			ret = 0;
493c349dbc7Sjsg 			break;
494c349dbc7Sjsg 		}
495c349dbc7Sjsg 		if (timeout == 0) {
496c349dbc7Sjsg 			ret = -ETIME;
497c349dbc7Sjsg 			break;
498c349dbc7Sjsg 		}
499c349dbc7Sjsg 
500c349dbc7Sjsg 		if (signal_pending(current)) {
501c349dbc7Sjsg 			ret = -ERESTARTSYS;
502c349dbc7Sjsg 			break;
503c349dbc7Sjsg 		}
504c349dbc7Sjsg 
505c349dbc7Sjsg 		timeout = schedule_timeout(timeout);
506c349dbc7Sjsg 	} while (1);
507c349dbc7Sjsg 
508c349dbc7Sjsg 	__set_current_state(TASK_RUNNING);
509c349dbc7Sjsg 	*fence = wait.fence;
510c349dbc7Sjsg 
511c349dbc7Sjsg 	if (wait.node.next)
512c349dbc7Sjsg 		drm_syncobj_remove_wait(syncobj, &wait);
513c349dbc7Sjsg 
514ad8b1aafSjsg out:
515ad8b1aafSjsg 	drm_syncobj_put(syncobj);
516ad8b1aafSjsg 
517c349dbc7Sjsg 	return ret;
5187f4dd379Sjsg }
5197f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_find_fence);
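
/*
 * A minimal caller-side sketch, mirroring drm_syncobj_export_sync_file()
 * below: look up the current fence of a binary syncobj (point 0, no wait
 * flags), use it (consume_fence() is a hypothetical consumer), and drop the
 * reference when done:
 *
 *	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
 *	if (ret)
 *		return ret;
 *	consume_fence(fence);
 *	dma_fence_put(fence);
 */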
5207f4dd379Sjsg 
5217f4dd379Sjsg /**
5227f4dd379Sjsg  * drm_syncobj_free - free a sync object.
5237f4dd379Sjsg  * @kref: kref to free.
5247f4dd379Sjsg  *
5257f4dd379Sjsg  * Only to be called from kref_put in drm_syncobj_put.
5267f4dd379Sjsg  */
5277f4dd379Sjsg void drm_syncobj_free(struct kref *kref)
5287f4dd379Sjsg {
5297f4dd379Sjsg 	struct drm_syncobj *syncobj = container_of(kref,
5307f4dd379Sjsg 						   struct drm_syncobj,
5317f4dd379Sjsg 						   refcount);
532f005ef32Sjsg 	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
533f005ef32Sjsg 
5347f4dd379Sjsg 	drm_syncobj_replace_fence(syncobj, NULL);
535f005ef32Sjsg 
536f005ef32Sjsg 	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
537f005ef32Sjsg 		syncobj_eventfd_entry_free(ev_fd_cur);
538f005ef32Sjsg 
5397f4dd379Sjsg 	kfree(syncobj);
5407f4dd379Sjsg }
5417f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_free);
5427f4dd379Sjsg 
5437f4dd379Sjsg /**
5447f4dd379Sjsg  * drm_syncobj_create - create a new syncobj
5457f4dd379Sjsg  * @out_syncobj: returned syncobj
5467f4dd379Sjsg  * @flags: DRM_SYNCOBJ_* flags
5477f4dd379Sjsg  * @fence: if non-NULL, the syncobj will represent this fence
5487f4dd379Sjsg  *
5497f4dd379Sjsg  * This is the first function to create a sync object. After creating, drivers
5507f4dd379Sjsg  * probably want to make it available to userspace, either through
5517f4dd379Sjsg  * drm_syncobj_get_handle() or drm_syncobj_get_fd().
5527f4dd379Sjsg  *
5537f4dd379Sjsg  * Returns 0 on success or a negative error value on failure.
5547f4dd379Sjsg  */
5557f4dd379Sjsg int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
5567f4dd379Sjsg 		       struct dma_fence *fence)
5577f4dd379Sjsg {
5585ca02815Sjsg 	int ret;
5597f4dd379Sjsg 	struct drm_syncobj *syncobj;
5607f4dd379Sjsg 
5617f4dd379Sjsg 	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
5627f4dd379Sjsg 	if (!syncobj)
5637f4dd379Sjsg 		return -ENOMEM;
5647f4dd379Sjsg 
5657f4dd379Sjsg 	kref_init(&syncobj->refcount);
5667f4dd379Sjsg 	INIT_LIST_HEAD(&syncobj->cb_list);
567f005ef32Sjsg 	INIT_LIST_HEAD(&syncobj->ev_fd_list);
5687f4dd379Sjsg 	mtx_init(&syncobj->lock, IPL_NONE);
5697f4dd379Sjsg 
5705ca02815Sjsg 	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
5715ca02815Sjsg 		ret = drm_syncobj_assign_null_handle(syncobj);
5725ca02815Sjsg 		if (ret < 0) {
5735ca02815Sjsg 			drm_syncobj_put(syncobj);
5745ca02815Sjsg 			return ret;
5755ca02815Sjsg 		}
5765ca02815Sjsg 	}
5777f4dd379Sjsg 
5787f4dd379Sjsg 	if (fence)
5797f4dd379Sjsg 		drm_syncobj_replace_fence(syncobj, fence);
5807f4dd379Sjsg 
5817f4dd379Sjsg 	*out_syncobj = syncobj;
5827f4dd379Sjsg 	return 0;
5837f4dd379Sjsg }
5847f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_create);
5857f4dd379Sjsg 
5867f4dd379Sjsg /**
5877f4dd379Sjsg  * drm_syncobj_get_handle - get a handle from a syncobj
5887f4dd379Sjsg  * @file_private: drm file private pointer
5897f4dd379Sjsg  * @syncobj: Sync object to export
5907f4dd379Sjsg  * @handle: out parameter with the new handle
5917f4dd379Sjsg  *
5927f4dd379Sjsg  * Exports a sync object created with drm_syncobj_create() as a handle on
5937f4dd379Sjsg  * @file_private to userspace.
5947f4dd379Sjsg  *
5957f4dd379Sjsg  * Returns 0 on success or a negative error value on failure.
5967f4dd379Sjsg  */
5977f4dd379Sjsg int drm_syncobj_get_handle(struct drm_file *file_private,
5987f4dd379Sjsg 			   struct drm_syncobj *syncobj, u32 *handle)
5997f4dd379Sjsg {
6007f4dd379Sjsg 	int ret;
6017f4dd379Sjsg 
6027f4dd379Sjsg 	/* take a reference to put in the idr */
6037f4dd379Sjsg 	drm_syncobj_get(syncobj);
6047f4dd379Sjsg 
6057f4dd379Sjsg 	idr_preload(GFP_KERNEL);
6067f4dd379Sjsg 	spin_lock(&file_private->syncobj_table_lock);
6077f4dd379Sjsg 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
6087f4dd379Sjsg 	spin_unlock(&file_private->syncobj_table_lock);
6097f4dd379Sjsg 
6107f4dd379Sjsg 	idr_preload_end();
6117f4dd379Sjsg 
6127f4dd379Sjsg 	if (ret < 0) {
6137f4dd379Sjsg 		drm_syncobj_put(syncobj);
6147f4dd379Sjsg 		return ret;
6157f4dd379Sjsg 	}
6167f4dd379Sjsg 
6177f4dd379Sjsg 	*handle = ret;
6187f4dd379Sjsg 	return 0;
6197f4dd379Sjsg }
6207f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_get_handle);
6217f4dd379Sjsg 
6227f4dd379Sjsg static int drm_syncobj_create_as_handle(struct drm_file *file_private,
6237f4dd379Sjsg 					u32 *handle, uint32_t flags)
6247f4dd379Sjsg {
6257f4dd379Sjsg 	int ret;
6267f4dd379Sjsg 	struct drm_syncobj *syncobj;
6277f4dd379Sjsg 
6287f4dd379Sjsg 	ret = drm_syncobj_create(&syncobj, flags, NULL);
6297f4dd379Sjsg 	if (ret)
6307f4dd379Sjsg 		return ret;
6317f4dd379Sjsg 
6327f4dd379Sjsg 	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
6337f4dd379Sjsg 	drm_syncobj_put(syncobj);
6347f4dd379Sjsg 	return ret;
6357f4dd379Sjsg }
6367f4dd379Sjsg 
6377f4dd379Sjsg static int drm_syncobj_destroy(struct drm_file *file_private,
6387f4dd379Sjsg 			       u32 handle)
6397f4dd379Sjsg {
6407f4dd379Sjsg 	struct drm_syncobj *syncobj;
6417f4dd379Sjsg 
6427f4dd379Sjsg 	spin_lock(&file_private->syncobj_table_lock);
6437f4dd379Sjsg 	syncobj = idr_remove(&file_private->syncobj_idr, handle);
6447f4dd379Sjsg 	spin_unlock(&file_private->syncobj_table_lock);
6457f4dd379Sjsg 
6467f4dd379Sjsg 	if (!syncobj)
6477f4dd379Sjsg 		return -EINVAL;
6487f4dd379Sjsg 
6497f4dd379Sjsg 	drm_syncobj_put(syncobj);
6507f4dd379Sjsg 	return 0;
6517f4dd379Sjsg }
6527f4dd379Sjsg 
6537f4dd379Sjsg #ifdef notyet
6547f4dd379Sjsg static int drm_syncobj_file_release(struct inode *inode, struct file *file)
6557f4dd379Sjsg {
6567f4dd379Sjsg 	struct drm_syncobj *syncobj = file->private_data;
6577f4dd379Sjsg 
6587f4dd379Sjsg 	drm_syncobj_put(syncobj);
6597f4dd379Sjsg 	return 0;
6607f4dd379Sjsg }
6617f4dd379Sjsg 
6627f4dd379Sjsg static const struct file_operations drm_syncobj_file_fops = {
6637f4dd379Sjsg 	.release = drm_syncobj_file_release,
6647f4dd379Sjsg };
6657f4dd379Sjsg #endif
6667f4dd379Sjsg 
6677f4dd379Sjsg /**
6687f4dd379Sjsg  * drm_syncobj_get_fd - get a file descriptor from a syncobj
6697f4dd379Sjsg  * @syncobj: Sync object to export
6707f4dd379Sjsg  * @p_fd: out parameter with the new file descriptor
6717f4dd379Sjsg  *
6727f4dd379Sjsg  * Exports a sync object created with drm_syncobj_create() as a file descriptor.
6737f4dd379Sjsg  *
6747f4dd379Sjsg  * Returns 0 on success or a negative error value on failure.
6757f4dd379Sjsg  */
6767f4dd379Sjsg int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
6777f4dd379Sjsg {
6787f4dd379Sjsg 	STUB();
679c349dbc7Sjsg 	return -ENOSYS;
6807f4dd379Sjsg #ifdef notyet
6817f4dd379Sjsg 	struct file *file;
6827f4dd379Sjsg 	int fd;
6837f4dd379Sjsg 
6847f4dd379Sjsg 	fd = get_unused_fd_flags(O_CLOEXEC);
6857f4dd379Sjsg 	if (fd < 0)
6867f4dd379Sjsg 		return fd;
6877f4dd379Sjsg 
6887f4dd379Sjsg 	file = anon_inode_getfile("syncobj_file",
6897f4dd379Sjsg 				  &drm_syncobj_file_fops,
6907f4dd379Sjsg 				  syncobj, 0);
6917f4dd379Sjsg 	if (IS_ERR(file)) {
6927f4dd379Sjsg 		put_unused_fd(fd);
6937f4dd379Sjsg 		return PTR_ERR(file);
6947f4dd379Sjsg 	}
6957f4dd379Sjsg 
6967f4dd379Sjsg 	drm_syncobj_get(syncobj);
6977f4dd379Sjsg 	fd_install(fd, file);
6987f4dd379Sjsg 
6997f4dd379Sjsg 	*p_fd = fd;
7007f4dd379Sjsg 	return 0;
7017f4dd379Sjsg #endif
7027f4dd379Sjsg }
7037f4dd379Sjsg EXPORT_SYMBOL(drm_syncobj_get_fd);
7047f4dd379Sjsg 
7057f4dd379Sjsg static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
7067f4dd379Sjsg 				    u32 handle, int *p_fd)
7077f4dd379Sjsg {
7087f4dd379Sjsg 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
7097f4dd379Sjsg 	int ret;
7107f4dd379Sjsg 
7117f4dd379Sjsg 	if (!syncobj)
7127f4dd379Sjsg 		return -EINVAL;
7137f4dd379Sjsg 
7147f4dd379Sjsg 	ret = drm_syncobj_get_fd(syncobj, p_fd);
7157f4dd379Sjsg 	drm_syncobj_put(syncobj);
7167f4dd379Sjsg 	return ret;
7177f4dd379Sjsg }
7187f4dd379Sjsg 
7197f4dd379Sjsg static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
7207f4dd379Sjsg 				    int fd, u32 *handle)
7217f4dd379Sjsg {
7227f4dd379Sjsg 	STUB();
7237f4dd379Sjsg 	return -ENOSYS;
7247f4dd379Sjsg #ifdef notyet
7257f4dd379Sjsg 	struct drm_syncobj *syncobj;
726c349dbc7Sjsg 	struct fd f = fdget(fd);
7277f4dd379Sjsg 	int ret;
7287f4dd379Sjsg 
729c349dbc7Sjsg 	if (!f.file)
7307f4dd379Sjsg 		return -EINVAL;
7317f4dd379Sjsg 
732c349dbc7Sjsg 	if (f.file->f_op != &drm_syncobj_file_fops) {
733c349dbc7Sjsg 		fdput(f);
7347f4dd379Sjsg 		return -EINVAL;
7357f4dd379Sjsg 	}
7367f4dd379Sjsg 
7377f4dd379Sjsg 	/* take a reference to put in the idr */
738c349dbc7Sjsg 	syncobj = f.file->private_data;
7397f4dd379Sjsg 	drm_syncobj_get(syncobj);
7407f4dd379Sjsg 
7417f4dd379Sjsg 	idr_preload(GFP_KERNEL);
7427f4dd379Sjsg 	spin_lock(&file_private->syncobj_table_lock);
7437f4dd379Sjsg 	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
7447f4dd379Sjsg 	spin_unlock(&file_private->syncobj_table_lock);
7457f4dd379Sjsg 	idr_preload_end();
7467f4dd379Sjsg 
7477f4dd379Sjsg 	if (ret > 0) {
7487f4dd379Sjsg 		*handle = ret;
7497f4dd379Sjsg 		ret = 0;
7507f4dd379Sjsg 	} else
7517f4dd379Sjsg 		drm_syncobj_put(syncobj);
7527f4dd379Sjsg 
753c349dbc7Sjsg 	fdput(f);
7547f4dd379Sjsg 	return ret;
7557f4dd379Sjsg #endif
7567f4dd379Sjsg }
7577f4dd379Sjsg 
7587f4dd379Sjsg static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
7597f4dd379Sjsg 					      int fd, int handle)
7607f4dd379Sjsg {
7617f4dd379Sjsg 	struct dma_fence *fence = sync_file_get_fence(fd);
7627f4dd379Sjsg 	struct drm_syncobj *syncobj;
7637f4dd379Sjsg 
7647f4dd379Sjsg 	if (!fence)
7657f4dd379Sjsg 		return -EINVAL;
7667f4dd379Sjsg 
7677f4dd379Sjsg 	syncobj = drm_syncobj_find(file_private, handle);
7687f4dd379Sjsg 	if (!syncobj) {
7697f4dd379Sjsg 		dma_fence_put(fence);
7707f4dd379Sjsg 		return -ENOENT;
7717f4dd379Sjsg 	}
7727f4dd379Sjsg 
7737f4dd379Sjsg 	drm_syncobj_replace_fence(syncobj, fence);
7747f4dd379Sjsg 	dma_fence_put(fence);
7757f4dd379Sjsg 	drm_syncobj_put(syncobj);
7767f4dd379Sjsg 	return 0;
7777f4dd379Sjsg }
7787f4dd379Sjsg 
7797f4dd379Sjsg static int drm_syncobj_export_sync_file(struct drm_file *file_private,
7807f4dd379Sjsg 					int handle, int *p_fd)
7817f4dd379Sjsg {
7827f4dd379Sjsg 	int ret;
7837f4dd379Sjsg 	struct dma_fence *fence;
7847f4dd379Sjsg 	struct sync_file *sync_file;
7857f4dd379Sjsg 	int fd = get_unused_fd_flags(O_CLOEXEC);
7867f4dd379Sjsg 
7877f4dd379Sjsg 	if (fd < 0)
7887f4dd379Sjsg 		return fd;
7897f4dd379Sjsg 
790c349dbc7Sjsg 	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
7917f4dd379Sjsg 	if (ret)
7927f4dd379Sjsg 		goto err_put_fd;
7937f4dd379Sjsg 
7947f4dd379Sjsg 	sync_file = sync_file_create(fence);
7957f4dd379Sjsg 
7967f4dd379Sjsg 	dma_fence_put(fence);
7977f4dd379Sjsg 
7987f4dd379Sjsg 	if (!sync_file) {
7997f4dd379Sjsg 		ret = -EINVAL;
8007f4dd379Sjsg 		goto err_put_fd;
8017f4dd379Sjsg 	}
8027f4dd379Sjsg 
8037f4dd379Sjsg 	fd_install(fd, sync_file->file);
8047f4dd379Sjsg 
8057f4dd379Sjsg 	*p_fd = fd;
8067f4dd379Sjsg 	return 0;
8077f4dd379Sjsg err_put_fd:
8087f4dd379Sjsg 	put_unused_fd(fd);
8097f4dd379Sjsg 	return ret;
8107f4dd379Sjsg }
8117f4dd379Sjsg /**
8125ca02815Sjsg  * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
8137f4dd379Sjsg  * @file_private: drm file-private structure to set up
8147f4dd379Sjsg  *
8157f4dd379Sjsg  * Called at device open time, sets up the structure for handling refcounting
8167f4dd379Sjsg  * of sync objects.
8177f4dd379Sjsg  */
8187f4dd379Sjsg void
8197f4dd379Sjsg drm_syncobj_open(struct drm_file *file_private)
8207f4dd379Sjsg {
8217f4dd379Sjsg 	idr_init_base(&file_private->syncobj_idr, 1);
8227f4dd379Sjsg 	mtx_init(&file_private->syncobj_table_lock, IPL_NONE);
8237f4dd379Sjsg }
8247f4dd379Sjsg 
8257f4dd379Sjsg static int
8267f4dd379Sjsg drm_syncobj_release_handle(int id, void *ptr, void *data)
8277f4dd379Sjsg {
8287f4dd379Sjsg 	struct drm_syncobj *syncobj = ptr;
8297f4dd379Sjsg 
8307f4dd379Sjsg 	drm_syncobj_put(syncobj);
8317f4dd379Sjsg 	return 0;
8327f4dd379Sjsg }
8337f4dd379Sjsg 
8347f4dd379Sjsg /**
8357f4dd379Sjsg  * drm_syncobj_release - release file-private sync object resources
8367f4dd379Sjsg  * @file_private: drm file-private structure to clean up
8377f4dd379Sjsg  *
8387f4dd379Sjsg  * Called at close time when the filp is going away.
8397f4dd379Sjsg  *
8407f4dd379Sjsg  * Releases any remaining references on objects by this filp.
8417f4dd379Sjsg  */
8427f4dd379Sjsg void
8437f4dd379Sjsg drm_syncobj_release(struct drm_file *file_private)
8447f4dd379Sjsg {
8457f4dd379Sjsg 	idr_for_each(&file_private->syncobj_idr,
8467f4dd379Sjsg 		     &drm_syncobj_release_handle, file_private);
8477f4dd379Sjsg 	idr_destroy(&file_private->syncobj_idr);
8487f4dd379Sjsg }
8497f4dd379Sjsg 
8507f4dd379Sjsg int
8517f4dd379Sjsg drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
8527f4dd379Sjsg 			 struct drm_file *file_private)
8537f4dd379Sjsg {
8547f4dd379Sjsg 	struct drm_syncobj_create *args = data;
8557f4dd379Sjsg 
8567f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
857c349dbc7Sjsg 		return -EOPNOTSUPP;
8587f4dd379Sjsg 
8597f4dd379Sjsg 	/* no valid flags yet */
8607f4dd379Sjsg 	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
8617f4dd379Sjsg 		return -EINVAL;
8627f4dd379Sjsg 
8637f4dd379Sjsg 	return drm_syncobj_create_as_handle(file_private,
8647f4dd379Sjsg 					    &args->handle, args->flags);
8657f4dd379Sjsg }
8667f4dd379Sjsg 
8677f4dd379Sjsg int
8687f4dd379Sjsg drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
8697f4dd379Sjsg 			  struct drm_file *file_private)
8707f4dd379Sjsg {
8717f4dd379Sjsg 	struct drm_syncobj_destroy *args = data;
8727f4dd379Sjsg 
8737f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
874c349dbc7Sjsg 		return -EOPNOTSUPP;
8757f4dd379Sjsg 
8767f4dd379Sjsg 	/* make sure padding is empty */
8777f4dd379Sjsg 	if (args->pad)
8787f4dd379Sjsg 		return -EINVAL;
8797f4dd379Sjsg 	return drm_syncobj_destroy(file_private, args->handle);
8807f4dd379Sjsg }
8817f4dd379Sjsg 
8827f4dd379Sjsg int
8837f4dd379Sjsg drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
8847f4dd379Sjsg 				   struct drm_file *file_private)
8857f4dd379Sjsg {
8867f4dd379Sjsg 	struct drm_syncobj_handle *args = data;
8877f4dd379Sjsg 
8887f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
889c349dbc7Sjsg 		return -EOPNOTSUPP;
8907f4dd379Sjsg 
8917f4dd379Sjsg 	if (args->pad)
8927f4dd379Sjsg 		return -EINVAL;
8937f4dd379Sjsg 
8947f4dd379Sjsg 	if (args->flags != 0 &&
8957f4dd379Sjsg 	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
8967f4dd379Sjsg 		return -EINVAL;
8977f4dd379Sjsg 
8987f4dd379Sjsg 	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
8997f4dd379Sjsg 		return drm_syncobj_export_sync_file(file_private, args->handle,
9007f4dd379Sjsg 						    &args->fd);
9017f4dd379Sjsg 
9027f4dd379Sjsg 	return drm_syncobj_handle_to_fd(file_private, args->handle,
9037f4dd379Sjsg 					&args->fd);
9047f4dd379Sjsg }
9057f4dd379Sjsg 
9067f4dd379Sjsg int
9077f4dd379Sjsg drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
9087f4dd379Sjsg 				   struct drm_file *file_private)
9097f4dd379Sjsg {
9107f4dd379Sjsg 	struct drm_syncobj_handle *args = data;
9117f4dd379Sjsg 
9127f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
913c349dbc7Sjsg 		return -EOPNOTSUPP;
9147f4dd379Sjsg 
9157f4dd379Sjsg 	if (args->pad)
9167f4dd379Sjsg 		return -EINVAL;
9177f4dd379Sjsg 
9187f4dd379Sjsg 	if (args->flags != 0 &&
9197f4dd379Sjsg 	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
9207f4dd379Sjsg 		return -EINVAL;
9217f4dd379Sjsg 
9227f4dd379Sjsg 	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
9237f4dd379Sjsg 		return drm_syncobj_import_sync_file_fence(file_private,
9247f4dd379Sjsg 							  args->fd,
9257f4dd379Sjsg 							  args->handle);
9267f4dd379Sjsg 
9277f4dd379Sjsg 	return drm_syncobj_fd_to_handle(file_private, args->fd,
9287f4dd379Sjsg 					&args->handle);
9297f4dd379Sjsg }
9307f4dd379Sjsg 
9317583019bSjsg 
9327583019bSjsg /*
9337583019bSjsg  * Try to flatten a dma_fence_chain into a dma_fence_array so that it can be
9347583019bSjsg  * added as a timeline fence to a chain again.
9357583019bSjsg  */
9367583019bSjsg static int drm_syncobj_flatten_chain(struct dma_fence **f)
9377583019bSjsg {
9387583019bSjsg 	struct dma_fence_chain *chain = to_dma_fence_chain(*f);
9397583019bSjsg 	struct dma_fence *tmp, **fences;
9407583019bSjsg 	struct dma_fence_array *array;
9417583019bSjsg 	unsigned int count;
9427583019bSjsg 
9437583019bSjsg 	if (!chain)
9447583019bSjsg 		return 0;
9457583019bSjsg 
9467583019bSjsg 	count = 0;
9477583019bSjsg 	dma_fence_chain_for_each(tmp, &chain->base)
9487583019bSjsg 		++count;
9497583019bSjsg 
9507583019bSjsg 	fences = kmalloc_array(count, sizeof(*fences), GFP_KERNEL);
9517583019bSjsg 	if (!fences)
9527583019bSjsg 		return -ENOMEM;
9537583019bSjsg 
9547583019bSjsg 	count = 0;
9557583019bSjsg 	dma_fence_chain_for_each(tmp, &chain->base)
9567583019bSjsg 		fences[count++] = dma_fence_get(tmp);
9577583019bSjsg 
9587583019bSjsg 	array = dma_fence_array_create(count, fences,
9597583019bSjsg 				       dma_fence_context_alloc(1),
9607583019bSjsg 				       1, false);
9617583019bSjsg 	if (!array)
9627583019bSjsg 		goto free_fences;
9637583019bSjsg 
9647583019bSjsg 	dma_fence_put(*f);
9657583019bSjsg 	*f = &array->base;
9667583019bSjsg 	return 0;
9677583019bSjsg 
9687583019bSjsg free_fences:
9697583019bSjsg 	while (count--)
9707583019bSjsg 		dma_fence_put(fences[count]);
9717583019bSjsg 
9727583019bSjsg 	kfree(fences);
9737583019bSjsg 	return -ENOMEM;
9747583019bSjsg }
9757583019bSjsg 
976c349dbc7Sjsg static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
977c349dbc7Sjsg 					    struct drm_syncobj_transfer *args)
978c349dbc7Sjsg {
979c349dbc7Sjsg 	struct drm_syncobj *timeline_syncobj = NULL;
980c349dbc7Sjsg 	struct dma_fence_chain *chain;
9817583019bSjsg 	struct dma_fence *fence;
982c349dbc7Sjsg 	int ret;
9837f4dd379Sjsg 
984c349dbc7Sjsg 	timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
985c349dbc7Sjsg 	if (!timeline_syncobj) {
986c349dbc7Sjsg 		return -ENOENT;
987c349dbc7Sjsg 	}
988c349dbc7Sjsg 	ret = drm_syncobj_find_fence(file_private, args->src_handle,
989c349dbc7Sjsg 				     args->src_point, args->flags,
990c349dbc7Sjsg 				     &fence);
991c349dbc7Sjsg 	if (ret)
9927583019bSjsg 		goto err_put_timeline;
9937583019bSjsg 
9947583019bSjsg 	ret = drm_syncobj_flatten_chain(&fence);
9957583019bSjsg 	if (ret)
9967583019bSjsg 		goto err_free_fence;
9977583019bSjsg 
9985ca02815Sjsg 	chain = dma_fence_chain_alloc();
999c349dbc7Sjsg 	if (!chain) {
1000c349dbc7Sjsg 		ret = -ENOMEM;
10017583019bSjsg 		goto err_free_fence;
1002c349dbc7Sjsg 	}
10037583019bSjsg 
1004c349dbc7Sjsg 	drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
10057583019bSjsg err_free_fence:
1006c349dbc7Sjsg 	dma_fence_put(fence);
10077583019bSjsg err_put_timeline:
1008c349dbc7Sjsg 	drm_syncobj_put(timeline_syncobj);
1009c349dbc7Sjsg 
1010c349dbc7Sjsg 	return ret;
1011c349dbc7Sjsg }
1012c349dbc7Sjsg 
1013c349dbc7Sjsg static int
1014c349dbc7Sjsg drm_syncobj_transfer_to_binary(struct drm_file *file_private,
1015c349dbc7Sjsg 			       struct drm_syncobj_transfer *args)
1016c349dbc7Sjsg {
1017c349dbc7Sjsg 	struct drm_syncobj *binary_syncobj = NULL;
1018c349dbc7Sjsg 	struct dma_fence *fence;
1019c349dbc7Sjsg 	int ret;
1020c349dbc7Sjsg 
1021c349dbc7Sjsg 	binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
1022c349dbc7Sjsg 	if (!binary_syncobj)
1023c349dbc7Sjsg 		return -ENOENT;
1024c349dbc7Sjsg 	ret = drm_syncobj_find_fence(file_private, args->src_handle,
1025c349dbc7Sjsg 				     args->src_point, args->flags, &fence);
1026c349dbc7Sjsg 	if (ret)
1027c349dbc7Sjsg 		goto err;
1028c349dbc7Sjsg 	drm_syncobj_replace_fence(binary_syncobj, fence);
1029c349dbc7Sjsg 	dma_fence_put(fence);
1030c349dbc7Sjsg err:
1031c349dbc7Sjsg 	drm_syncobj_put(binary_syncobj);
1032c349dbc7Sjsg 
1033c349dbc7Sjsg 	return ret;
1034c349dbc7Sjsg }
1035c349dbc7Sjsg int
1036c349dbc7Sjsg drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
1037c349dbc7Sjsg 			   struct drm_file *file_private)
1038c349dbc7Sjsg {
1039c349dbc7Sjsg 	struct drm_syncobj_transfer *args = data;
1040c349dbc7Sjsg 	int ret;
1041c349dbc7Sjsg 
1042c349dbc7Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1043c349dbc7Sjsg 		return -EOPNOTSUPP;
1044c349dbc7Sjsg 
1045c349dbc7Sjsg 	if (args->pad)
1046c349dbc7Sjsg 		return -EINVAL;
1047c349dbc7Sjsg 
1048c349dbc7Sjsg 	if (args->dst_point)
1049c349dbc7Sjsg 		ret = drm_syncobj_transfer_to_timeline(file_private, args);
1050c349dbc7Sjsg 	else
1051c349dbc7Sjsg 		ret = drm_syncobj_transfer_to_binary(file_private, args);
1052c349dbc7Sjsg 
1053c349dbc7Sjsg 	return ret;
1054c349dbc7Sjsg }
1055c349dbc7Sjsg 
10567f4dd379Sjsg static void syncobj_wait_fence_func(struct dma_fence *fence,
10577f4dd379Sjsg 				    struct dma_fence_cb *cb)
10587f4dd379Sjsg {
10597f4dd379Sjsg 	struct syncobj_wait_entry *wait =
10607f4dd379Sjsg 		container_of(cb, struct syncobj_wait_entry, fence_cb);
10617f4dd379Sjsg 
10627f4dd379Sjsg 	wake_up_process(wait->task);
10637f4dd379Sjsg }
10647f4dd379Sjsg 
10657f4dd379Sjsg static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
1066c349dbc7Sjsg 				      struct syncobj_wait_entry *wait)
10677f4dd379Sjsg {
1068c349dbc7Sjsg 	struct dma_fence *fence;
10697f4dd379Sjsg 
10707f4dd379Sjsg 	/* This happens inside the syncobj lock */
1071c349dbc7Sjsg 	fence = rcu_dereference_protected(syncobj->fence,
1072c349dbc7Sjsg 					  lockdep_is_held(&syncobj->lock));
1073c349dbc7Sjsg 	dma_fence_get(fence);
1074c349dbc7Sjsg 	if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
1075c349dbc7Sjsg 		dma_fence_put(fence);
1076c349dbc7Sjsg 		return;
1077c349dbc7Sjsg 	} else if (!fence) {
1078c349dbc7Sjsg 		wait->fence = dma_fence_get_stub();
1079c349dbc7Sjsg 	} else {
1080c349dbc7Sjsg 		wait->fence = fence;
1081c349dbc7Sjsg 	}
1082c349dbc7Sjsg 
10837f4dd379Sjsg 	wake_up_process(wait->task);
1084c349dbc7Sjsg 	list_del_init(&wait->node);
10857f4dd379Sjsg }
10867f4dd379Sjsg 
10877f4dd379Sjsg static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
1088c349dbc7Sjsg 						  void __user *user_points,
10897f4dd379Sjsg 						  uint32_t count,
10907f4dd379Sjsg 						  uint32_t flags,
10917f4dd379Sjsg 						  signed long timeout,
10927f4dd379Sjsg 						  uint32_t *idx)
10937f4dd379Sjsg {
10947f4dd379Sjsg 	struct syncobj_wait_entry *entries;
10957f4dd379Sjsg 	struct dma_fence *fence;
1096c349dbc7Sjsg 	uint64_t *points;
10977f4dd379Sjsg 	uint32_t signaled_count, i;
10987f4dd379Sjsg 
1099ad45188dSjsg 	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1100ad45188dSjsg 		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
11015ca02815Sjsg 		lockdep_assert_none_held_once();
11025ca02815Sjsg 
1103c349dbc7Sjsg 	points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
1104c349dbc7Sjsg 	if (points == NULL)
11057f4dd379Sjsg 		return -ENOMEM;
11067f4dd379Sjsg 
1107c349dbc7Sjsg 	if (!user_points) {
1108c349dbc7Sjsg 		memset(points, 0, count * sizeof(uint64_t));
1109c349dbc7Sjsg 
1110c349dbc7Sjsg 	} else if (copy_from_user(points, user_points,
1111c349dbc7Sjsg 				  sizeof(uint64_t) * count)) {
1112c349dbc7Sjsg 		timeout = -EFAULT;
1113c349dbc7Sjsg 		goto err_free_points;
1114c349dbc7Sjsg 	}
1115c349dbc7Sjsg 
1116c349dbc7Sjsg 	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1117c349dbc7Sjsg 	if (!entries) {
1118c349dbc7Sjsg 		timeout = -ENOMEM;
1119c349dbc7Sjsg 		goto err_free_points;
1120c349dbc7Sjsg 	}
11217f4dd379Sjsg 	/* Walk the list of sync objects and initialize entries.  We do
11227f4dd379Sjsg 	 * this up-front so that we can properly return -EINVAL if there is
11237f4dd379Sjsg 	 * a syncobj with a missing fence and then never have the chance of
11247f4dd379Sjsg 	 * returning -EINVAL again.
11257f4dd379Sjsg 	 */
11267f4dd379Sjsg 	signaled_count = 0;
11277f4dd379Sjsg 	for (i = 0; i < count; ++i) {
1128c349dbc7Sjsg 		struct dma_fence *fence;
1129c349dbc7Sjsg 
11307f4dd379Sjsg #ifdef __linux__
11317f4dd379Sjsg 		entries[i].task = current;
11327f4dd379Sjsg #else
11337f4dd379Sjsg 		entries[i].task = curproc;
11347f4dd379Sjsg #endif
1135c349dbc7Sjsg 		entries[i].point = points[i];
1136c349dbc7Sjsg 		fence = drm_syncobj_fence_get(syncobjs[i]);
1137c349dbc7Sjsg 		if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
1138c349dbc7Sjsg 			dma_fence_put(fence);
1139075cc07cSjsg 			if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1140075cc07cSjsg 				     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
11417f4dd379Sjsg 				continue;
11427f4dd379Sjsg 			} else {
1143c349dbc7Sjsg 				timeout = -EINVAL;
11447f4dd379Sjsg 				goto cleanup_entries;
11457f4dd379Sjsg 			}
11467f4dd379Sjsg 		}
11477f4dd379Sjsg 
1148c349dbc7Sjsg 		if (fence)
1149c349dbc7Sjsg 			entries[i].fence = fence;
1150c349dbc7Sjsg 		else
1151c349dbc7Sjsg 			entries[i].fence = dma_fence_get_stub();
1152c349dbc7Sjsg 
1153c349dbc7Sjsg 		if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
1154c349dbc7Sjsg 		    dma_fence_is_signaled(entries[i].fence)) {
11557f4dd379Sjsg 			if (signaled_count == 0 && idx)
11567f4dd379Sjsg 				*idx = i;
11577f4dd379Sjsg 			signaled_count++;
11587f4dd379Sjsg 		}
11597f4dd379Sjsg 	}
11607f4dd379Sjsg 
11617f4dd379Sjsg 	if (signaled_count == count ||
11627f4dd379Sjsg 	    (signaled_count > 0 &&
11637f4dd379Sjsg 	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
11647f4dd379Sjsg 		goto cleanup_entries;
11657f4dd379Sjsg 
11667f4dd379Sjsg 	/* There's a very annoying laxness in the dma_fence API here, in
11677f4dd379Sjsg 	 * that backends are not required to automatically report when a
11687f4dd379Sjsg 	 * fence is signaled prior to fence->ops->enable_signaling() being
11697f4dd379Sjsg 	 * called.  So here if we fail to match signaled_count, we need to
11707f4dd379Sjsg 	 * fallthough and try a 0 timeout wait!
11717f4dd379Sjsg 	 * fall through and try a 0 timeout wait!
11727f4dd379Sjsg 
1173ad45188dSjsg 	if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1174ad45188dSjsg 		     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
1175c349dbc7Sjsg 		for (i = 0; i < count; ++i)
1176c349dbc7Sjsg 			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
11777f4dd379Sjsg 	}
11787f4dd379Sjsg 
11797f4dd379Sjsg 	do {
11807f4dd379Sjsg 		set_current_state(TASK_INTERRUPTIBLE);
11817f4dd379Sjsg 
11827f4dd379Sjsg 		signaled_count = 0;
11837f4dd379Sjsg 		for (i = 0; i < count; ++i) {
11847f4dd379Sjsg 			fence = entries[i].fence;
11857f4dd379Sjsg 			if (!fence)
11867f4dd379Sjsg 				continue;
11877f4dd379Sjsg 
1188c349dbc7Sjsg 			if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
1189c349dbc7Sjsg 			    dma_fence_is_signaled(fence) ||
11907f4dd379Sjsg 			    (!entries[i].fence_cb.func &&
11917f4dd379Sjsg 			     dma_fence_add_callback(fence,
11927f4dd379Sjsg 						    &entries[i].fence_cb,
11937f4dd379Sjsg 						    syncobj_wait_fence_func))) {
11947f4dd379Sjsg 				/* The fence has been signaled */
11957f4dd379Sjsg 				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
11967f4dd379Sjsg 					signaled_count++;
11977f4dd379Sjsg 				} else {
11987f4dd379Sjsg 					if (idx)
11997f4dd379Sjsg 						*idx = i;
12007f4dd379Sjsg 					goto done_waiting;
12017f4dd379Sjsg 				}
12027f4dd379Sjsg 			}
12037f4dd379Sjsg 		}
12047f4dd379Sjsg 
12057f4dd379Sjsg 		if (signaled_count == count)
12067f4dd379Sjsg 			goto done_waiting;
12077f4dd379Sjsg 
12087f4dd379Sjsg 		if (timeout == 0) {
1209c349dbc7Sjsg 			timeout = -ETIME;
12107f4dd379Sjsg 			goto done_waiting;
12117f4dd379Sjsg 		}
12127f4dd379Sjsg 
1213c349dbc7Sjsg 		if (signal_pending(current)) {
1214c349dbc7Sjsg 			timeout = -ERESTARTSYS;
1215c349dbc7Sjsg 			goto done_waiting;
1216c349dbc7Sjsg 		}
12177f4dd379Sjsg 
1218c349dbc7Sjsg 		timeout = schedule_timeout(timeout);
1219c349dbc7Sjsg 	} while (1);
12207f4dd379Sjsg 
12217f4dd379Sjsg done_waiting:
12227f4dd379Sjsg 	__set_current_state(TASK_RUNNING);
12237f4dd379Sjsg 
12247f4dd379Sjsg cleanup_entries:
12257f4dd379Sjsg 	for (i = 0; i < count; ++i) {
1226c349dbc7Sjsg 		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
12277f4dd379Sjsg 		if (entries[i].fence_cb.func)
12287f4dd379Sjsg 			dma_fence_remove_callback(entries[i].fence,
12297f4dd379Sjsg 						  &entries[i].fence_cb);
12307f4dd379Sjsg 		dma_fence_put(entries[i].fence);
12317f4dd379Sjsg 	}
12327f4dd379Sjsg 	kfree(entries);
12337f4dd379Sjsg 
1234c349dbc7Sjsg err_free_points:
1235c349dbc7Sjsg 	kfree(points);
1236c349dbc7Sjsg 
1237c349dbc7Sjsg 	return timeout;
12387f4dd379Sjsg }
12397f4dd379Sjsg 
12407f4dd379Sjsg /**
12417f4dd379Sjsg  * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
12427f4dd379Sjsg  *
12437f4dd379Sjsg  * @timeout_nsec: timeout nsec component in ns, 0 for poll
12447f4dd379Sjsg  *
12457f4dd379Sjsg  * Calculate the timeout in jiffies from an absolute time in sec/nsec.
12467f4dd379Sjsg  */
1247c349dbc7Sjsg signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
12487f4dd379Sjsg {
12497f4dd379Sjsg 	ktime_t abs_timeout, now;
12507f4dd379Sjsg 	u64 timeout_ns, timeout_jiffies64;
12517f4dd379Sjsg 
12527f4dd379Sjsg 	/* make 0 timeout mean poll - absolute 0 doesn't seem valid */
12537f4dd379Sjsg 	if (timeout_nsec == 0)
12547f4dd379Sjsg 		return 0;
12557f4dd379Sjsg 
12567f4dd379Sjsg 	abs_timeout = ns_to_ktime(timeout_nsec);
12577f4dd379Sjsg 	now = ktime_get();
12587f4dd379Sjsg 
12597f4dd379Sjsg 	if (!ktime_after(abs_timeout, now))
12607f4dd379Sjsg 		return 0;
12617f4dd379Sjsg 
12627f4dd379Sjsg 	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
12637f4dd379Sjsg 
12647f4dd379Sjsg 	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
12657f4dd379Sjsg 	/*  clamp timeout to avoid infinite timeout */
12667f4dd379Sjsg 	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
12677f4dd379Sjsg 		return MAX_SCHEDULE_TIMEOUT - 1;
12687f4dd379Sjsg 
12697f4dd379Sjsg 	return timeout_jiffies64 + 1;
12707f4dd379Sjsg }
1271c349dbc7Sjsg EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
12727f4dd379Sjsg 
12737f4dd379Sjsg static int drm_syncobj_array_wait(struct drm_device *dev,
12747f4dd379Sjsg 				  struct drm_file *file_private,
12757f4dd379Sjsg 				  struct drm_syncobj_wait *wait,
1276c349dbc7Sjsg 				  struct drm_syncobj_timeline_wait *timeline_wait,
1277c349dbc7Sjsg 				  struct drm_syncobj **syncobjs, bool timeline)
12787f4dd379Sjsg {
1279c349dbc7Sjsg 	signed long timeout = 0;
12807f4dd379Sjsg 	uint32_t first = ~0;
12817f4dd379Sjsg 
1282c349dbc7Sjsg 	if (!timeline) {
1283c349dbc7Sjsg 		timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1284c349dbc7Sjsg 		timeout = drm_syncobj_array_wait_timeout(syncobjs,
1285c349dbc7Sjsg 							 NULL,
12867f4dd379Sjsg 							 wait->count_handles,
12877f4dd379Sjsg 							 wait->flags,
12887f4dd379Sjsg 							 timeout, &first);
1289c349dbc7Sjsg 		if (timeout < 0)
1290c349dbc7Sjsg 			return timeout;
12917f4dd379Sjsg 		wait->first_signaled = first;
1292c349dbc7Sjsg 	} else {
1293c349dbc7Sjsg 		timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
1294c349dbc7Sjsg 		timeout = drm_syncobj_array_wait_timeout(syncobjs,
1295c349dbc7Sjsg 							 u64_to_user_ptr(timeline_wait->points),
1296c349dbc7Sjsg 							 timeline_wait->count_handles,
1297c349dbc7Sjsg 							 timeline_wait->flags,
1298c349dbc7Sjsg 							 timeout, &first);
1299c349dbc7Sjsg 		if (timeout < 0)
1300c349dbc7Sjsg 			return timeout;
1301c349dbc7Sjsg 		timeline_wait->first_signaled = first;
1302c349dbc7Sjsg 	}
13037f4dd379Sjsg 	return 0;
13047f4dd379Sjsg }
13057f4dd379Sjsg 
13067f4dd379Sjsg static int drm_syncobj_array_find(struct drm_file *file_private,
13077f4dd379Sjsg 				  void __user *user_handles,
13087f4dd379Sjsg 				  uint32_t count_handles,
13097f4dd379Sjsg 				  struct drm_syncobj ***syncobjs_out)
13107f4dd379Sjsg {
13117f4dd379Sjsg 	uint32_t i, *handles;
13127f4dd379Sjsg 	struct drm_syncobj **syncobjs;
13137f4dd379Sjsg 	int ret;
13147f4dd379Sjsg 
13157f4dd379Sjsg 	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
13167f4dd379Sjsg 	if (handles == NULL)
13177f4dd379Sjsg 		return -ENOMEM;
13187f4dd379Sjsg 
13197f4dd379Sjsg 	if (copy_from_user(handles, user_handles,
13207f4dd379Sjsg 			   sizeof(uint32_t) * count_handles)) {
13217f4dd379Sjsg 		ret = -EFAULT;
13227f4dd379Sjsg 		goto err_free_handles;
13237f4dd379Sjsg 	}
13247f4dd379Sjsg 
13257f4dd379Sjsg 	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
13267f4dd379Sjsg 	if (syncobjs == NULL) {
13277f4dd379Sjsg 		ret = -ENOMEM;
13287f4dd379Sjsg 		goto err_free_handles;
13297f4dd379Sjsg 	}
13307f4dd379Sjsg 
13317f4dd379Sjsg 	for (i = 0; i < count_handles; i++) {
13327f4dd379Sjsg 		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
13337f4dd379Sjsg 		if (!syncobjs[i]) {
13347f4dd379Sjsg 			ret = -ENOENT;
13357f4dd379Sjsg 			goto err_put_syncobjs;
13367f4dd379Sjsg 		}
13377f4dd379Sjsg 	}
13387f4dd379Sjsg 
13397f4dd379Sjsg 	kfree(handles);
13407f4dd379Sjsg 	*syncobjs_out = syncobjs;
13417f4dd379Sjsg 	return 0;
13427f4dd379Sjsg 
13437f4dd379Sjsg err_put_syncobjs:
13447f4dd379Sjsg 	while (i-- > 0)
13457f4dd379Sjsg 		drm_syncobj_put(syncobjs[i]);
13467f4dd379Sjsg 	kfree(syncobjs);
13477f4dd379Sjsg err_free_handles:
13487f4dd379Sjsg 	kfree(handles);
13497f4dd379Sjsg 
13507f4dd379Sjsg 	return ret;
13517f4dd379Sjsg }
13527f4dd379Sjsg 
13537f4dd379Sjsg static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
13547f4dd379Sjsg 				   uint32_t count)
13557f4dd379Sjsg {
13567f4dd379Sjsg 	uint32_t i;
1357ad8b1aafSjsg 
13587f4dd379Sjsg 	for (i = 0; i < count; i++)
13597f4dd379Sjsg 		drm_syncobj_put(syncobjs[i]);
13607f4dd379Sjsg 	kfree(syncobjs);
13617f4dd379Sjsg }
13627f4dd379Sjsg 
13637f4dd379Sjsg int
13647f4dd379Sjsg drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
13657f4dd379Sjsg 		       struct drm_file *file_private)
13667f4dd379Sjsg {
13677f4dd379Sjsg 	struct drm_syncobj_wait *args = data;
13687f4dd379Sjsg 	struct drm_syncobj **syncobjs;
13697f4dd379Sjsg 	int ret = 0;
13707f4dd379Sjsg 
13717f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1372c349dbc7Sjsg 		return -EOPNOTSUPP;
13737f4dd379Sjsg 
13747f4dd379Sjsg 	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
13757f4dd379Sjsg 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
13767f4dd379Sjsg 		return -EINVAL;
13777f4dd379Sjsg 
13787f4dd379Sjsg 	if (args->count_handles == 0)
13797f4dd379Sjsg 		return -EINVAL;
13807f4dd379Sjsg 
13817f4dd379Sjsg 	ret = drm_syncobj_array_find(file_private,
13827f4dd379Sjsg 				     u64_to_user_ptr(args->handles),
13837f4dd379Sjsg 				     args->count_handles,
13847f4dd379Sjsg 				     &syncobjs);
13857f4dd379Sjsg 	if (ret < 0)
13867f4dd379Sjsg 		return ret;
13877f4dd379Sjsg 
13887f4dd379Sjsg 	ret = drm_syncobj_array_wait(dev, file_private,
1389c349dbc7Sjsg 				     args, NULL, syncobjs, false);
13907f4dd379Sjsg 
13917f4dd379Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
13927f4dd379Sjsg 
13937f4dd379Sjsg 	return ret;
13947f4dd379Sjsg }
13957f4dd379Sjsg 
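/*
 * A minimal userspace sketch of driving DRM_IOCTL_SYNCOBJ_WAIT, assuming the
 * libdrm headers for drmIoctl() and the uapi structures.  The timeline
 * variant below works the same way through struct drm_syncobj_timeline_wait,
 * which additionally carries a user pointer to an array of 64-bit points.
 * (libdrm also ships drmSyncobjWait() wrappers around this ioctl.)
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <time.h>
#include <xf86drm.h>

static int example_wait_all(int drm_fd, const uint32_t *handles,
			    uint32_t count, int64_t relative_timeout_ns)
{
	struct timespec now;
	struct drm_syncobj_wait wait = {
		.handles = (uint64_t)(uintptr_t)handles,
		.count_handles = count,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
	};

	/* The ioctl expects an absolute CLOCK_MONOTONIC deadline in ns. */
	clock_gettime(CLOCK_MONOTONIC, &now);
	wait.timeout_nsec = (int64_t)now.tv_sec * 1000000000ll +
			    now.tv_nsec + relative_timeout_ns;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}
#endif
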
13967f4dd379Sjsg int
1397c349dbc7Sjsg drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
1398c349dbc7Sjsg 				struct drm_file *file_private)
1399c349dbc7Sjsg {
1400c349dbc7Sjsg 	struct drm_syncobj_timeline_wait *args = data;
1401c349dbc7Sjsg 	struct drm_syncobj **syncobjs;
1402c349dbc7Sjsg 	int ret = 0;
1403c349dbc7Sjsg 
1404c349dbc7Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1405c349dbc7Sjsg 		return -EOPNOTSUPP;
1406c349dbc7Sjsg 
1407c349dbc7Sjsg 	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1408c349dbc7Sjsg 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1409c349dbc7Sjsg 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
1410c349dbc7Sjsg 		return -EINVAL;
1411c349dbc7Sjsg 
1412c349dbc7Sjsg 	if (args->count_handles == 0)
1413c349dbc7Sjsg 		return -EINVAL;
1414c349dbc7Sjsg 
1415c349dbc7Sjsg 	ret = drm_syncobj_array_find(file_private,
1416c349dbc7Sjsg 				     u64_to_user_ptr(args->handles),
1417c349dbc7Sjsg 				     args->count_handles,
1418c349dbc7Sjsg 				     &syncobjs);
1419c349dbc7Sjsg 	if (ret < 0)
1420c349dbc7Sjsg 		return ret;
1421c349dbc7Sjsg 
1422c349dbc7Sjsg 	ret = drm_syncobj_array_wait(dev, file_private,
1423c349dbc7Sjsg 				     NULL, args, syncobjs, true);
1424c349dbc7Sjsg 
1425c349dbc7Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
1426c349dbc7Sjsg 
1427c349dbc7Sjsg 	return ret;
1428c349dbc7Sjsg }
1429c349dbc7Sjsg 
1430f005ef32Sjsg static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
1431f005ef32Sjsg 					     struct dma_fence_cb *cb)
1432f005ef32Sjsg {
1433f005ef32Sjsg 	struct syncobj_eventfd_entry *entry =
1434f005ef32Sjsg 		container_of(cb, struct syncobj_eventfd_entry, fence_cb);
1435f005ef32Sjsg 
1436f005ef32Sjsg 	eventfd_signal(entry->ev_fd_ctx, 1);
1437f005ef32Sjsg 	syncobj_eventfd_entry_free(entry);
1438f005ef32Sjsg }
1439f005ef32Sjsg 
1440f005ef32Sjsg static void
1441f005ef32Sjsg syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
1442f005ef32Sjsg 			   struct syncobj_eventfd_entry *entry)
1443f005ef32Sjsg {
1444f005ef32Sjsg 	int ret;
1445f005ef32Sjsg 	struct dma_fence *fence;
1446f005ef32Sjsg 
1447f005ef32Sjsg 	/* This happens inside the syncobj lock */
1448f005ef32Sjsg 	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
1449c9ffe515Sjsg 	if (!fence)
1450c9ffe515Sjsg 		return;
1451c9ffe515Sjsg 
1452f005ef32Sjsg 	ret = dma_fence_chain_find_seqno(&fence, entry->point);
1453c9ffe515Sjsg 	if (ret != 0) {
1454c9ffe515Sjsg 		/* The given seqno has not been submitted yet. */
1455f005ef32Sjsg 		dma_fence_put(fence);
1456f005ef32Sjsg 		return;
1457c9ffe515Sjsg 	} else if (!fence) {
1458c9ffe515Sjsg 		/* If dma_fence_chain_find_seqno returns 0 but sets the fence
1459c9ffe515Sjsg 		 * to NULL, it implies that the given seqno is signaled and a
1460c9ffe515Sjsg 		 * later seqno has already been submitted. Assign a stub fence
1461c9ffe515Sjsg 		 * so that the eventfd still gets signaled below.
1462c9ffe515Sjsg 		 */
1463c9ffe515Sjsg 		fence = dma_fence_get_stub();
1464f005ef32Sjsg 	}
1465f005ef32Sjsg 
1466f005ef32Sjsg 	list_del_init(&entry->node);
1467f005ef32Sjsg 	entry->fence = fence;
1468f005ef32Sjsg 
1469f005ef32Sjsg 	if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
1470f005ef32Sjsg 		eventfd_signal(entry->ev_fd_ctx, 1);
1471f005ef32Sjsg 		syncobj_eventfd_entry_free(entry);
1472f005ef32Sjsg 	} else {
1473f005ef32Sjsg 		ret = dma_fence_add_callback(fence, &entry->fence_cb,
1474f005ef32Sjsg 					     syncobj_eventfd_entry_fence_func);
1475f005ef32Sjsg 		if (ret == -ENOENT) {
1476f005ef32Sjsg 			eventfd_signal(entry->ev_fd_ctx, 1);
1477f005ef32Sjsg 			syncobj_eventfd_entry_free(entry);
1478f005ef32Sjsg 		}
1479f005ef32Sjsg 	}
1480f005ef32Sjsg }
1481f005ef32Sjsg 
1482f005ef32Sjsg int
1483f005ef32Sjsg drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
1484f005ef32Sjsg 			  struct drm_file *file_private)
1485f005ef32Sjsg {
1486f005ef32Sjsg 	return -EOPNOTSUPP;
1487f005ef32Sjsg #ifdef notyet
1488f005ef32Sjsg 	struct drm_syncobj_eventfd *args = data;
1489f005ef32Sjsg 	struct drm_syncobj *syncobj;
1490f005ef32Sjsg 	struct eventfd_ctx *ev_fd_ctx;
1491f005ef32Sjsg 	struct syncobj_eventfd_entry *entry;
1492*9fbeb06eSjsg 	int ret;
1493f005ef32Sjsg 
1494f005ef32Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1495f005ef32Sjsg 		return -EOPNOTSUPP;
1496f005ef32Sjsg 
1497f005ef32Sjsg 	if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
1498f005ef32Sjsg 		return -EINVAL;
1499f005ef32Sjsg 
1500f005ef32Sjsg 	if (args->pad)
1501f005ef32Sjsg 		return -EINVAL;
1502f005ef32Sjsg 
1503f005ef32Sjsg 	syncobj = drm_syncobj_find(file_private, args->handle);
1504f005ef32Sjsg 	if (!syncobj)
1505f005ef32Sjsg 		return -ENOENT;
1506f005ef32Sjsg 
1507f005ef32Sjsg 	ev_fd_ctx = eventfd_ctx_fdget(args->fd);
1508*9fbeb06eSjsg 	if (IS_ERR(ev_fd_ctx)) {
1509*9fbeb06eSjsg 		ret = PTR_ERR(ev_fd_ctx);
1510*9fbeb06eSjsg 		goto err_fdget;
1511*9fbeb06eSjsg 	}
1512f005ef32Sjsg 
1513f005ef32Sjsg 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1514f005ef32Sjsg 	if (!entry) {
1515*9fbeb06eSjsg 		ret = -ENOMEM;
1516*9fbeb06eSjsg 		goto err_kzalloc;
1517f005ef32Sjsg 	}
1518f005ef32Sjsg 	entry->syncobj = syncobj;
1519f005ef32Sjsg 	entry->ev_fd_ctx = ev_fd_ctx;
1520f005ef32Sjsg 	entry->point = args->point;
1521f005ef32Sjsg 	entry->flags = args->flags;
1522f005ef32Sjsg 
1523f005ef32Sjsg 	drm_syncobj_add_eventfd(syncobj, entry);
1524f005ef32Sjsg 	drm_syncobj_put(syncobj);
1525f005ef32Sjsg 
1526f005ef32Sjsg 	return 0;
1527*9fbeb06eSjsg 
1528*9fbeb06eSjsg err_kzalloc:
1529*9fbeb06eSjsg 	eventfd_ctx_put(ev_fd_ctx);
1530*9fbeb06eSjsg err_fdget:
1531*9fbeb06eSjsg 	drm_syncobj_put(syncobj);
1532*9fbeb06eSjsg 	return ret;
1533f005ef32Sjsg #endif
1534f005ef32Sjsg }
1535c349dbc7Sjsg 
1536c349dbc7Sjsg int
15377f4dd379Sjsg drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
15387f4dd379Sjsg 			struct drm_file *file_private)
15397f4dd379Sjsg {
15407f4dd379Sjsg 	struct drm_syncobj_array *args = data;
15417f4dd379Sjsg 	struct drm_syncobj **syncobjs;
15427f4dd379Sjsg 	uint32_t i;
15437f4dd379Sjsg 	int ret;
15447f4dd379Sjsg 
15457f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1546c349dbc7Sjsg 		return -EOPNOTSUPP;
15477f4dd379Sjsg 
15487f4dd379Sjsg 	if (args->pad != 0)
15497f4dd379Sjsg 		return -EINVAL;
15507f4dd379Sjsg 
15517f4dd379Sjsg 	if (args->count_handles == 0)
15527f4dd379Sjsg 		return -EINVAL;
15537f4dd379Sjsg 
15547f4dd379Sjsg 	ret = drm_syncobj_array_find(file_private,
15557f4dd379Sjsg 				     u64_to_user_ptr(args->handles),
15567f4dd379Sjsg 				     args->count_handles,
15577f4dd379Sjsg 				     &syncobjs);
15587f4dd379Sjsg 	if (ret < 0)
15597f4dd379Sjsg 		return ret;
15607f4dd379Sjsg 
15617f4dd379Sjsg 	for (i = 0; i < args->count_handles; i++)
15627f4dd379Sjsg 		drm_syncobj_replace_fence(syncobjs[i], NULL);
15637f4dd379Sjsg 
15647f4dd379Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
15657f4dd379Sjsg 
15667f4dd379Sjsg 	return 0;
15677f4dd379Sjsg }
15687f4dd379Sjsg 
15697f4dd379Sjsg int
15707f4dd379Sjsg drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
15717f4dd379Sjsg 			 struct drm_file *file_private)
15727f4dd379Sjsg {
15737f4dd379Sjsg 	struct drm_syncobj_array *args = data;
15747f4dd379Sjsg 	struct drm_syncobj **syncobjs;
15757f4dd379Sjsg 	uint32_t i;
15767f4dd379Sjsg 	int ret;
15777f4dd379Sjsg 
15787f4dd379Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1579c349dbc7Sjsg 		return -EOPNOTSUPP;
15807f4dd379Sjsg 
15817f4dd379Sjsg 	if (args->pad != 0)
15827f4dd379Sjsg 		return -EINVAL;
15837f4dd379Sjsg 
15847f4dd379Sjsg 	if (args->count_handles == 0)
15857f4dd379Sjsg 		return -EINVAL;
15867f4dd379Sjsg 
15877f4dd379Sjsg 	ret = drm_syncobj_array_find(file_private,
15887f4dd379Sjsg 				     u64_to_user_ptr(args->handles),
15897f4dd379Sjsg 				     args->count_handles,
15907f4dd379Sjsg 				     &syncobjs);
15917f4dd379Sjsg 	if (ret < 0)
15927f4dd379Sjsg 		return ret;
15937f4dd379Sjsg 
15945ca02815Sjsg 	for (i = 0; i < args->count_handles; i++) {
15955ca02815Sjsg 		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
15965ca02815Sjsg 		if (ret < 0)
15975ca02815Sjsg 			break;
15985ca02815Sjsg 	}
15997f4dd379Sjsg 
16007f4dd379Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
16017f4dd379Sjsg 
16027f4dd379Sjsg 	return ret;
16037f4dd379Sjsg }
1604c349dbc7Sjsg 
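/*
 * A minimal userspace sketch of the two ioctls above, assuming the libdrm
 * headers for drmIoctl() and the uapi structures: RESET drops whatever fence
 * is attached to each syncobj, SIGNAL attaches an already-signaled stub
 * fence to each.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <xf86drm.h>

static int example_reset_then_signal(int drm_fd, const uint32_t *handles,
				     uint32_t count)
{
	struct drm_syncobj_array array = {
		.handles = (uint64_t)(uintptr_t)handles,
		.count_handles = count,
	};
	int ret;

	ret = drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &array);
	if (ret)
		return ret;

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
}
#endif
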
1605c349dbc7Sjsg int
1606c349dbc7Sjsg drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
1607c349dbc7Sjsg 				  struct drm_file *file_private)
1608c349dbc7Sjsg {
1609c349dbc7Sjsg 	struct drm_syncobj_timeline_array *args = data;
1610c349dbc7Sjsg 	struct drm_syncobj **syncobjs;
1611c349dbc7Sjsg 	struct dma_fence_chain **chains;
1612c349dbc7Sjsg 	uint64_t *points;
1613c349dbc7Sjsg 	uint32_t i, j;
1614c349dbc7Sjsg 	int ret;
1615c349dbc7Sjsg 
1616c349dbc7Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1617c349dbc7Sjsg 		return -EOPNOTSUPP;
1618c349dbc7Sjsg 
1619c349dbc7Sjsg 	if (args->flags != 0)
1620c349dbc7Sjsg 		return -EINVAL;
1621c349dbc7Sjsg 
1622c349dbc7Sjsg 	if (args->count_handles == 0)
1623c349dbc7Sjsg 		return -EINVAL;
1624c349dbc7Sjsg 
1625c349dbc7Sjsg 	ret = drm_syncobj_array_find(file_private,
1626c349dbc7Sjsg 				     u64_to_user_ptr(args->handles),
1627c349dbc7Sjsg 				     args->count_handles,
1628c349dbc7Sjsg 				     &syncobjs);
1629c349dbc7Sjsg 	if (ret < 0)
1630c349dbc7Sjsg 		return ret;
1631c349dbc7Sjsg 
1632c349dbc7Sjsg 	points = kmalloc_array(args->count_handles, sizeof(*points),
1633c349dbc7Sjsg 			       GFP_KERNEL);
1634c349dbc7Sjsg 	if (!points) {
1635c349dbc7Sjsg 		ret = -ENOMEM;
1636c349dbc7Sjsg 		goto out;
1637c349dbc7Sjsg 	}
1638c349dbc7Sjsg 	if (!u64_to_user_ptr(args->points)) {
1639c349dbc7Sjsg 		memset(points, 0, args->count_handles * sizeof(uint64_t));
1640c349dbc7Sjsg 	} else if (copy_from_user(points, u64_to_user_ptr(args->points),
1641c349dbc7Sjsg 				  sizeof(uint64_t) * args->count_handles)) {
1642c349dbc7Sjsg 		ret = -EFAULT;
1643c349dbc7Sjsg 		goto err_points;
1644c349dbc7Sjsg 	}
1645c349dbc7Sjsg 
1646c349dbc7Sjsg 	chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
1647c349dbc7Sjsg 	if (!chains) {
1648c349dbc7Sjsg 		ret = -ENOMEM;
1649c349dbc7Sjsg 		goto err_points;
1650c349dbc7Sjsg 	}
1651c349dbc7Sjsg 	for (i = 0; i < args->count_handles; i++) {
16525ca02815Sjsg 		chains[i] = dma_fence_chain_alloc();
1653c349dbc7Sjsg 		if (!chains[i]) {
1654c349dbc7Sjsg 			for (j = 0; j < i; j++)
16555ca02815Sjsg 				dma_fence_chain_free(chains[j]);
1656c349dbc7Sjsg 			ret = -ENOMEM;
1657c349dbc7Sjsg 			goto err_chains;
1658c349dbc7Sjsg 		}
1659c349dbc7Sjsg 	}
1660c349dbc7Sjsg 
1661c349dbc7Sjsg 	for (i = 0; i < args->count_handles; i++) {
1662c349dbc7Sjsg 		struct dma_fence *fence = dma_fence_get_stub();
1663c349dbc7Sjsg 
1664c349dbc7Sjsg 		drm_syncobj_add_point(syncobjs[i], chains[i],
1665c349dbc7Sjsg 				      fence, points[i]);
1666c349dbc7Sjsg 		dma_fence_put(fence);
1667c349dbc7Sjsg 	}
1668c349dbc7Sjsg err_chains:
1669c349dbc7Sjsg 	kfree(chains);
1670c349dbc7Sjsg err_points:
1671c349dbc7Sjsg 	kfree(points);
1672c349dbc7Sjsg out:
1673c349dbc7Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
1674c349dbc7Sjsg 
1675c349dbc7Sjsg 	return ret;
1676c349dbc7Sjsg }
1677c349dbc7Sjsg 
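/*
 * A minimal userspace sketch of DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, assuming
 * the libdrm headers for drmIoctl() and the uapi structures.  Each requested
 * point ends up backed by an already-signaled stub fence, exactly as the
 * kernel loop above arranges.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <xf86drm.h>

static int example_signal_point(int drm_fd, uint32_t handle, uint64_t point)
{
	struct drm_syncobj_timeline_array args = {
		.handles = (uint64_t)(uintptr_t)&handle,
		.points = (uint64_t)(uintptr_t)&point,
		.count_handles = 1,
	};

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}
#endif
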
1678c349dbc7Sjsg int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
1679c349dbc7Sjsg 			    struct drm_file *file_private)
1680c349dbc7Sjsg {
1681c349dbc7Sjsg 	struct drm_syncobj_timeline_array *args = data;
1682c349dbc7Sjsg 	struct drm_syncobj **syncobjs;
1683c349dbc7Sjsg 	uint64_t __user *points = u64_to_user_ptr(args->points);
1684c349dbc7Sjsg 	uint32_t i;
1685c349dbc7Sjsg 	int ret;
1686c349dbc7Sjsg 
1687c349dbc7Sjsg 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
1688c349dbc7Sjsg 		return -EOPNOTSUPP;
1689c349dbc7Sjsg 
1690c349dbc7Sjsg 	if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
1691c349dbc7Sjsg 		return -EINVAL;
1692c349dbc7Sjsg 
1693c349dbc7Sjsg 	if (args->count_handles == 0)
1694c349dbc7Sjsg 		return -EINVAL;
1695c349dbc7Sjsg 
1696c349dbc7Sjsg 	ret = drm_syncobj_array_find(file_private,
1697c349dbc7Sjsg 				     u64_to_user_ptr(args->handles),
1698c349dbc7Sjsg 				     args->count_handles,
1699c349dbc7Sjsg 				     &syncobjs);
1700c349dbc7Sjsg 	if (ret < 0)
1701c349dbc7Sjsg 		return ret;
1702c349dbc7Sjsg 
1703c349dbc7Sjsg 	for (i = 0; i < args->count_handles; i++) {
1704c349dbc7Sjsg 		struct dma_fence_chain *chain;
1705c349dbc7Sjsg 		struct dma_fence *fence;
1706c349dbc7Sjsg 		uint64_t point;
1707c349dbc7Sjsg 
1708c349dbc7Sjsg 		fence = drm_syncobj_fence_get(syncobjs[i]);
1709c349dbc7Sjsg 		chain = to_dma_fence_chain(fence);
1710c349dbc7Sjsg 		if (chain) {
1711c349dbc7Sjsg 			struct dma_fence *iter, *last_signaled =
1712c349dbc7Sjsg 				dma_fence_get(fence);
1713c349dbc7Sjsg 
1714c349dbc7Sjsg 			if (args->flags &
1715c349dbc7Sjsg 			    DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
1716c349dbc7Sjsg 				point = fence->seqno;
1717c349dbc7Sjsg 			} else {
1718c349dbc7Sjsg 				dma_fence_chain_for_each(iter, fence) {
1719c349dbc7Sjsg 					if (iter->context != fence->context) {
1720c349dbc7Sjsg 						dma_fence_put(iter);
1721c349dbc7Sjsg 						/* The timeline most likely has
1722c349dbc7Sjsg 						 * unordered points. */
1723c349dbc7Sjsg 						break;
1724c349dbc7Sjsg 					}
1725c349dbc7Sjsg 					dma_fence_put(last_signaled);
1726c349dbc7Sjsg 					last_signaled = dma_fence_get(iter);
1727c349dbc7Sjsg 				}
1728c349dbc7Sjsg 				point = dma_fence_is_signaled(last_signaled) ?
1729c349dbc7Sjsg 					last_signaled->seqno :
1730c349dbc7Sjsg 					to_dma_fence_chain(last_signaled)->prev_seqno;
1731c349dbc7Sjsg 			}
1732c349dbc7Sjsg 			dma_fence_put(last_signaled);
1733c349dbc7Sjsg 		} else {
1734c349dbc7Sjsg 			point = 0;
1735c349dbc7Sjsg 		}
1736c349dbc7Sjsg 		dma_fence_put(fence);
1737c349dbc7Sjsg 		ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
1738c349dbc7Sjsg 		ret = ret ? -EFAULT : 0;
1739c349dbc7Sjsg 		if (ret)
1740c349dbc7Sjsg 			break;
1741c349dbc7Sjsg 	}
1742c349dbc7Sjsg 	drm_syncobj_array_free(syncobjs, args->count_handles);
1743c349dbc7Sjsg 
1744c349dbc7Sjsg 	return ret;
1745c349dbc7Sjsg }
1746
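/*
 * A minimal userspace sketch of DRM_IOCTL_SYNCOBJ_QUERY, assuming the libdrm
 * headers for drmIoctl() and the uapi structures.  Without flags the ioctl
 * reports the last signaled point of each timeline; with
 * DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED it reports the newest point that
 * has been submitted, whether or not it has signaled yet.
 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <xf86drm.h>

static int example_query_point(int drm_fd, uint32_t handle, uint64_t *point)
{
	struct drm_syncobj_timeline_array query = {
		.handles = (uint64_t)(uintptr_t)&handle,
		.points = (uint64_t)(uintptr_t)point,
		.count_handles = 1,
		.flags = 0,	/* or DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED */
	};

	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &query);
}
#endif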