xref: /netbsd-src/sys/external/bsd/drm2/linux/linux_dma_fence_array.c (revision 87ddc0b948c1af75d1f3c2b3fbbf58ca4da1409d)
1 /*	$NetBSD: linux_dma_fence_array.c,v 1.4 2021/12/19 12:39:56 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2021 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: linux_dma_fence_array.c,v 1.4 2021/12/19 12:39:56 riastradh Exp $");
34 
35 #include <sys/systm.h>
36 
37 #include <linux/dma-fence-array.h>
38 
/*
 * dma_fence_array_driver_name(fence)
 *
 *	Driver name for all array fences; the fence argument is unused.
 */
static const char *
dma_fence_array_driver_name(struct dma_fence *fence)
{

	return "dma_fence_array";
}
44 
/*
 * dma_fence_array_timeline_name(fence)
 *
 *	Array fences are not bound to any timeline; the fence argument
 *	is unused.
 */
static const char *
dma_fence_array_timeline_name(struct dma_fence *fence)
{

	return "unbound";
}
50 
51 static void
dma_fence_array_done1(struct dma_fence * fence,struct dma_fence_cb * cb)52 dma_fence_array_done1(struct dma_fence *fence, struct dma_fence_cb *cb)
53 {
54 	struct dma_fence_array_cb *C =
55 	    container_of(cb, struct dma_fence_array_cb, dfac_cb);
56 	struct dma_fence_array *A = C->dfac_array;
57 
58 	KASSERT(spin_is_locked(&A->dfa_lock));
59 
60 	if (fence->error && A->base.error == 1) {
61 		KASSERT(fence->error != 1);
62 		A->base.error = fence->error;
63 	}
64 	if (--A->dfa_npending) {
65 		dma_fence_put(&A->base);
66 		return;
67 	}
68 
69 	/* Last one out, hit the lights -- dma_fence_array_done.  */
70 	irq_work_queue(&A->dfa_work);
71 }
72 
73 static void
dma_fence_array_done(struct irq_work * W)74 dma_fence_array_done(struct irq_work *W)
75 {
76 	struct dma_fence_array *A = container_of(W, struct dma_fence_array,
77 	    dfa_work);
78 
79 	spin_lock(&A->dfa_lock);
80 	if (A->base.error == 1)
81 		A->base.error = 0;
82 	dma_fence_signal_locked(&A->base);
83 	spin_unlock(&A->dfa_lock);
84 
85 	dma_fence_put(&A->base);
86 }
87 
88 static bool
dma_fence_array_enable_signaling(struct dma_fence * fence)89 dma_fence_array_enable_signaling(struct dma_fence *fence)
90 {
91 	struct dma_fence_array *A = to_dma_fence_array(fence);
92 	struct dma_fence_array_cb *C;
93 	unsigned i;
94 	int error;
95 
96 	KASSERT(spin_is_locked(&A->dfa_lock));
97 
98 	for (i = 0; i < A->num_fences; i++) {
99 		C = &A->dfa_cb[i];
100 		C->dfac_array = A;
101 		dma_fence_get(&A->base);
102 		if (dma_fence_add_callback(A->fences[i], &C->dfac_cb,
103 			dma_fence_array_done1)) {
104 			error = A->fences[i]->error;
105 			if (error) {
106 				KASSERT(error != 1);
107 				if (A->base.error == 1)
108 					A->base.error = error;
109 			}
110 			dma_fence_put(&A->base);
111 			if (--A->dfa_npending == 0) {
112 				if (A->base.error == 1)
113 					A->base.error = 0;
114 				return false;
115 			}
116 		}
117 	}
118 
119 	return true;
120 }
121 
122 static bool
dma_fence_array_signaled(struct dma_fence * fence)123 dma_fence_array_signaled(struct dma_fence *fence)
124 {
125 	struct dma_fence_array *A = to_dma_fence_array(fence);
126 
127 	KASSERT(spin_is_locked(&A->dfa_lock));
128 
129 	return A->dfa_npending == 0;
130 }
131 
132 static void
dma_fence_array_release(struct dma_fence * fence)133 dma_fence_array_release(struct dma_fence *fence)
134 {
135 	struct dma_fence_array *A = to_dma_fence_array(fence);
136 	unsigned i;
137 
138 	for (i = 0; i < A->num_fences; i++)
139 		dma_fence_put(A->fences[i]);
140 
141 	kfree(A->fences);
142 	spin_lock_destroy(&A->dfa_lock);
143 	dma_fence_free(fence);
144 }
145 
/*
 * Fence operations for array fences.  dma_fence_is_array identifies
 * array fences by comparing fence->ops against this table.
 */
static const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_driver_name,
	.get_timeline_name = dma_fence_array_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
153 
154 struct dma_fence_array *
dma_fence_array_create(int num_fences,struct dma_fence ** fences,unsigned context,unsigned seqno,bool signal_on_any)155 dma_fence_array_create(int num_fences, struct dma_fence **fences,
156     unsigned context, unsigned seqno, bool signal_on_any)
157 {
158 	struct dma_fence_array *A;
159 
160 	/*
161 	 * Must be allocated with kmalloc or equivalent because
162 	 * dma-fence will free it with kfree.
163 	 */
164 	A = kzalloc(struct_size(A, dfa_cb, num_fences), GFP_KERNEL);
165 	if (A == NULL)
166 		return NULL;
167 
168 	A->fences = fences;
169 	A->num_fences = num_fences;
170 	A->dfa_npending = signal_on_any ? 1 : num_fences;
171 
172 	spin_lock_init(&A->dfa_lock);
173 	dma_fence_init(&A->base, &dma_fence_array_ops, &A->dfa_lock,
174 	    context, seqno);
175 	init_irq_work(&A->dfa_work, dma_fence_array_done);
176 
177 	return A;
178 }
179 
180 bool
dma_fence_is_array(struct dma_fence * fence)181 dma_fence_is_array(struct dma_fence *fence)
182 {
183 
184 	return fence->ops == &dma_fence_array_ops;
185 }
186 
187 struct dma_fence_array *
to_dma_fence_array(struct dma_fence * fence)188 to_dma_fence_array(struct dma_fence *fence)
189 {
190 
191 	KASSERT(dma_fence_is_array(fence));
192 	return container_of(fence, struct dma_fence_array, base);
193 }
194