xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nouveau_fence.c (revision 532d944f92101d743a16bb16675947ef894d4a0c)
1 /*	$NetBSD: nouveau_fence.c,v 1.17 2021/12/19 10:49:13 riastradh Exp $	*/
2 
3 /*
4  * Copyright (C) 2007 Ben Skeggs.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining
8  * a copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sublicense, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial
17  * portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
23  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
24  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
25  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.17 2021/12/19 10:49:13 riastradh Exp $");
31 
32 #include <linux/atomic.h>
33 #include <linux/ktime.h>
34 #include <linux/hrtimer.h>
35 #include <linux/sched/signal.h>
36 #include <linux/workqueue.h>
37 #include <trace/events/dma_fence.h>
38 
39 #include <nvif/cl826e.h>
40 #include <nvif/notify.h>
41 #include <nvif/event.h>
42 
43 #include "nouveau_drv.h"
44 #include "nouveau_dma.h"
45 #include "nouveau_fence.h"
46 
47 static const struct dma_fence_ops nouveau_fence_ops_uevent;
48 static const struct dma_fence_ops nouveau_fence_ops_legacy;
49 
50 static inline struct nouveau_fence *
from_fence(struct dma_fence * fence)51 from_fence(struct dma_fence *fence)
52 {
53 	return container_of(fence, struct nouveau_fence, base);
54 }
55 
/*
 * Recover the fence context that owns @fence: fence->base.lock points
 * at the spinlock embedded in struct nouveau_fence_chan.
 */
static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
61 
/*
 * Signal one fence on the channel's pending list.  Caller holds
 * fctx->lock.
 *
 * Removes the fence from the pending list, clears its channel back
 * pointer, and drops the pending-list reference on the fence.
 * Returns 1 when this was the last fence with signalling enabled
 * (notify_ref dropped to zero), telling the caller to put the nvif
 * notify; 0 otherwise.
 */
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	/*
	 * DMA_FENCE_FLAG_USER_BITS marks fences that took a notify_ref
	 * in nouveau_fence_enable_signaling; release that ref here.
	 */
	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	/* Drop the reference taken when the fence was queued in emit. */
	dma_fence_put(&fence->base);
	return drop;
}
81 
82 static struct nouveau_fence *
nouveau_local_fence(struct dma_fence * fence,struct nouveau_drm * drm)83 nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
84 {
85 	if (fence->ops != &nouveau_fence_ops_legacy &&
86 	    fence->ops != &nouveau_fence_ops_uevent)
87 		return NULL;
88 
89 	if (fence->context < drm->chan.context_base ||
90 	    fence->context >= drm->chan.context_base + drm->chan.nr)
91 		return NULL;
92 
93 	return from_fence(fence);
94 }
95 
/*
 * Signal (and optionally mark with an error) every fence still pending
 * on @fctx.
 *
 * Used when a channel dies: each pending fence is signalled in order,
 * with @error (if nonzero) recorded via dma_fence_set_error first so
 * waiters can observe the failure.  nouveau_fence_signal unlinks each
 * fence from the pending list, so the loop terminates.
 */
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		/* Nonzero return: last notify reference gone, disarm it. */
		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);
}
113 
/*
 * Tear down a fence context before its channel goes away.
 *
 * Signals any remaining fences (without an error), shuts down the
 * uevent notifier, and marks the context dead so
 * nouveau_fence_get_timeline_name stops reporting the channel name.
 * The final synchronize_rcu lets any RCU readers still dereferencing
 * fence->channel drain before the caller frees the channel.
 */
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	nouveau_fence_context_kill(fctx, 0);
	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}
127 
128 static void
nouveau_fence_context_put(struct kref * fence_ref)129 nouveau_fence_context_put(struct kref *fence_ref)
130 {
131 	struct nouveau_fence_chan *fctx =
132 	    container_of(fence_ref, struct nouveau_fence_chan, fence_ref);
133 
134 	spin_lock_destroy(&fctx->lock);
135 	kfree(fctx);
136 }
137 
/*
 * Drop the caller's reference on @fctx; the context is freed via
 * nouveau_fence_context_put once all fences have also released theirs.
 */
void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
143 
/*
 * Retire all pending fences whose sequence number the hardware has
 * already passed.  Caller must hold fctx->lock.
 *
 * Reads the channel's current sequence number once, then walks the
 * pending list (kept in emission order) signalling every fence up to
 * that point.  The signed subtraction makes the comparison robust
 * against 32-bit sequence wraparound.
 *
 * Returns nonzero when the caller should put the nvif notify (the last
 * fence with signalling enabled was just retired).
 */
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		/* Stop at the first fence the hardware has not reached. */
		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}
162 
/*
 * nvif notify callback for non-stall interrupts: retire any completed
 * fences on the channel.
 *
 * Returns NVIF_NOTIFY_DROP to disable further notifications when no
 * fence still needs them, NVIF_NOTIFY_KEEP otherwise.
 */
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		/* Any pending fence carries the channel back pointer. */
		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}
185 
/*
 * Initialize a per-channel fence context: lists, lock, fence context
 * id, a human-readable name, and (when the backend supports uevents)
 * the nvif notifier used for interrupt-driven signalling.
 */
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->chan.context_base + chan->chid;

	/*
	 * NOTE(review): unbounded strcpy assumes fctx->name is large
	 * enough for the client name -- buffer size not visible here;
	 * confirm against struct nouveau_fence_chan.
	 */
	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	/* Without uevent support fences are polled; no notifier needed. */
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}
218 
/*
 * Initialize @fence, submit it to @chan, and queue it on the channel's
 * pending list.
 *
 * On success the pending list holds an extra reference on the fence
 * (dropped in nouveau_fence_signal) and the fence itself pins the
 * fence context via a kref (dropped in nouveau_fence_release).
 * Returns 0 on success or the negative error from fctx->emit.
 */
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
	int ret;

	fence->channel  = chan;
	fence->timeout  = jiffies + (15 * HZ);	/* deadline for busy waits */

	/* Pick interrupt-driven or polled fence ops to match hardware. */
	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_dma_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		/* Reference held by the pending list until signalled. */
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		/* Retire anything already completed before queueing. */
		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}
252 
/*
 * Check whether @fence has signalled, opportunistically retiring any
 * completed fences on its channel first.
 *
 * Only nouveau-owned fences can be polled against the channel's
 * sequence counter; everything else falls straight through to
 * dma_fence_is_signaled.
 */
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		/* Fast path: already signalled, nothing to retire. */
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		/* channel is NULLed once the fence signals; may be gone. */
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}
273 
/*
 * dma_fence_ops::wait implementation for hardware without uevent
 * support: poll nouveau_fence_done until the fence signals or @wait
 * jiffies elapse.
 *
 * Returns the remaining timeout (in jiffies) on success, 0 on timeout,
 * or -ERESTARTSYS when @intr and a signal arrived.  The NetBSD path
 * sleeps one tick per iteration via kpause; the Linux path backs off
 * exponentially with hrtimer sleeps capped at 1 ms.
 */
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
#ifndef __NetBSD__
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;	/* start at 1 us */
#endif
	unsigned long t = jiffies, timeout = t + wait;

#ifdef __NetBSD__
	while (!nouveau_fence_done(fence)) {
		int ret;
		/* XXX what lock? */
		/* XXX errno NetBSD->Linux */
		ret = -kpause("nvfencel", intr, 1, NULL);
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			return ret;
		}
		t = jiffies;
		if (t >= timeout)
			return 0;
	}
#else
	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
#endif

	return timeout - t;
}
327 
/*
 * Busy-wait for @fence until it signals or fence->timeout (set at emit
 * time to jiffies + 15s) expires.
 *
 * Returns 0 on completion, -EBUSY on timeout, or -ERESTARTSYS when
 * @intr and a signal is pending (Linux path only; the NetBSD path
 * spins with 1 ms delays and does not check for signals).
 */
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

#ifdef __NetBSD__
		/* XXX unlock anything? */
		/* XXX poll for interrupts? */
		DELAY(1000);
#else
		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif
	}

#ifndef __NetBSD__
	__set_current_state(TASK_RUNNING);
#endif
	return ret;
}
360 
361 int
nouveau_fence_wait(struct nouveau_fence * fence,bool lazy,bool intr)362 nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
363 {
364 	long ret;
365 
366 	if (!lazy)
367 		return nouveau_fence_wait_busy(fence, intr);
368 
369 	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
370 	if (ret < 0)
371 		return ret;
372 	else if (!ret)
373 		return -EBUSY;
374 	else
375 		return 0;
376 }
377 
/*
 * Make @chan synchronize with the fences attached to @nvbo's
 * reservation object before its next work.
 *
 * Considers the exclusive fence, plus every shared fence when
 * @exclusive is set and shared fences exist.  For each fence emitted
 * by one of our own channels we first try a cheap on-GPU
 * channel-to-channel sync via fctx->sync; foreign fences, or failed
 * syncs, fall back to a CPU-side dma_fence_wait.
 *
 * Returns 0 on success or a negative error (including -ERESTARTSYS
 * when @intr and the wait is interrupted).
 */
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_fence *fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	struct dma_resv_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		/* Make room for the shared fence we will add later. */
		ret = dma_resv_reserve_shared(resv, 1);

		if (ret)
			return ret;
	}

	fobj = dma_resv_get_list(resv);
	fence = dma_resv_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			/* RCU: the emitting channel may die concurrently. */
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	/* Exclusive access requested: wait on every shared fence too. */
	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = dma_fence_wait(fence, intr);
	}

	return ret;
}
442 
443 void
nouveau_fence_unref(struct nouveau_fence ** pfence)444 nouveau_fence_unref(struct nouveau_fence **pfence)
445 {
446 	if (*pfence)
447 		dma_fence_put(&(*pfence)->base);
448 	*pfence = NULL;
449 }
450 
451 int
nouveau_fence_new(struct nouveau_channel * chan,bool sysmem,struct nouveau_fence ** pfence)452 nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
453 		  struct nouveau_fence **pfence)
454 {
455 	struct nouveau_fence *fence;
456 	int ret = 0;
457 
458 	if (unlikely(!chan->fence))
459 		return -ENODEV;
460 
461 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
462 	if (!fence)
463 		return -ENOMEM;
464 
465 	ret = nouveau_fence_emit(fence, chan);
466 	if (ret)
467 		nouveau_fence_unref(&fence);
468 
469 	*pfence = fence;
470 	return ret;
471 }
472 
/* dma_fence_ops::get_driver_name: identify the driver owning a fence. */
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}
477 
nouveau_fence_get_timeline_name(struct dma_fence * f)478 static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
479 {
480 	struct nouveau_fence *fence = from_fence(f);
481 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
482 
483 	return !fctx->dead ? fctx->name : "dead channel";
484 }
485 
/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
/*
 * dma_fence_ops::signaled: compare the channel's current sequence
 * number against the fence's, under RCU so a concurrently dying
 * channel cannot be freed mid-read (see nouveau_fence_context_del).
 * Reports false when the channel pointer is already gone.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		/* Signed subtraction tolerates 32-bit seqno wraparound. */
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}
507 
/*
 * dma_fence_ops::enable_signaling for the legacy (no-uevent) path.
 *
 * There is no interrupt to arm, so this only checks whether the fence
 * already completed: if so, it is unlinked from the pending list and
 * the list's reference dropped, then false is returned so dma_fence
 * core signals it right away.  Otherwise returns true; the fence gets
 * signalled later by a poll through nouveau_fence_update.
 */
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		/* Drop the pending-list reference taken in emit. */
		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}
532 
nouveau_fence_release(struct dma_fence * f)533 static void nouveau_fence_release(struct dma_fence *f)
534 {
535 	struct nouveau_fence *fence = from_fence(f);
536 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
537 
538 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
539 	dma_fence_free(&fence->base);
540 }
541 
/*
 * Fence ops for hardware without uevent support: no interrupt-driven
 * signalling, so completion relies on polling (.wait / explicit
 * nouveau_fence_done calls).
 */
static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};
550 
/*
 * dma_fence_ops::enable_signaling for the uevent path.
 *
 * Arms the nvif notifier (the first user takes the notify reference)
 * and tags the fence with DMA_FENCE_FLAG_USER_BITS so
 * nouveau_fence_signal knows to drop that reference on completion.
 * When the fence has already signalled, the notify ref is undone and
 * false is returned.
 */
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}
568 
/*
 * Fence ops for hardware with uevent support: signalling is driven by
 * non-stall interrupts via the nvif notifier, so the default
 * dma_fence wait path suffices (no .wait override).
 */
static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};
576