/*	$NetBSD: nouveau_fence.c,v 1.15 2020/02/14 14:34:58 maya Exp $	*/

/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_fence.c,v 1.15 2020/02/14 14:34:58 maya Exp $");

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

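/*
 * Helpers to map a generic struct fence back to its containing
 * nouveau_fence, and a nouveau_fence to its per-channel fence context
 * (whose lock the fence borrows as fence->base.lock).
 */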
static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

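/*
 * Signal a fence and unlink it from the pending list.  Called with
 * fctx->lock held.  Returns nonzero when this was the last fence
 * holding a notifier reference, i.e. when the caller should put the
 * uevent notifier.
 */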
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	fence_put(&fence->base);
	return drop;
}

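/*
 * Return the containing nouveau_fence if the fence was emitted by this
 * device (our fence ops, context id within our range), else NULL.
 */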
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
{
	struct nouveau_fence_priv *priv = (void *)drm->fence;

	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < priv->context_base ||
	    fence->context >= priv->context_base + priv->contexts)
		return NULL;

	return from_fence(fence);
}

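/*
 * Tear down a channel's fence context: force-signal everything still
 * pending, shut down the uevent notifier, and wait out RCU readers of
 * fence->channel before the caller frees the channel.
 */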
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	struct nouveau_fence_chan *fctx =
	    container_of(fence_ref, struct nouveau_fence_chan, fence_ref);

	spin_lock_destroy(&fctx->lock);
	kfree(fctx);
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

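/*
 * Signal every pending fence whose sequence number the channel has
 * reached, using a wraparound-safe signed comparison.  Called with
 * fctx->lock held; returns nonzero if the caller should put the
 * uevent notifier.
 */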
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

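/*
 * uevent notifier callback: scan the pending list for newly completed
 * fences and ask nvif to drop the notifier once nothing is left that
 * needs it.
 */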
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;
	int ret = NVIF_NOTIFY_KEEP;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			ret = NVIF_NOTIFY_DROP;
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	return ret;
}

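/*
 * Initialize a channel's fence context: empty flip and pending lists,
 * a context id derived from the channel id, a human-readable timeline
 * name, and, when the backend supports uevents, a notifier wired to
 * nouveau_fence_wait_uevent_handler().
 */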
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);

	WARN_ON(ret);
}

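/*
 * Bookkeeping for nouveau_fence_work(): the fence callback may run in
 * interrupt context, so it only schedules the work item, which then
 * calls func(data) from process context and frees itself.
 */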
struct nouveau_fence_work {
	struct work_struct work;
	struct fence_cb cb;
	void (*func)(void *);
	void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
	work->func(work->data);
	kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

	schedule_work(&work->work);
}

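/*
 * Arrange for func(data) to run once the fence signals.  If the fence
 * has already signalled, the callback cannot be attached, or the work
 * item cannot be allocated, func(data) is invoked synchronously before
 * returning (after a lazy wait in the allocation-failure case).
 */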
void
nouveau_fence_work(struct fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_fence_work *work;

	if (fence_is_signaled(fence))
		goto err;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/*
		 * this might not be a nouveau fence any more,
		 * so force a lazy wait here
		 */
		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
					   true, false));
		goto err;
	}

	INIT_WORK(&work->work, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;

	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
		goto err_free;
	return;

err_free:
	kfree(work);
err:
	func(data);
}

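/*
 * Initialize the fence with the channel's context id and next sequence
 * number, submit it through the backend's emit hook, and on success
 * add it to the pending list.  Also arms the 15 second timeout used by
 * the busy-wait path.
 */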
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel  = chan;
	fence->timeout  = jiffies + (15 * HZ);

	if (priv->uevent)
		fence_init(&fence->base, &nouveau_fence_ops_uevent,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	else
		fence_init(&fence->base, &nouveau_fence_ops_legacy,
			   &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

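/*
 * Check whether the fence has completed, opportunistically signalling
 * everything the hardware has already passed.  Fences that did not
 * come from one of our channels are just checked with
 * fence_is_signaled().
 */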
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return fence_is_signaled(&fence->base);
}

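/*
 * Legacy wait (no uevents): poll nouveau_fence_done() until the fence
 * signals or the timeout expires.  The NetBSD path kpauses one tick
 * per iteration; the Linux path sleeps with exponential back-off up to
 * 1ms.  Returns the remaining jiffies, 0 on timeout, or a negative
 * errno if interrupted.
 */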
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
#ifndef __NetBSD__
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
#endif
	unsigned long t = jiffies, timeout = t + wait;

#ifdef __NetBSD__
	while (!nouveau_fence_done(fence)) {
		int ret;
		/* XXX what lock? */
		/* XXX errno NetBSD->Linux */
		ret = -kpause("nvfencel", intr, 1, NULL);
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			return ret;
		}
		t = jiffies;
		if (t >= timeout)
			return 0;
	}
#else
	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
#endif

	return timeout - t;
}

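/*
 * Busy-wait until the fence signals or its timeout expires.  Returns
 * 0 on completion, -EBUSY on timeout, or -ERESTARTSYS when an
 * interruptible wait is broken by a signal.
 */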
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

#ifdef __NetBSD__
		/* XXX unlock anything? */
		/* XXX poll for interrupts? */
		DELAY(1000);
#else
		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
#endif
	}

#ifndef __NetBSD__
	__set_current_state(TASK_RUNNING);
#endif
	return ret;
}

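/*
 * Wait for a fence to complete: busy-wait when lazy is false,
 * otherwise sleep via fence_wait_timeout() with the standard 15 second
 * timeout.  Returns 0 on success, -EBUSY on timeout, or a negative
 * errno.
 */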
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

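/*
 * Make chan wait for the fences already attached to nvbo's
 * reservation: the exclusive fence alone, or every shared fence when
 * an exclusive fence is about to be attached.  Fences from our own
 * channels are synced on the GPU via fctx->sync() when possible;
 * anything else falls back to a CPU fence_wait().
 */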
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);

		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;
		bool must_wait = true;

		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f) {
			rcu_read_lock();
			prev = rcu_dereference(f->channel);
			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
				must_wait = false;
			rcu_read_unlock();
		}

		if (must_wait)
			ret = fence_wait(fence, intr);
	}

	return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		fence_put(&(*pfence)->base);
	*pfence = NULL;
}

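/*
 * Allocate a fence and emit it on the channel.  On failure the fence
 * is released and *pfence is set to NULL.
 *
 * Typical usage (illustrative sketch only; real callers must also
 * serialize access to the channel and check errors):
 *
 *	struct nouveau_fence *fence = NULL;
 *	int ret = nouveau_fence_new(chan, false, &fence);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, true, true);
 *		nouveau_fence_unref(&fence);
 *	}
 */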
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->sysmem = sysmem;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read() would not assume the channel context is
 * still alive.  This function may be called from another device,
 * racing with channel teardown and thus reading freed memory.  The drm
 * node should still be there, so we can derive the index from the
 * fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

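/*
 * enable_signaling hook for the legacy ops: with no interrupt to hook,
 * either the fence is already done (drop it from the pending list and
 * report false) or the caller must keep polling (report true).
 */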
static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(!kref_referenced_p(&fence->base.refcount));
	WARN_ON(kref_exclusive_p(&fence->base.refcount));

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

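/*
 * enable_signaling hook for the uevent ops: take a notifier reference
 * (the first taker enables the uevent interrupt) and mark the fence so
 * nouveau_fence_signal() drops the reference once it fires.
 */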
static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	.release = NULL
};
627