1 /* $NetBSD: vmwgfx_fence.c,v 1.4 2022/10/25 23:34:05 riastradh Exp $ */
2
3 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 /**************************************************************************
5 *
6 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
23 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
24 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
25 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
26 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 **************************************************************************/
29
30 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_fence.c,v 1.4 2022/10/25 23:34:05 riastradh Exp $");
32
33 #include <linux/sched/signal.h>
34
35 #include "vmwgfx_drv.h"
36
37 #include <linux/nbsd-namespace.h>
38
39 #define VMW_FENCE_WRAP (1 << 31)
40
41 struct vmw_fence_manager {
42 int num_fence_objects;
43 struct vmw_private *dev_priv;
44 spinlock_t lock;
45 struct list_head fence_list;
46 struct work_struct work;
47 u32 user_fence_size;
48 u32 fence_size;
49 u32 event_fence_action_size;
50 bool fifo_down;
51 struct list_head cleanup_list;
52 uint32_t pending_actions[VMW_ACTION_MAX];
53 struct mutex goal_irq_mutex;
54 bool goal_irq_on; /* Protected by @goal_irq_mutex */
55 bool seqno_valid; /* Protected by @lock, and may not be set to true
56 without the @goal_irq_mutex held. */
57 u64 ctx;
58 };
59
60 struct vmw_user_fence {
61 struct ttm_base_object base;
62 struct vmw_fence_obj fence;
63 };
64
65 /**
66 * struct vmw_event_fence_action - fence action that delivers a drm event.
67 *
68 * @event: A struct drm_pending_event that controls the event delivery.
69 * @action: A struct vmw_fence_action to hook up to a fence.
70 * @fence: A referenced pointer to the fence to keep it alive while @action
71 * hangs on it.
72 * @dev: Pointer to a struct drm_device so we can access the event stuff.
75 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
76 * current time tv_sec value when the fence signals.
77 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
78 * be assigned the current time tv_usec val when the fence signals.
79 */
80 struct vmw_event_fence_action {
81 struct vmw_fence_action action;
82
83 struct drm_pending_event *event;
84 struct vmw_fence_obj *fence;
85 struct drm_device *dev;
86
87 uint32_t *tv_sec;
88 uint32_t *tv_usec;
89 };
90
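/**
 * fman_from_fence - Return the fence manager owning a fence object.
 *
 * @fence: The fence object.
 *
 * The fence was initialized with the manager's spinlock as its lock,
 * so the manager can be recovered with container_of().
 */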
91 static struct vmw_fence_manager *
92 fman_from_fence(struct vmw_fence_obj *fence)
93 {
94 return container_of(fence->base.lock, struct vmw_fence_manager, lock);
95 }
96
97 /**
98 * Note on fencing subsystem usage of irqs:
99 * Typically the vmw_fences_update function is called
100 *
101 * a) When a new fence seqno has been submitted by the fifo code.
102 * b) On-demand when we have waiters. Sleeping waiters will switch on the
103 * ANY_FENCE irq and call the vmw_fences_update function each time an ANY_FENCE
104 * irq is received. When the last fence waiter is gone, that IRQ is masked
105 * away.
106 *
107 * In situations where there are no waiters and we don't submit any new fences,
108 * fence objects may not be signaled. This is perfectly OK, since there are
109 * no consumers of the signaled data, but that is NOT ok when there are fence
110 * actions attached to a fence. The fencing subsystem then makes use of the
111 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
112 * which has an action attached, and each time vmw_fences_update is called,
113 * the subsystem makes sure the fence goal seqno is updated.
114 *
115 * The fence goal seqno irq is on as long as there are unsignaled fence
116 * objects with actions attached to them.
117 */
118
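/**
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * @f: The embedded struct dma_fence.
 *
 * Unlinks the fence from the fence manager's list and calls the
 * fence object's destroy callback.
 */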
119 static void vmw_fence_obj_destroy(struct dma_fence *f)
120 {
121 struct vmw_fence_obj *fence =
122 container_of(f, struct vmw_fence_obj, base);
123
124 struct vmw_fence_manager *fman = fman_from_fence(fence);
125
126 spin_lock(&fman->lock);
127 list_del_init(&fence->head);
128 --fman->num_fence_objects;
129 spin_unlock(&fman->lock);
130 fence->destroy(fence);
131 }
132
133 static const char *vmw_fence_get_driver_name(struct dma_fence *f)
134 {
135 return "vmwgfx";
136 }
137
138 static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
139 {
140 return "svga";
141 }
142
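/**
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * @f: The embedded struct dma_fence.
 *
 * Returns false if the fence seqno has already passed according to the
 * device FIFO; otherwise pings the host to keep fence processing going
 * and returns true.
 */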
143 static bool vmw_fence_enable_signaling(struct dma_fence *f)
144 {
145 struct vmw_fence_obj *fence =
146 container_of(f, struct vmw_fence_obj, base);
147
148 struct vmw_fence_manager *fman = fman_from_fence(fence);
149 struct vmw_private *dev_priv = fman->dev_priv;
150
151 u32 *fifo_mem = dev_priv->mmio_virt;
152 u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
153 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
154 return false;
155
156 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
157
158 return true;
159 }
160
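/*
 * Per-waiter callback state for vmw_fence_wait: on NetBSD the fence
 * callback wakes a dedicated waitqueue, on Linux it wakes the waiting task.
 */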
161 struct vmwgfx_wait_cb {
162 struct dma_fence_cb base;
163 #ifdef __NetBSD__
164 drm_waitqueue_t wq;
165 #else
166 struct task_struct *task;
167 #endif
168 };
169
170 static void
171 vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
172 {
173 struct vmwgfx_wait_cb *wait =
174 container_of(cb, struct vmwgfx_wait_cb, base);
175
176 #ifdef __NetBSD__
177 DRM_SPIN_WAKEUP_ALL(&wait->wq, fence->lock);
178 #else
179 wake_up_process(wait->task);
180 #endif
181 }
182
183 static void __vmw_fences_update(struct vmw_fence_manager *fman);
184
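/**
 * vmw_fence_wait - dma_fence wait callback.
 *
 * @f: The embedded struct dma_fence.
 * @intr: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Waits for the fence to signal, rechecking the device seqno via
 * __vmw_fences_update() on each wakeup. Returns the remaining timeout,
 * zero on timeout, or -ERESTARTSYS if interrupted by a signal.
 */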
185 static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
186 {
187 struct vmw_fence_obj *fence =
188 container_of(f, struct vmw_fence_obj, base);
189
190 struct vmw_fence_manager *fman = fman_from_fence(fence);
191 struct vmw_private *dev_priv = fman->dev_priv;
192 struct vmwgfx_wait_cb cb;
193 long ret = timeout;
194
195 if (likely(vmw_fence_obj_signaled(fence)))
196 return timeout;
197
198 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
199 vmw_seqno_waiter_add(dev_priv);
200
201 spin_lock(f->lock);
202
203 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
204 goto out;
205
206 if (intr && signal_pending(current)) {
207 ret = -ERESTARTSYS;
208 goto out;
209 }
210
211 #ifdef __NetBSD__
212 DRM_INIT_WAITQUEUE(&cb.wq, "vmwgfxwf");
213 #else
214 cb.task = current;
215 #endif
216 spin_unlock(f->lock);
217 ret = dma_fence_add_callback(f, &cb.base, vmwgfx_wait_cb);
218 spin_lock(f->lock);
219 if (ret)
220 goto out;
221
222 #ifdef __NetBSD__
223 #define C (__vmw_fences_update(fman), dma_fence_is_signaled_locked(f))
224 if (intr) {
225 DRM_SPIN_TIMED_WAIT_UNTIL(ret, &cb.wq, f->lock, timeout, C);
226 } else {
227 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &cb.wq, f->lock, timeout,
228 C);
229 }
230 #else
231 for (;;) {
232 __vmw_fences_update(fman);
233
234 /*
235 * We can use the barrier free __set_current_state() since
236 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
237 * fence spinlock.
238 */
239 if (intr)
240 __set_current_state(TASK_INTERRUPTIBLE);
241 else
242 __set_current_state(TASK_UNINTERRUPTIBLE);
243
244 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
245 if (ret == 0 && timeout > 0)
246 ret = 1;
247 break;
248 }
249
250 if (intr && signal_pending(current)) {
251 ret = -ERESTARTSYS;
252 break;
253 }
254
255 if (ret == 0)
256 break;
257
258 spin_unlock(f->lock);
259
260 ret = schedule_timeout(ret);
261
262 spin_lock(f->lock);
263 }
264 __set_current_state(TASK_RUNNING);
265 if (!list_empty(&cb.base.node))
266 list_del(&cb.base.node);
267 #endif
268 spin_unlock(f->lock);
269 dma_fence_remove_callback(f, &cb.base);
270 spin_lock(f->lock);
271
272 out:
273 spin_unlock(f->lock);
274 #ifdef __NetBSD__
275 DRM_DESTROY_WAITQUEUE(&cb.wq);
276 #endif
277
278 vmw_seqno_waiter_remove(dev_priv);
279
280 return ret;
281 }
282
283 static const struct dma_fence_ops vmw_fence_ops = {
284 .get_driver_name = vmw_fence_get_driver_name,
285 .get_timeline_name = vmw_fence_get_timeline_name,
286 .enable_signaling = vmw_fence_enable_signaling,
287 .wait = vmw_fence_wait,
288 .release = vmw_fence_obj_destroy,
289 };
290
291
292 /**
293 * Execute signal actions on fences recently signaled.
294 * This is done from a workqueue so we don't have to execute
295 * signal actions from atomic context.
296 */
297
298 static void vmw_fence_work_func(struct work_struct *work)
299 {
300 struct vmw_fence_manager *fman =
301 container_of(work, struct vmw_fence_manager, work);
302 struct list_head list;
303 struct vmw_fence_action *action, *next_action;
304 bool seqno_valid;
305
306 do {
307 INIT_LIST_HEAD(&list);
308 mutex_lock(&fman->goal_irq_mutex);
309
310 spin_lock(&fman->lock);
311 list_splice_init(&fman->cleanup_list, &list);
312 seqno_valid = fman->seqno_valid;
313 spin_unlock(&fman->lock);
314
315 if (!seqno_valid && fman->goal_irq_on) {
316 fman->goal_irq_on = false;
317 vmw_goal_waiter_remove(fman->dev_priv);
318 }
319 mutex_unlock(&fman->goal_irq_mutex);
320
321 if (list_empty(&list))
322 return;
323
324 /*
325 * At this point, only we should be able to manipulate the
326 * list heads of the actions we have on the private list,
327 * hence fman::lock need not be held.
328 */
329
330 list_for_each_entry_safe(action, next_action, &list, head) {
331 list_del_init(&action->head);
332 if (action->cleanup)
333 action->cleanup(action);
334 }
335 } while (1);
336 }
337
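/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager or NULL on allocation failure. The FIFO
 * is initially marked down, so fence creation fails with -EBUSY until
 * vmw_fence_fifo_up() is called.
 */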
338 struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
339 {
340 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
341
342 if (unlikely(!fman))
343 return NULL;
344
345 fman->dev_priv = dev_priv;
346 spin_lock_init(&fman->lock);
347 INIT_LIST_HEAD(&fman->fence_list);
348 INIT_LIST_HEAD(&fman->cleanup_list);
349 INIT_WORK(&fman->work, &vmw_fence_work_func);
350 fman->fifo_down = true;
351 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
352 TTM_OBJ_EXTRA_SIZE;
353 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
354 fman->event_fence_action_size =
355 ttm_round_pot(sizeof(struct vmw_event_fence_action));
356 mutex_init(&fman->goal_irq_mutex);
357 fman->ctx = dma_fence_context_alloc(1);
358
359 return fman;
360 }
361
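/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * Cancels any pending cleanup work. All fence objects must have been
 * destroyed beforehand.
 */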
362 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
363 {
364 bool lists_empty;
365
366 (void) cancel_work_sync(&fman->work);
367
368 spin_lock(&fman->lock);
369 lists_empty = list_empty(&fman->fence_list) &&
370 list_empty(&fman->cleanup_list);
371 spin_unlock(&fman->lock);
372
373 BUG_ON(!lists_empty);
374 kfree(fman);
375 }
376
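/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the manager.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno to associate with the fence.
 * @destroy: Callback used to destroy the fence object.
 *
 * Returns -EBUSY if the FIFO is down, zero otherwise.
 */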
377 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
378 struct vmw_fence_obj *fence, u32 seqno,
379 void (*destroy) (struct vmw_fence_obj *fence))
380 {
381 int ret = 0;
382
383 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
384 fman->ctx, seqno);
385 INIT_LIST_HEAD(&fence->seq_passed_actions);
386 fence->destroy = destroy;
387
388 spin_lock(&fman->lock);
389 if (unlikely(fman->fifo_down)) {
390 ret = -EBUSY;
391 goto out_unlock;
392 }
393 list_add_tail(&fence->head, &fman->fence_list);
394 ++fman->num_fence_objects;
395
396 out_unlock:
397 spin_unlock(&fman->lock);
398 return ret;
399
400 }
401
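/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of a list of
 * fence actions and move the actions to the cleanup list.
 *
 * @fman: Pointer to the fence manager.
 * @list: List of actions whose fences have signaled.
 */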
402 static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
403 struct list_head *list)
404 {
405 struct vmw_fence_action *action, *next_action;
406
407 list_for_each_entry_safe(action, next_action, list, head) {
408 list_del_init(&action->head);
409 fman->pending_actions[action->type]--;
410 if (action->seq_passed != NULL)
411 action->seq_passed(action);
412
413 /*
414 * Add the cleanup action to the cleanup list so that
415 * it will be performed by a worker task.
416 */
417
418 list_add_tail(&action->head, &fman->cleanup_list);
419 }
420 }
421
422 /**
423 * vmw_fence_goal_new_locked - Figure out a new device fence goal
424 * seqno if needed.
425 *
426 * @fman: Pointer to a fence manager.
427 * @passed_seqno: The seqno the device currently signals as passed.
428 *
429 * This function should be called with the fence manager lock held.
430 * It is typically called when we have a new passed_seqno, and
431 * we might need to update the fence goal. It checks to see whether
432 * the current fence goal has already passed, and, in that case,
433 * scans through all unsignaled fences to get the next fence object with an
434 * action attached, and sets the seqno of that fence as a new fence goal.
435 *
436 * Returns true if the device goal seqno was updated, false otherwise.
437 */
438 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
439 u32 passed_seqno)
440 {
441 u32 goal_seqno;
442 u32 *fifo_mem;
443 struct vmw_fence_obj *fence;
444
445 if (likely(!fman->seqno_valid))
446 return false;
447
448 fifo_mem = fman->dev_priv->mmio_virt;
449 goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
450 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
451 return false;
452
453 fman->seqno_valid = false;
454 list_for_each_entry(fence, &fman->fence_list, head) {
455 if (!list_empty(&fence->seq_passed_actions)) {
456 fman->seqno_valid = true;
457 vmw_mmio_write(fence->base.seqno,
458 fifo_mem + SVGA_FIFO_FENCE_GOAL);
459 break;
460 }
461 }
462
463 return true;
464 }
465
466
467 /**
468 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
469 * needed.
470 *
471 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
472 * considered as a device fence goal.
473 *
474 * This function should be called with the fence manager lock held.
475 * It is typically called when an action has been attached to a fence to
476 * check whether the seqno of that fence should be used for a fence
477 * goal interrupt. This is typically needed if the current fence goal is
478 * invalid, or has a higher seqno than that of the current fence object.
479 *
480 * Returns true if the device goal seqno was updated, false otherwise.
481 */
482 static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
483 {
484 struct vmw_fence_manager *fman = fman_from_fence(fence);
485 u32 goal_seqno;
486 u32 *fifo_mem;
487
488 if (dma_fence_is_signaled_locked(&fence->base))
489 return false;
490
491 fifo_mem = fman->dev_priv->mmio_virt;
492 goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
493 if (likely(fman->seqno_valid &&
494 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
495 return false;
496
497 vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
498 fman->seqno_valid = true;
499
500 return true;
501 }
502
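/**
 * __vmw_fences_update - Signal all fences whose seqno has passed.
 *
 * @fman: Pointer to the fence manager.
 *
 * Reads the last passed seqno from the FIFO, signals the corresponding
 * fence objects and performs their actions. Called with the fence
 * manager lock held.
 */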
503 static void __vmw_fences_update(struct vmw_fence_manager *fman)
504 {
505 struct vmw_fence_obj *fence, *next_fence;
506 struct list_head action_list;
507 bool needs_rerun;
508 uint32_t seqno, new_seqno;
509 u32 *fifo_mem = fman->dev_priv->mmio_virt;
510
511 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
512 rerun:
513 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
514 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
515 list_del_init(&fence->head);
516 dma_fence_signal_locked(&fence->base);
517 INIT_LIST_HEAD(&action_list);
518 list_splice_init(&fence->seq_passed_actions,
519 &action_list);
520 vmw_fences_perform_actions(fman, &action_list);
521 } else
522 break;
523 }
524
525 /*
526 * Rerun if the fence goal seqno was updated, and the
527 * hardware might have raced with that update, so that
528 * we missed a fence_goal irq.
529 */
530
531 needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
532 if (unlikely(needs_rerun)) {
533 new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
534 if (new_seqno != seqno) {
535 seqno = new_seqno;
536 goto rerun;
537 }
538 }
539
540 if (!list_empty(&fman->cleanup_list))
541 (void) schedule_work(&fman->work);
542 }
543
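/**
 * vmw_fences_update - Signal all fences whose seqno has passed, taking
 * the fence manager lock.
 *
 * @fman: Pointer to the fence manager.
 */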
544 void vmw_fences_update(struct vmw_fence_manager *fman)
545 {
546 spin_lock(&fman->lock);
547 __vmw_fences_update(fman);
548 spin_unlock(&fman->lock);
549 }
550
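/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence object.
 *
 * Updates the fence manager from the device seqno before checking.
 */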
551 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
552 {
553 struct vmw_fence_manager *fman = fman_from_fence(fence);
554
555 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
556 return 1;
557
558 vmw_fences_update(fman);
559
560 return dma_fence_is_signaled(&fence->base);
561 }
562
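/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence object.
 * @lazy: Currently unused.
 * @interruptible: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout, or a negative error code.
 */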
563 int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
564 bool interruptible, unsigned long timeout)
565 {
566 long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);
567
568 if (likely(ret > 0))
569 return 0;
570 else if (ret == 0)
571 return -EBUSY;
572 else
573 return ret;
574 }
575
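/**
 * vmw_fence_obj_flush - Ping the host to have it process pending fences.
 *
 * @fence: The fence object whose device is pinged.
 */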
576 void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
577 {
578 struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
579
580 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
581 }
582
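/**
 * vmw_fence_destroy - Destroy callback for kernel-only fence objects.
 *
 * @fence: The fence object to free.
 */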
583 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
584 {
585 dma_fence_free(&fence->base);
586 }
587
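/**
 * vmw_fence_create - Allocate and initialize a kernel fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno to associate with the fence.
 * @p_fence: On success, assigned a pointer to the new fence object.
 *
 * Returns 0 on success, or -ENOMEM / -EBUSY on failure.
 */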
588 int vmw_fence_create(struct vmw_fence_manager *fman,
589 uint32_t seqno,
590 struct vmw_fence_obj **p_fence)
591 {
592 struct vmw_fence_obj *fence;
593 int ret;
594
595 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
596 if (unlikely(!fence))
597 return -ENOMEM;
598
599 ret = vmw_fence_obj_init(fman, fence, seqno,
600 vmw_fence_destroy);
601 if (unlikely(ret != 0))
602 goto out_err_init;
603
604 *p_fence = fence;
605 return 0;
606
607 out_err_init:
608 kfree(fence);
609 return ret;
610 }
611
612
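/**
 * vmw_user_fence_destroy - Destroy callback for user-space fence objects.
 *
 * @fence: The embedded fence object.
 *
 * Frees the containing struct vmw_user_fence and releases its memory
 * accounting.
 */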
613 static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
614 {
615 struct vmw_user_fence *ufence =
616 container_of(fence, struct vmw_user_fence, fence);
617 struct vmw_fence_manager *fman = fman_from_fence(fence);
618
619 ttm_base_object_kfree(ufence, base);
620 /*
621 * Free kernel space accounting.
622 */
623 ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
624 fman->user_fence_size);
625 }
626
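/**
 * vmw_user_fence_base_release - TTM base object release callback.
 *
 * @p_base: Pointer to the base object pointer, which is cleared.
 *
 * Drops the fence reference held by the base object.
 */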
627 static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
628 {
629 struct ttm_base_object *base = *p_base;
630 struct vmw_user_fence *ufence =
631 container_of(base, struct vmw_user_fence, base);
632 struct vmw_fence_obj *fence = &ufence->fence;
633
634 *p_base = NULL;
635 vmw_fence_obj_unreference(&fence);
636 }
637
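/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The file connection requesting the fence.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno to associate with the fence.
 * @p_fence: On success, assigned a pointer to the new fence object.
 * @p_handle: On success, assigned the user-space handle of the fence.
 *
 * Returns 0 on success or a negative error code on failure.
 */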
638 int vmw_user_fence_create(struct drm_file *file_priv,
639 struct vmw_fence_manager *fman,
640 uint32_t seqno,
641 struct vmw_fence_obj **p_fence,
642 uint32_t *p_handle)
643 {
644 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
645 struct vmw_user_fence *ufence;
646 struct vmw_fence_obj *tmp;
647 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
648 struct ttm_operation_ctx ctx = {
649 .interruptible = false,
650 .no_wait_gpu = false
651 };
652 int ret;
653
654 /*
655 * Kernel memory space accounting, since this object may
656 * be created by a user-space request.
657 */
658
659 ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
660 &ctx);
661 if (unlikely(ret != 0))
662 return ret;
663
664 ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
665 if (unlikely(!ufence)) {
666 ret = -ENOMEM;
667 goto out_no_object;
668 }
669
670 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
671 vmw_user_fence_destroy);
672 if (unlikely(ret != 0)) {
673 kfree(ufence);
674 goto out_no_object;
675 }
676
677 /*
678 * The base object holds a reference which is freed in
679 * vmw_user_fence_base_release.
680 */
681 tmp = vmw_fence_obj_reference(&ufence->fence);
682 ret = ttm_base_object_init(tfile, &ufence->base, false,
683 VMW_RES_FENCE,
684 &vmw_user_fence_base_release, NULL);
685
686
687 if (unlikely(ret != 0)) {
688 /*
689 * Free the base object's reference
690 */
691 vmw_fence_obj_unreference(&tmp);
692 goto out_err;
693 }
694
695 *p_fence = &ufence->fence;
696 *p_handle = ufence->base.handle;
697
698 return 0;
699 out_err:
700 tmp = &ufence->fence;
701 vmw_fence_obj_unreference(&tmp);
702 out_no_object:
703 ttm_mem_global_free(mem_glob, fman->user_fence_size);
704 return ret;
705 }
706
707
708 /**
709 * vmw_wait_dma_fence - Wait for a dma fence
710 *
711 * @fman: pointer to a fence manager
712 * @fence: DMA fence to wait on
713 *
714 * This function handles the case when the fence is actually a fence
715 * array. If that's the case, it'll wait on each of the child fences.
716 */
717 int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
718 struct dma_fence *fence)
719 {
720 struct dma_fence_array *fence_array;
721 int ret = 0;
722 int i;
723
724
725 if (dma_fence_is_signaled(fence))
726 return 0;
727
728 if (!dma_fence_is_array(fence))
729 return dma_fence_wait(fence, true);
730
731 /* From i915: Note that if the fence-array was created in
732 * signal-on-any mode, we should *not* decompose it into its individual
733 * fences. However, we don't currently store which mode the fence-array
734 * is operating in. Fortunately, the only user of signal-on-any is
735 * private to amdgpu and we should not see any incoming fence-array
736 * from sync-file being in signal-on-any mode.
737 */
738
739 fence_array = to_dma_fence_array(fence);
740 for (i = 0; i < fence_array->num_fences; i++) {
741 struct dma_fence *child = fence_array->fences[i];
742
743 ret = dma_fence_wait(child, true);
744
745 if (ret < 0)
746 return ret;
747 }
748
749 return 0;
750 }
751
752
753 /**
754 * vmw_fence_fifo_down - signal all unsignaled fence objects.
755 */
756
757 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
758 {
759 struct list_head action_list;
760 int ret;
761
762 /*
763 * The list may be altered while we traverse it, so always
764 * restart when we've released the fman->lock.
765 */
766
767 spin_lock(&fman->lock);
768 fman->fifo_down = true;
769 while (!list_empty(&fman->fence_list)) {
770 struct vmw_fence_obj *fence =
771 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
772 head);
773 dma_fence_get(&fence->base);
774 spin_unlock(&fman->lock);
775
776 ret = vmw_fence_obj_wait(fence, false, false,
777 VMW_FENCE_WAIT_TIMEOUT);
778
779 if (unlikely(ret != 0)) {
780 list_del_init(&fence->head);
781 dma_fence_signal(&fence->base);
782 INIT_LIST_HEAD(&action_list);
783 list_splice_init(&fence->seq_passed_actions,
784 &action_list);
785 vmw_fences_perform_actions(fman, &action_list);
786 }
787
788 BUG_ON(!list_empty(&fence->head));
789 dma_fence_put(&fence->base);
790 spin_lock(&fman->lock);
791 }
792 spin_unlock(&fman->lock);
793 }
794
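/**
 * vmw_fence_fifo_up - Allow fence objects to be created again.
 *
 * @fman: Pointer to the fence manager.
 */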
795 void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
796 {
797 spin_lock(&fman->lock);
798 fman->fifo_down = false;
799 spin_unlock(&fman->lock);
800 }
801
802
803 /**
804 * vmw_fence_obj_lookup - Look up a user-space fence object
805 *
806 * @tfile: A struct ttm_object_file identifying the caller.
807 * @handle: A handle identifying the fence object.
808 * @return: A struct vmw_user_fence base ttm object on success or
809 * an error pointer on failure.
810 *
811 * The fence object is looked up and type-checked. The caller needs
812 * to have opened the fence object first, but since that happens on
813 * creation and fence objects aren't shareable, that's not an
814 * issue currently.
815 */
816 static struct ttm_base_object *
817 vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
818 {
819 struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
820
821 if (!base) {
822 pr_err("Invalid fence object handle 0x%08lx.\n",
823 (unsigned long)handle);
824 return ERR_PTR(-EINVAL);
825 }
826
827 if (base->refcount_release != vmw_user_fence_base_release) {
828 pr_err("Invalid fence object handle 0x%08lx.\n",
829 (unsigned long)handle);
830 ttm_base_object_unref(&base);
831 return ERR_PTR(-EINVAL);
832 }
833
834 return base;
835 }
836
837
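/**
 * vmw_fence_obj_wait_ioctl - Ioctl waiting for a user-space fence object.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_wait_arg ioctl argument.
 * @file_priv: The calling file.
 *
 * Converts the user-supplied timeout to jiffies, waits for the fence and
 * optionally drops the user-space reference on success.
 */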
838 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
839 struct drm_file *file_priv)
840 {
841 struct drm_vmw_fence_wait_arg *arg =
842 (struct drm_vmw_fence_wait_arg *)data;
843 unsigned long timeout;
844 struct ttm_base_object *base;
845 struct vmw_fence_obj *fence;
846 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
847 int ret;
848 uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
849
850 /*
851 * 64-bit division not present on 32-bit systems, so do an
852 * approximation. (Divide by 1000000).
853 */
854
855 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
856 (wait_timeout >> 26);
857
858 if (!arg->cookie_valid) {
859 arg->cookie_valid = 1;
860 arg->kernel_cookie = jiffies + wait_timeout;
861 }
862
863 base = vmw_fence_obj_lookup(tfile, arg->handle);
864 if (IS_ERR(base))
865 return PTR_ERR(base);
866
867 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
868
869 timeout = jiffies;
870 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
871 ret = ((vmw_fence_obj_signaled(fence)) ?
872 0 : -EBUSY);
873 goto out;
874 }
875
876 timeout = (unsigned long)arg->kernel_cookie - timeout;
877
878 ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
879
880 out:
881 ttm_base_object_unref(&base);
882
883 /*
884 * Optionally unref the fence object.
885 */
886
887 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
888 return ttm_ref_object_base_unref(tfile, arg->handle,
889 TTM_REF_USAGE);
890 return ret;
891 }
892
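/**
 * vmw_fence_obj_signaled_ioctl - Ioctl checking whether a fence has signaled.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_signaled_arg ioctl argument.
 * @file_priv: The calling file.
 *
 * Reports the signaled state and the last seqno the device has passed.
 */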
893 int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
894 struct drm_file *file_priv)
895 {
896 struct drm_vmw_fence_signaled_arg *arg =
897 (struct drm_vmw_fence_signaled_arg *) data;
898 struct ttm_base_object *base;
899 struct vmw_fence_obj *fence;
900 struct vmw_fence_manager *fman;
901 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
902 struct vmw_private *dev_priv = vmw_priv(dev);
903
904 base = vmw_fence_obj_lookup(tfile, arg->handle);
905 if (IS_ERR(base))
906 return PTR_ERR(base);
907
908 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
909 fman = fman_from_fence(fence);
910
911 arg->signaled = vmw_fence_obj_signaled(fence);
912
913 arg->signaled_flags = arg->flags;
914 spin_lock(&dev_priv->fence_lock);
915 const u32 seqno = dev_priv->last_read_seqno;
916 spin_unlock(&dev_priv->fence_lock);
917 spin_lock(&fman->lock);
918 arg->passed_seqno = seqno;
919 spin_unlock(&fman->lock);
920
921 ttm_base_object_unref(&base);
922
923 return 0;
924 }
925
926
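/**
 * vmw_fence_obj_unref_ioctl - Ioctl dropping a user-space fence reference.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_arg ioctl argument.
 * @file_priv: The calling file.
 */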
927 int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
928 struct drm_file *file_priv)
929 {
930 struct drm_vmw_fence_arg *arg =
931 (struct drm_vmw_fence_arg *) data;
932
933 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
934 arg->handle,
935 TTM_REF_USAGE);
936 }
937
938 /**
939 * vmw_event_fence_action_seq_passed
940 *
941 * @action: The struct vmw_fence_action embedded in a struct
942 * vmw_event_fence_action.
943 *
944 * This function is called when the seqno of the fence where @action is
945 * attached has passed. It queues the event on the submitter's event list.
946 * This function is always called from atomic context.
947 */
948 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
949 {
950 struct vmw_event_fence_action *eaction =
951 container_of(action, struct vmw_event_fence_action, action);
952 struct drm_device *dev = eaction->dev;
953 struct drm_pending_event *event = eaction->event;
954
955 if (unlikely(event == NULL))
956 return;
957
958 spin_lock_irq(&dev->event_lock);
959
960 if (likely(eaction->tv_sec != NULL)) {
961 struct timespec64 ts;
962
963 ktime_get_ts64(&ts);
964 /* monotonic time, so no y2038 overflow */
965 *eaction->tv_sec = ts.tv_sec;
966 *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
967 }
968
969 drm_send_event_locked(dev, eaction->event);
970 eaction->event = NULL;
971 spin_unlock_irq(&dev->event_lock);
972 }
973
974 /**
975 * vmw_event_fence_action_cleanup
976 *
977 * @action: The struct vmw_fence_action embedded in a struct
978 * vmw_event_fence_action.
979 *
980 * This function is the struct vmw_fence_action destructor. It's typically
981 * called from a workqueue.
982 */
983 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
984 {
985 struct vmw_event_fence_action *eaction =
986 container_of(action, struct vmw_event_fence_action, action);
987
988 vmw_fence_obj_unreference(&eaction->fence);
989 kfree(eaction);
990 }
991
992
993 /**
994 * vmw_fence_obj_add_action - Add an action to a fence object.
995 *
996 * @fence: The fence object.
997 * @action: The action to add.
998 *
999 * Note that the action callbacks may be executed before this function
1000 * returns.
1001 */
1002 static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
1003 struct vmw_fence_action *action)
1004 {
1005 struct vmw_fence_manager *fman = fman_from_fence(fence);
1006 bool run_update = false;
1007
1008 mutex_lock(&fman->goal_irq_mutex);
1009 spin_lock(&fman->lock);
1010
1011 fman->pending_actions[action->type]++;
1012 if (dma_fence_is_signaled_locked(&fence->base)) {
1013 struct list_head action_list;
1014
1015 INIT_LIST_HEAD(&action_list);
1016 list_add_tail(&action->head, &action_list);
1017 vmw_fences_perform_actions(fman, &action_list);
1018 } else {
1019 list_add_tail(&action->head, &fence->seq_passed_actions);
1020
1021 /*
1022 * This function may set fman::seqno_valid, so it must
1023 * be run with the goal_irq_mutex held.
1024 */
1025 run_update = vmw_fence_goal_check_locked(fence);
1026 }
1027
1028 spin_unlock(&fman->lock);
1029
1030 if (run_update) {
1031 if (!fman->goal_irq_on) {
1032 fman->goal_irq_on = true;
1033 vmw_goal_waiter_add(fman->dev_priv);
1034 }
1035 vmw_fences_update(fman);
1036 }
1037 mutex_unlock(&fman->goal_irq_mutex);
1038
1039 }
1040
1041 /**
1042 * vmw_event_fence_action_queue - Post an event for sending when a fence
1043 * object seqno has passed.
1044 *
1045 * @file_priv: The file connection on which the event should be posted.
1046 * @fence: The fence object on which to post the event.
1047 * @event: Event to be posted. This event should've been alloced
1048 * using k[mz]alloc, and should've been completely initialized.
1049 * @interruptible: Interruptible waits if possible.
1050 *
1051 * As a side effect, the object pointed to by @event may have been
1052 * freed when this function returns. If this function returns with
1053 * an error code, the caller needs to free that object.
1054 */
1055
1056 int vmw_event_fence_action_queue(struct drm_file *file_priv,
1057 struct vmw_fence_obj *fence,
1058 struct drm_pending_event *event,
1059 uint32_t *tv_sec,
1060 uint32_t *tv_usec,
1061 bool interruptible)
1062 {
1063 struct vmw_event_fence_action *eaction;
1064 struct vmw_fence_manager *fman = fman_from_fence(fence);
1065
1066 eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
1067 if (unlikely(!eaction))
1068 return -ENOMEM;
1069
1070 eaction->event = event;
1071
1072 eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
1073 eaction->action.cleanup = vmw_event_fence_action_cleanup;
1074 eaction->action.type = VMW_ACTION_EVENT;
1075
1076 eaction->fence = vmw_fence_obj_reference(fence);
1077 eaction->dev = fman->dev_priv->dev;
1078 eaction->tv_sec = tv_sec;
1079 eaction->tv_usec = tv_usec;
1080
1081 vmw_fence_obj_add_action(fence, &eaction->action);
1082
1083 return 0;
1084 }
1085
1086 struct vmw_event_fence_pending {
1087 struct drm_pending_event base;
1088 struct drm_vmw_event_fence event;
1089 };
1090
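/**
 * vmw_event_fence_action_create - Allocate a drm event and attach it to a
 * fence so that it is delivered when the fence signals.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the event carries a timestamp.
 * @user_data: User data copied into the event.
 * @interruptible: Interruptible waits if possible.
 */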
1091 static int vmw_event_fence_action_create(struct drm_file *file_priv,
1092 struct vmw_fence_obj *fence,
1093 uint32_t flags,
1094 uint64_t user_data,
1095 bool interruptible)
1096 {
1097 struct vmw_event_fence_pending *event;
1098 struct vmw_fence_manager *fman = fman_from_fence(fence);
1099 struct drm_device *dev = fman->dev_priv->dev;
1100 int ret;
1101
1102 event = kzalloc(sizeof(*event), GFP_KERNEL);
1103 if (unlikely(!event)) {
1104 DRM_ERROR("Failed to allocate an event.\n");
1105 ret = -ENOMEM;
1106 goto out_no_space;
1107 }
1108
1109 event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
1110 event->event.base.length = sizeof(*event);
1111 event->event.user_data = user_data;
1112
1113 ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
1114
1115 if (unlikely(ret != 0)) {
1116 DRM_ERROR("Failed to allocate event space for this file.\n");
1117 kfree(event);
1118 goto out_no_space;
1119 }
1120
1121 if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
1122 ret = vmw_event_fence_action_queue(file_priv, fence,
1123 &event->base,
1124 &event->event.tv_sec,
1125 &event->event.tv_usec,
1126 interruptible);
1127 else
1128 ret = vmw_event_fence_action_queue(file_priv, fence,
1129 &event->base,
1130 NULL,
1131 NULL,
1132 interruptible);
1133 if (ret != 0)
1134 goto out_no_queue;
1135
1136 return 0;
1137
1138 out_no_queue:
1139 drm_event_cancel_free(dev, &event->base);
1140 out_no_space:
1141 return ret;
1142 }
1143
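/**
 * vmw_fence_event_ioctl - Ioctl attaching a drm event to a fence.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_event_arg ioctl argument.
 * @file_priv: The calling file.
 *
 * Looks up or creates a fence object and posts a
 * DRM_VMW_EVENT_FENCE_SIGNALED event to be delivered when it signals.
 */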
1144 int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
1145 struct drm_file *file_priv)
1146 {
1147 struct vmw_private *dev_priv = vmw_priv(dev);
1148 struct drm_vmw_fence_event_arg *arg =
1149 (struct drm_vmw_fence_event_arg *) data;
1150 struct vmw_fence_obj *fence = NULL;
1151 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1152 struct ttm_object_file *tfile = vmw_fp->tfile;
1153 struct drm_vmw_fence_rep __user *user_fence_rep =
1154 (struct drm_vmw_fence_rep __user *)(unsigned long)
1155 arg->fence_rep;
1156 uint32_t handle;
1157 int ret;
1158
1159 /*
1160 * Look up an existing fence object,
1161 * and if user-space wants a new reference,
1162 * add one.
1163 */
1164 if (arg->handle) {
1165 struct ttm_base_object *base =
1166 vmw_fence_obj_lookup(tfile, arg->handle);
1167
1168 if (IS_ERR(base))
1169 return PTR_ERR(base);
1170
1171 fence = &(container_of(base, struct vmw_user_fence,
1172 base)->fence);
1173 (void) vmw_fence_obj_reference(fence);
1174
1175 if (user_fence_rep != NULL) {
1176 ret = ttm_ref_object_add(vmw_fp->tfile, base,
1177 TTM_REF_USAGE, NULL, false);
1178 if (unlikely(ret != 0)) {
1179 DRM_ERROR("Failed to reference a fence "
1180 "object.\n");
1181 goto out_no_ref_obj;
1182 }
1183 handle = base->handle;
1184 }
1185 ttm_base_object_unref(&base);
1186 }
1187
1188 /*
1189 * Create a new fence object.
1190 */
1191 if (!fence) {
1192 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1193 &fence,
1194 (user_fence_rep) ?
1195 &handle : NULL);
1196 if (unlikely(ret != 0)) {
1197 DRM_ERROR("Fence event failed to create fence.\n");
1198 return ret;
1199 }
1200 }
1201
1202 BUG_ON(fence == NULL);
1203
1204 ret = vmw_event_fence_action_create(file_priv, fence,
1205 arg->flags,
1206 arg->user_data,
1207 true);
1208 if (unlikely(ret != 0)) {
1209 if (ret != -ERESTARTSYS)
1210 DRM_ERROR("Failed to attach event to fence.\n");
1211 goto out_no_create;
1212 }
1213
1214 vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
1215 handle, -1, NULL);
1216 vmw_fence_obj_unreference(&fence);
1217 return 0;
1218 out_no_create:
1219 if (user_fence_rep != NULL)
1220 ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
1221 out_no_ref_obj:
1222 vmw_fence_obj_unreference(&fence);
1223 return ret;
1224 }
1225