/*	$NetBSD: vmwgfx_fence.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vmwgfx_fence.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
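
/*
 * Illustrative note: seqnos are 32-bit and wrap, so "has fence seqno S
 * passed, given the latest seqno P read from the device?" is answered
 * with unsigned modular arithmetic rather than a plain comparison:
 *
 *	passed = (P - S) < VMW_FENCE_WRAP;
 *
 * For example, P = 0x00000003 and S = 0xfffffffe give P - S = 5, which is
 * less than 2^31, so S has passed even though P < S numerically. The same
 * test appears throughout this file.
 */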

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head entry on the file private structure's list of
 * pending fence events; used to tear the event down if the file is
 * closed first.
 * @event: A pointer to the pending drm event that controls the event
 * delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

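/*
 * fence->base.lock is initialized by vmw_fence_obj_init() to point at the
 * owning manager's fman->lock, so container_of() on the lock pointer
 * recovers the manager. A sketch of the relationship (illustrative, not
 * compiled):
 *
 *	fence_init(&fence->base, &vmw_fence_ops, &fman->lock, fman->ctx,
 *		   seqno);
 *	KASSERT(fman_from_fence(fence) == fman);
 */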
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new
 * fences, fence objects may not be signaled. This is perfectly OK, since
 * there are no consumers of the signaled data, but that is NOT OK when
 * there are fence actions attached to a fence. The fencing subsystem then
 * makes use of the FENCE_GOAL irq and sets the fence goal seqno to that of
 * the next fence which has an action attached, and each time
 * vmw_fences_update is called, the subsystem makes sure the fence goal
 * seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
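
/*
 * Worked example of the goal mechanism described above: with fences 1, 2
 * and 3 pending and an action attached to fence 2, the goal seqno is set
 * to 2. When the device passes seqno 2 it raises the FENCE_GOAL irq;
 * vmw_fences_update() then signals fences 1 and 2, runs the attached
 * action, and advances the goal to the next fence with actions, if any.
 */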

static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}

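/*
 * Fence-ops enable_signaling callback. Under the Linux fence (dma-fence)
 * contract this is called with fence->lock held; returning false tells the
 * core the fence is already signaled, while returning true means signaling
 * has been armed (here by pinging the host so the device will eventually
 * raise an ANY_FENCE irq).
 */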
static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

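/*
 * Fence-ops wait callback. This mirrors the core's default wait: it parks
 * the current task on the fence's callback list and sleeps in
 * schedule_timeout(), but additionally calls __vmw_fences_update() on each
 * wakeup so that fences make progress even if no irq was delivered.
 * Returns the remaining timeout (> 0) on success, 0 on timeout, or
 * -ERESTARTSYS if interrupted by a signal.
 */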
static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions on our private list, hence
		 * fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

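/*
 * Convenience wrapper mapping the fence_wait_timeout() return convention
 * (remaining jiffies on success, 0 on timeout, negative errno on error)
 * onto the 0 / -EBUSY / -errno convention used by vmwgfx callers. The
 * @lazy argument is retained for API compatibility but is unused here.
 */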
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not available on all 32-bit systems, so
	 * approximate the division by 1000000 with shifts.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);
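
	/*
	 * Sanity check on the shift approximation above:
	 *	2^-20 + 2^-24 - 2^-26 ~= 9.9838e-7,
	 * within about 0.2% of the exact 1/1000000, which is more than
	 * accurate enough for a timeout expressed in jiffies.
	 */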

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}
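
	/*
	 * The cookie set above records an absolute deadline in jiffies, so
	 * that a wait restarted after -ERESTARTSYS resumes with the time
	 * remaining rather than with the full timeout again.
	 */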

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Queue an event for delivery when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will be
 * assigned the current time tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
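
/*
 * The pending-event wrapper embeds the fixed-size drm event payload so a
 * single kzalloc covers both; event->base.event is pointed at the embedded
 * struct drm_vmw_event_fence in vmw_event_fence_action_create() below.
 */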

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(event->event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}