// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2018 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"

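/*
 * Each timeline owns a TIMELINE_SEQNO_BYTES slot in its HWSP page: the
 * first u32 is the breadcrumb seqno and the remainder of the slot is
 * kept zeroed (see intel_timeline_reset_seqno()).
 */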
#define TIMELINE_SEQNO_BYTES 8

static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

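/*
 * __timeline_active/__timeline_retire bracket the busy lifetime of a
 * timeline: while any request on it remains active, the HWSP vma stays
 * pinned and the timeline holds a reference to itself, both dropped
 * again when the last request is retired.
 */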
static void __timeline_retire(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	i915_vma_unpin(tl->hwsp_ggtt);
	intel_timeline_put(tl);
}

static int __timeline_active(struct i915_active *active)
{
	struct intel_timeline *tl =
		container_of(active, typeof(*tl), active);

	__i915_vma_pin(tl->hwsp_ggtt);
	intel_timeline_get(tl);
	return 0;
}

I915_SELFTEST_EXPORT int
intel_timeline_pin_map(struct intel_timeline *timeline)
{
	struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
	u32 ofs = offset_in_page(timeline->hwsp_offset);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	timeline->hwsp_map = vaddr;
	timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
	drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);

	return 0;
}

static int intel_timeline_init(struct intel_timeline *timeline,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp,
			       unsigned int offset)
{
	kref_init(&timeline->kref);
	atomic_set(&timeline->pin_count, 0);

	timeline->gt = gt;

	if (hwsp) {
		timeline->hwsp_offset = offset;
		timeline->hwsp_ggtt = i915_vma_get(hwsp);
	} else {
		timeline->has_initial_breadcrumb = true;
		hwsp = hwsp_alloc(gt);
		if (IS_ERR(hwsp))
			return PTR_ERR(hwsp);
		timeline->hwsp_ggtt = hwsp;
	}

	timeline->hwsp_map = NULL;
	timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;

	GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);

	timeline->fence_context = dma_fence_context_alloc(1);

	rw_init(&timeline->mutex, "itmln");

	INIT_ACTIVE_FENCE(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

	i915_syncmap_init(&timeline->sync);
	i915_active_init(&timeline->active, __timeline_active,
			 __timeline_retire, 0);

	return 0;
}

void intel_gt_init_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	mtx_init(&timelines->lock, IPL_NONE);
	INIT_LIST_HEAD(&timelines->active_list);
}

static void intel_timeline_fini(struct rcu_head *rcu)
{
	struct intel_timeline *timeline =
		container_of(rcu, struct intel_timeline, rcu);

	if (timeline->hwsp_map)
		i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);

	i915_vma_put(timeline->hwsp_ggtt);
	i915_active_fini(&timeline->active);

	/*
	 * A small race exists between intel_gt_retire_requests_timeout and
	 * intel_timeline_exit which could result in the syncmap not getting
	 * freed. Rather than work too hard to seal this race, simply clean
	 * up the syncmap on fini.
	 */
	i915_syncmap_free(&timeline->sync);

	kfree(timeline);
}

struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
			struct i915_vma *global_hwsp,
			unsigned int offset)
{
	struct intel_timeline *timeline;
	int err;

	timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
	if (!timeline)
		return ERR_PTR(-ENOMEM);

	err = intel_timeline_init(timeline, gt, global_hwsp, offset);
	if (err) {
		kfree(timeline);
		return ERR_PTR(err);
	}

	return timeline;
}
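
/*
 * Illustrative lifecycle of a standalone timeline (a minimal sketch;
 * error handling elided, and the caller provides the i915_gem_ww_ctx
 * used for pinning):
 *
 *	tl = __intel_timeline_create(gt, NULL, 0);
 *	err = intel_timeline_pin(tl, ww);
 *	err = intel_timeline_get_seqno(tl, rq, &seqno);
 *	...
 *	intel_timeline_unpin(tl);
 *	intel_timeline_put(tl);
 */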

struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
				  unsigned int offset)
{
	struct i915_vma *hwsp = engine->status_page.vma;
	struct intel_timeline *tl;

	tl = __intel_timeline_create(engine->gt, hwsp, offset);
	if (IS_ERR(tl))
		return tl;

	/* Borrow a nearby lock; we only create these timelines during init */
	mutex_lock(&hwsp->vm->mutex);
	list_add_tail(&tl->engine_link, &engine->status_page.timelines);
	mutex_unlock(&hwsp->vm->mutex);

	return tl;
}

void __intel_timeline_pin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_inc(&tl->pin_count);
}

int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
	int err;

	if (atomic_add_unless(&tl->pin_count, 1, 0))
		return 0;

	if (!tl->hwsp_map) {
		err = intel_timeline_pin_map(tl);
		if (err)
			return err;
	}

	err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
	if (err)
		return err;

	tl->hwsp_offset =
		i915_ggtt_offset(tl->hwsp_ggtt) +
		offset_in_page(tl->hwsp_offset);
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	i915_active_acquire(&tl->active);
	if (atomic_fetch_inc(&tl->pin_count)) {
		/* We raced with another first pin; drop our duplicates */
		i915_active_release(&tl->active);
		__i915_vma_unpin(tl->hwsp_ggtt);
	}

	return 0;
}

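/*
 * Rewrite the HWSP slot with the current software seqno: zero the tail
 * of the slot, publish the seqno dword with WRITE_ONCE(), then clflush
 * so the update reaches memory even on non-coherent platforms.
 */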
void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
	u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
	/* Must be pinned to be writable, and no requests in flight. */
	GEM_BUG_ON(!atomic_read(&tl->pin_count));

	memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
	WRITE_ONCE(*hwsp_seqno, tl->seqno);
	drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/*
	 * Pretend we are serialised by the timeline->mutex.
	 *
	 * While generally true, there are a few exceptions to the rule
	 * for the engine->kernel_context being used to manage power
	 * transitions. As the engine_park may be called from under any
	 * timeline, it uses the power mutex as a global serialisation
	 * lock to prevent any other request entering its timeline.
	 *
	 * The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
	 *
	 * However, intel_gt_retire_request() does not know which engine
	 * it is retiring along and so cannot partake in the engine-pm
	 * barrier, and there we use the tl->active_count as a means to
	 * pin the timeline in the active_list while the locks are dropped.
	 * Ergo, as that is outside of the engine-pm barrier, we need to
	 * use atomic to manipulate tl->active_count.
	 */
	lockdep_assert_held(&tl->mutex);

	if (atomic_add_unless(&tl->active_count, 1, 0))
		return;

	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count)) {
		/*
		 * The HWSP is volatile, and may have been lost while inactive,
		 * e.g. across suspend/resume. Be paranoid, and ensure that
		 * the HWSP value matches our seqno so we don't proclaim
		 * the next request as already complete.
		 */
		intel_timeline_reset_seqno(tl);
		list_add_tail(&tl->link, &timelines->active_list);
	}
	spin_unlock(&timelines->lock);
}

void intel_timeline_exit(struct intel_timeline *tl)
{
	struct intel_gt_timelines *timelines = &tl->gt->timelines;

	/* See intel_timeline_enter() */
	lockdep_assert_held(&tl->mutex);

	GEM_BUG_ON(!atomic_read(&tl->active_count));
	if (atomic_add_unless(&tl->active_count, -1, 1))
		return;

	spin_lock(&timelines->lock);
	if (atomic_dec_and_test(&tl->active_count))
		list_del(&tl->link);
	spin_unlock(&timelines->lock);

	/*
	 * Since this timeline is idle, all barriers upon which we were
	 * waiting must also be complete and so we can discard the last
	 * used barriers without loss of information.
	 */
	i915_syncmap_free(&tl->sync);
}

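/*
 * Each request consumes one seqno, plus one more when the timeline emits
 * an initial breadcrumb; such a timeline therefore always holds an even
 * seqno (2, 4, 6, ...), which the second GEM_BUG_ON below asserts.
 */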
static u32 timeline_advance(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);

	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

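/*
 * Called when the seqno wraps: switch to the next TIMELINE_SEQNO_BYTES
 * slot in the HWSP page. For illustration, with TIMELINE_SEQNO_BYTES == 8
 * a timeline at offset 24 would step to 32; but as bit 5 must be clear
 * for MI_FLUSH_DW, the slot is pushed on to offset 64 instead.
 */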
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
			   u32 *seqno)
{
	u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);

	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
		next_ofs = offset_in_page(next_ofs + BIT(5));

	tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
	tl->hwsp_seqno = tl->hwsp_map + next_ofs;
	intel_timeline_reset_seqno(tl);

	*seqno = timeline_advance(tl);
	GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
	return 0;
}

int intel_timeline_get_seqno(struct intel_timeline *tl,
			     struct i915_request *rq,
			     u32 *seqno)
{
	*seqno = timeline_advance(tl);

	/* Replace the HWSP on wraparound for HW semaphores */
	if (unlikely(!*seqno && tl->has_initial_breadcrumb))
		return __intel_timeline_get_seqno(tl, seqno);

	return 0;
}

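/*
 * intel_timeline_read_hwsp: record where @to must look to observe @from's
 * breadcrumb. Returns 0 and writes the GGTT address of the seqno slot to
 * @hwsp, keeping the timeline alive until @to is retired; returns 1 if
 * @from has already completed (nothing to wait on), or -EINVAL if the
 * timeline has no initial breadcrumb and so cannot back a semaphore wait.
 */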
int intel_timeline_read_hwsp(struct i915_request *from,
			     struct i915_request *to,
			     u32 *hwsp)
{
	struct intel_timeline *tl;
	int err;

	rcu_read_lock();
	tl = rcu_dereference(from->timeline);
	if (i915_request_signaled(from) ||
	    !i915_active_acquire_if_busy(&tl->active))
		tl = NULL;

	if (tl) {
		/* hwsp_offset may wraparound, so use from->hwsp_seqno */
		*hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
			offset_in_page(from->hwsp_seqno);
	}

	/* ensure we wait on the right request, if not, we completed */
	if (tl && __i915_request_is_complete(from)) {
		i915_active_release(&tl->active);
		tl = NULL;
	}
	rcu_read_unlock();

	if (!tl)
		return 1;

	/* Can't do semaphore waits on kernel context */
	if (!tl->has_initial_breadcrumb) {
		err = -EINVAL;
		goto out;
	}

	err = i915_active_add_request(&tl->active, to);

out:
	i915_active_release(&tl->active);
	return err;
}

void intel_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	if (!atomic_dec_and_test(&tl->pin_count))
		return;

	i915_active_release(&tl->active);
	__i915_vma_unpin(tl->hwsp_ggtt);
}

void __intel_timeline_free(struct kref *kref)
{
	struct intel_timeline *timeline =
		container_of(kref, typeof(*timeline), kref);

	GEM_BUG_ON(atomic_read(&timeline->pin_count));
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(timeline->retire);

	call_rcu(&timeline->rcu, intel_timeline_fini);
}

void intel_gt_fini_timelines(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;

	GEM_BUG_ON(!list_empty(&timelines->active_list));
}

void intel_gt_show_timelines(struct intel_gt *gt,
			     struct drm_printer *m,
			     void (*show_request)(struct drm_printer *m,
						  const struct i915_request *rq,
						  const char *prefix,
						  int indent))
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	DRM_LIST_HEAD(free);

	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		unsigned long count, ready, inflight;
		struct i915_request *rq, *rn;
		struct dma_fence *fence;

		if (!mutex_trylock(&tl->mutex)) {
			drm_printf(m, "Timeline %llx: busy; skipping\n",
				   tl->fence_context);
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		count = 0;
		ready = 0;
		inflight = 0;
		list_for_each_entry_safe(rq, rn, &tl->requests, link) {
			if (i915_request_completed(rq))
				continue;

			count++;
			if (i915_request_is_ready(rq))
				ready++;
			if (i915_request_is_active(rq))
				inflight++;
		}

		drm_printf(m, "Timeline %llx: { ", tl->fence_context);
		drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
			   count, ready, inflight);
		drm_printf(m, ", seqno: { current: %d, last: %d }",
			   *tl->hwsp_seqno, tl->seqno);
		fence = i915_active_fence_get(&tl->last_request);
		if (fence) {
			drm_printf(m, ", engine: %s",
				   to_request(fence)->engine->name);
			dma_fence_put(fence);
		}
		drm_printf(m, " }\n");

		if (show_request) {
			list_for_each_entry_safe(rq, rn, &tl->requests, link)
				show_request(m, rq, "", 2);
		}

		mutex_unlock(&tl->mutex);
		spin_lock(&timelines->lock);

		/* Resume list iteration after reacquiring spinlock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif