// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_gt_regs.h"

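/*
 * Enable idle messaging for the GSC command streamer: clear the
 * IDLE_MSG_DISABLE bit so the engine can report idleness to the PM
 * hardware, and program the recommended idle hysteresis.
 */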
static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
		intel_uncore_write(engine->gt->uncore,
				   RC_PSMI_CTRL_GSCCS,
				   _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
		/* hysteresis 0xA=5us as recommended in spec */
		intel_uncore_write(engine->gt->uncore,
				   PWRCTX_MAXCNT_GSCCS,
				   0xA);
	}
}

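/*
 * Under CONFIG_DRM_I915_DEBUG_GEM, poison the entire context image with
 * CONTEXT_REDZONE so that any later reliance on stale contents is
 * caught; a no-op in production builds.
 */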
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}

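/*
 * Called on the first wakeref taken on the engine: grab a GT PM
 * reference, scrub the kernel context image that may have been lost
 * while powered down, then let the backend unpark and restart
 * breadcrumb signaling and the heartbeat.
 */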
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}

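/*
 * Fence callback for the final parking request: fold the emit-to-signal
 * delta into the engine's latency estimate (an EWMA).
 */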
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}

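/*
 * Queue the final parking request and drop the engine wakeref inside a
 * single critical section, so that neither engine->wakeref.counter nor
 * the timeline's active_count can be underflowed by a concurrent
 * retirer.
 */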
static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * Open-code one half of intel_context_enter(), which we have to
	 * omit here (see the large comment below); the other half must not
	 * be called, because constructing directly with
	 * __i915_request_create() already increments the active count via
	 * intel_context_mark_active().
	 */
	GEM_BUG_ON(rq->context->active_count != 1);
	__intel_gt_pm_get(engine->gt);

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

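/*
 * Returns true if the engine is already safely idle in the kernel
 * context and may park immediately; returns false after queueing a
 * final barrier request, deferring the park until that request has
 * been retired.
 */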
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission, the
	 * same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}

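/*
 * Flush any remaining idle barrier tasks, invoking each callback with
 * ERR_PTR(-EAGAIN) in place of a completed fence.
 */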
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}

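/*
 * Called on the last wakeref put: switch to the kernel context so no
 * user state can be lost across suspend, park the heartbeat and
 * breadcrumbs, run the backend park hook and release our GT PM
 * reference. Returns -EBUSY to defer the park while the final parking
 * request is still in flight.
 */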
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);
	return 0;
}

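/* Bind engine (un)parking to the first-get/last-put wakeref transitions. */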
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
	intel_engine_init_heartbeat(engine);

	intel_gsc_idle_msg_enable(engine);
}
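
/*
 * Usage sketch (illustrative only): engine PM is reference counted, so
 * a caller that needs the engine awake brackets its work with a get/put
 * pair, e.g.
 *
 *	intel_engine_pm_get(engine);
 *	... build and submit requests ...
 *	intel_engine_pm_put(engine);
 *
 * The first get runs __engine_unpark() and the final put eventually
 * runs __engine_park() through the wakeref ops above.
 */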

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose their content, or have
 * it corrupted, on suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;

		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif