// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020 Intel Corporation.
 */
#include <linux/workqueue.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_context.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"

#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_regs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"

/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt-in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions.
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * protected objects.
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
 */
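
/*
 * Illustrative userspace sketch (not a definitive usage guide; it assumes the
 * i915 uapi definitions from include/uapi/drm/i915_drm.h and hypothetical
 * drm_fd/obj_size variables): a protected object is created by chaining the
 * PROTECTED_CONTENT extension to the CREATE_EXT ioctl. The object must then
 * be used from a context created with I915_CONTEXT_PARAM_PROTECTED_CONTENT
 * set, which per the uapi documentation carries further restrictions (e.g.
 * the context must not be recoverable).
 *
 *	struct drm_i915_gem_create_ext_protected_content protected = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create = {
 *		.size = obj_size,
 *		.extensions = (uintptr_t)&protected,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
 *	// on success, create.handle refers to a PXP-protected GEM object
 */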

bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}

bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}

bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
	return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}

static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
	u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
		  _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);

	intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}

static void kcr_pxp_enable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, true);
}

static void kcr_pxp_disable(const struct intel_pxp *pxp)
{
	kcr_pxp_set_status(pxp, false);
}

static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}

static void destroy_vcs_context(struct intel_pxp *pxp)
{
	if (pxp->ce)
		intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
}

static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
	init_completion(&pxp->termination);
	complete_all(&pxp->termination);

	if (pxp->ctrl_gt->type == GT_MEDIA)
		pxp->kcr_base = MTL_KCR_BASE;
	else
		pxp->kcr_base = GEN12_KCR_BASE;

	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		ret = intel_pxp_gsccs_init(pxp);
	else
		ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;

	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");

	return;

out_context:
	destroy_vcs_context(pxp);
}

static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) && intel_uc_uses_huc(&to_gt(i915)->uc))
		return to_gt(i915);

	return NULL;
}

static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
		return NULL;

	/*
	 * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
	 * on the media GT. NOTE: if we have a media-tile with a GSC-engine,
	 * the VDBOX is already present so skip that check. We also have to
	 * ensure the GSC and HUC firmware are coming online
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;

	/*
	 * Otherwise we rely on the mei-pxp module, but only on legacy platforms
	 * that predate separate media GTs and have a valid VDBOX.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
		return to_gt(i915);

	return NULL;
}

int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it if PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);

	if (!gt)
		return -ENODEV;

	/*
	 * At this point, we will either enable full featured PXP capabilities
	 * including session and object management, or we will init the backend tee
	 * channel for internal users such as HuC loading by GSC
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;

	/* init common info used by all feature-mode usages */
	i915->pxp->ctrl_gt = gt;
	rw_init(&i915->pxp->tee_mutex, "pxptee");

	/*
	 * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
	 * such as DG2, we can skip the init of the full PXP session/object management
	 * and just init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);

	return 0;
}

void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;

	i915->pxp->arb_is_valid = false;

	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	else
		intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}

void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
{
	pxp->arb_is_valid = false;
	reinit_completion(&pxp->termination);
}

static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
	spin_lock_irq(gt->irq_lock);
	intel_pxp_mark_termination_in_progress(pxp);
	pxp->session_events |= PXP_TERMINATION_REQUEST;
	queue_work(system_unbound_wq, &pxp->session_work);
	spin_unlock_irq(gt->irq_lock);
}

static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
	mutex_unlock(&pxp->tee_mutex);

	return bound;
}

int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
	else
		return 250;
}

static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	int timeout;

	if (!pxp->arb_is_valid)
		return 0;
	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
	intel_pxp_mark_termination_in_progress(pxp);
	intel_pxp_terminate(pxp, false);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}

static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	int timeout;

	if (pxp->arb_is_valid)
		return 0;
	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * due to a runtime event. Use the worker that was designed for this.
	 */
	pxp_queue_termination(pxp);

	timeout = intel_pxp_get_backend_timeout_ms(pxp);

	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;

	return 0;
}

void intel_pxp_end(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	intel_wakeref_t wakeref;

	if (!intel_pxp_is_enabled(pxp))
		return;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_lock(&pxp->arb_mutex);

	if (__pxp_global_teardown_final(pxp))
		drm_dbg(&i915->drm, "PXP end timed out\n");

	mutex_unlock(&pxp->arb_mutex);

	intel_pxp_fini_hw(pxp);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/*
 * This helper is used both by intel_pxp_start and by the GET_PARAM IOCTL
 * that user space calls. Thus, the return values here should match the
 * UAPI spec.
 */
int intel_pxp_get_readiness_status(struct intel_pxp *pxp)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) {
		if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250))
			return 2;
	} else {
		if (wait_for(pxp_component_bound(pxp), 250))
			return 2;
	}
	return 1;
}

/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;

	ret = intel_pxp_get_readiness_status(pxp);
	if (ret < 0)
		return ret;
	else if (ret > 1)
		return -EIO; /* per UAPI spec, user may retry later */

	mutex_lock(&pxp->arb_mutex);

	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;

	/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}

void intel_pxp_init_hw(struct intel_pxp *pxp)
{
	kcr_pxp_enable(pxp);
	intel_pxp_irq_enable(pxp);
}

void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
	kcr_pxp_disable(pxp);
	intel_pxp_irq_disable(pxp);
}

int intel_pxp_key_check(struct intel_pxp *pxp,
			struct drm_i915_gem_object *obj,
			bool assign)
{
	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;

	GEM_BUG_ON(!pxp->key_instance);

	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;

	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}

void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
	spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
		}

		spin_unlock_irq(&i915->gem.contexts.lock);

		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quickly as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
		for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
			intel_context_ban(ce, NULL);
		i915_gem_context_unlock_engines(ctx);

		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref
		 */
		if (ctx->pxp_wakeref) {
			intel_runtime_pm_put(&i915->runtime_pm,
					     ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}