xref: /openbsd-src/sys/dev/pci/drm/i915/gt/selftest_slpc.c (revision f005ef32267c16bdb134f0e9fa4477dbe07c263a)
15ca02815Sjsg // SPDX-License-Identifier: MIT
25ca02815Sjsg /*
35ca02815Sjsg  * Copyright © 2021 Intel Corporation
45ca02815Sjsg  */
55ca02815Sjsg 
65ca02815Sjsg #define NUM_STEPS 5
75ca02815Sjsg #define H2G_DELAY 50000
85ca02815Sjsg #define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
95ca02815Sjsg #define FREQUENCY_REQ_UNIT	DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
105ca02815Sjsg 						  GEN9_FREQ_SCALER)
111bb76ff1Sjsg enum test_type {
121bb76ff1Sjsg 	VARY_MIN,
131bb76ff1Sjsg 	VARY_MAX,
14*f005ef32Sjsg 	MAX_GRANTED,
15*f005ef32Sjsg 	SLPC_POWER,
16*f005ef32Sjsg 	TILE_INTERACTION,
17*f005ef32Sjsg };
18*f005ef32Sjsg 
19*f005ef32Sjsg struct slpc_thread {
20*f005ef32Sjsg 	struct kthread_worker *worker;
21*f005ef32Sjsg 	struct kthread_work work;
22*f005ef32Sjsg 	struct intel_gt *gt;
23*f005ef32Sjsg 	int result;
241bb76ff1Sjsg };
255ca02815Sjsg 
slpc_set_min_freq(struct intel_guc_slpc * slpc,u32 freq)265ca02815Sjsg static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
275ca02815Sjsg {
285ca02815Sjsg 	int ret;
295ca02815Sjsg 
305ca02815Sjsg 	ret = intel_guc_slpc_set_min_freq(slpc, freq);
315ca02815Sjsg 	if (ret)
325ca02815Sjsg 		pr_err("Could not set min frequency to [%u]\n", freq);
335ca02815Sjsg 	else /* Delay to ensure h2g completes */
345ca02815Sjsg 		delay_for_h2g();
355ca02815Sjsg 
365ca02815Sjsg 	return ret;
375ca02815Sjsg }
385ca02815Sjsg 
slpc_set_max_freq(struct intel_guc_slpc * slpc,u32 freq)395ca02815Sjsg static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
405ca02815Sjsg {
415ca02815Sjsg 	int ret;
425ca02815Sjsg 
435ca02815Sjsg 	ret = intel_guc_slpc_set_max_freq(slpc, freq);
445ca02815Sjsg 	if (ret)
455ca02815Sjsg 		pr_err("Could not set maximum frequency [%u]\n",
465ca02815Sjsg 		       freq);
475ca02815Sjsg 	else /* Delay to ensure h2g completes */
485ca02815Sjsg 		delay_for_h2g();
495ca02815Sjsg 
505ca02815Sjsg 	return ret;
515ca02815Sjsg }
525ca02815Sjsg 
slpc_set_freq(struct intel_gt * gt,u32 freq)53*f005ef32Sjsg static int slpc_set_freq(struct intel_gt *gt, u32 freq)
54*f005ef32Sjsg {
55*f005ef32Sjsg 	int err;
56*f005ef32Sjsg 	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
57*f005ef32Sjsg 
58*f005ef32Sjsg 	err = slpc_set_max_freq(slpc, freq);
59*f005ef32Sjsg 	if (err) {
60*f005ef32Sjsg 		pr_err("Unable to update max freq");
61*f005ef32Sjsg 		return err;
62*f005ef32Sjsg 	}
63*f005ef32Sjsg 
64*f005ef32Sjsg 	err = slpc_set_min_freq(slpc, freq);
65*f005ef32Sjsg 	if (err) {
66*f005ef32Sjsg 		pr_err("Unable to update min freq");
67*f005ef32Sjsg 		return err;
68*f005ef32Sjsg 	}
69*f005ef32Sjsg 
70*f005ef32Sjsg 	return err;
71*f005ef32Sjsg }
72*f005ef32Sjsg 
slpc_restore_freq(struct intel_guc_slpc * slpc,u32 min,u32 max)73*f005ef32Sjsg static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
74*f005ef32Sjsg {
75*f005ef32Sjsg 	int err;
76*f005ef32Sjsg 
77*f005ef32Sjsg 	err = slpc_set_max_freq(slpc, max);
78*f005ef32Sjsg 	if (err) {
79*f005ef32Sjsg 		pr_err("Unable to restore max freq");
80*f005ef32Sjsg 		return err;
81*f005ef32Sjsg 	}
82*f005ef32Sjsg 
83*f005ef32Sjsg 	err = slpc_set_min_freq(slpc, min);
84*f005ef32Sjsg 	if (err) {
85*f005ef32Sjsg 		pr_err("Unable to restore min freq");
86*f005ef32Sjsg 		return err;
87*f005ef32Sjsg 	}
88*f005ef32Sjsg 
89*f005ef32Sjsg 	err = intel_guc_slpc_set_ignore_eff_freq(slpc, false);
90*f005ef32Sjsg 	if (err) {
91*f005ef32Sjsg 		pr_err("Unable to restore efficient freq");
92*f005ef32Sjsg 		return err;
93*f005ef32Sjsg 	}
94*f005ef32Sjsg 
95*f005ef32Sjsg 	return 0;
96*f005ef32Sjsg }
97*f005ef32Sjsg 
measure_power_at_freq(struct intel_gt * gt,int * freq,u64 * power)98*f005ef32Sjsg static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
99*f005ef32Sjsg {
100*f005ef32Sjsg 	int err = 0;
101*f005ef32Sjsg 
102*f005ef32Sjsg 	err = slpc_set_freq(gt, *freq);
103*f005ef32Sjsg 	if (err)
104*f005ef32Sjsg 		return err;
105*f005ef32Sjsg 	*freq = intel_rps_read_actual_frequency(&gt->rps);
106*f005ef32Sjsg 	*power = measure_power(&gt->rps, freq);
107*f005ef32Sjsg 
108*f005ef32Sjsg 	return err;
109*f005ef32Sjsg }
110*f005ef32Sjsg 
vary_max_freq(struct intel_guc_slpc * slpc,struct intel_rps * rps,u32 * max_act_freq)1111bb76ff1Sjsg static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
1121bb76ff1Sjsg 			 u32 *max_act_freq)
1135ca02815Sjsg {
1141bb76ff1Sjsg 	u32 step, max_freq, req_freq;
1151bb76ff1Sjsg 	u32 act_freq;
1161bb76ff1Sjsg 	int err = 0;
1171bb76ff1Sjsg 
1181bb76ff1Sjsg 	/* Go from max to min in 5 steps */
1191bb76ff1Sjsg 	step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
1201bb76ff1Sjsg 	*max_act_freq = slpc->min_freq;
1211bb76ff1Sjsg 	for (max_freq = slpc->rp0_freq; max_freq > slpc->min_freq;
1221bb76ff1Sjsg 				max_freq -= step) {
1231bb76ff1Sjsg 		err = slpc_set_max_freq(slpc, max_freq);
1241bb76ff1Sjsg 		if (err)
1251bb76ff1Sjsg 			break;
1261bb76ff1Sjsg 
1271bb76ff1Sjsg 		req_freq = intel_rps_read_punit_req_frequency(rps);
1281bb76ff1Sjsg 
1291bb76ff1Sjsg 		/* GuC requests freq in multiples of 50/3 MHz */
1301bb76ff1Sjsg 		if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
1311bb76ff1Sjsg 			pr_err("SWReq is %d, should be at most %d\n", req_freq,
1321bb76ff1Sjsg 			       max_freq + FREQUENCY_REQ_UNIT);
1331bb76ff1Sjsg 			err = -EINVAL;
1341bb76ff1Sjsg 		}
1351bb76ff1Sjsg 
1361bb76ff1Sjsg 		act_freq =  intel_rps_read_actual_frequency(rps);
1371bb76ff1Sjsg 		if (act_freq > *max_act_freq)
1381bb76ff1Sjsg 			*max_act_freq = act_freq;
1391bb76ff1Sjsg 
1401bb76ff1Sjsg 		if (err)
1411bb76ff1Sjsg 			break;
1421bb76ff1Sjsg 	}
1431bb76ff1Sjsg 
1441bb76ff1Sjsg 	return err;
1451bb76ff1Sjsg }
1461bb76ff1Sjsg 
vary_min_freq(struct intel_guc_slpc * slpc,struct intel_rps * rps,u32 * max_act_freq)1471bb76ff1Sjsg static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
1481bb76ff1Sjsg 			 u32 *max_act_freq)
1491bb76ff1Sjsg {
1501bb76ff1Sjsg 	u32 step, min_freq, req_freq;
1511bb76ff1Sjsg 	u32 act_freq;
1521bb76ff1Sjsg 	int err = 0;
1531bb76ff1Sjsg 
1541bb76ff1Sjsg 	/* Go from min to max in 5 steps */
1551bb76ff1Sjsg 	step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
1561bb76ff1Sjsg 	*max_act_freq = slpc->min_freq;
1571bb76ff1Sjsg 	for (min_freq = slpc->min_freq; min_freq < slpc->rp0_freq;
1581bb76ff1Sjsg 				min_freq += step) {
1591bb76ff1Sjsg 		err = slpc_set_min_freq(slpc, min_freq);
1601bb76ff1Sjsg 		if (err)
1611bb76ff1Sjsg 			break;
1621bb76ff1Sjsg 
1631bb76ff1Sjsg 		req_freq = intel_rps_read_punit_req_frequency(rps);
1641bb76ff1Sjsg 
1651bb76ff1Sjsg 		/* GuC requests freq in multiples of 50/3 MHz */
1661bb76ff1Sjsg 		if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
1671bb76ff1Sjsg 			pr_err("SWReq is %d, should be at least %d\n", req_freq,
1681bb76ff1Sjsg 			       min_freq - FREQUENCY_REQ_UNIT);
1691bb76ff1Sjsg 			err = -EINVAL;
1701bb76ff1Sjsg 		}
1711bb76ff1Sjsg 
1721bb76ff1Sjsg 		act_freq =  intel_rps_read_actual_frequency(rps);
1731bb76ff1Sjsg 		if (act_freq > *max_act_freq)
1741bb76ff1Sjsg 			*max_act_freq = act_freq;
1751bb76ff1Sjsg 
1761bb76ff1Sjsg 		if (err)
1771bb76ff1Sjsg 			break;
1781bb76ff1Sjsg 	}
1791bb76ff1Sjsg 
1801bb76ff1Sjsg 	return err;
1811bb76ff1Sjsg }
1821bb76ff1Sjsg 
slpc_power(struct intel_gt * gt,struct intel_engine_cs * engine)183*f005ef32Sjsg static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
184*f005ef32Sjsg {
185*f005ef32Sjsg 	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
186*f005ef32Sjsg 	struct {
187*f005ef32Sjsg 		u64 power;
188*f005ef32Sjsg 		int freq;
189*f005ef32Sjsg 	} min, max;
190*f005ef32Sjsg 	int err = 0;
191*f005ef32Sjsg 
192*f005ef32Sjsg 	/*
193*f005ef32Sjsg 	 * Our fundamental assumption is that running at lower frequency
194*f005ef32Sjsg 	 * actually saves power. Let's see if our RAPL measurement supports
195*f005ef32Sjsg 	 * that theory.
196*f005ef32Sjsg 	 */
197*f005ef32Sjsg 	if (!librapl_supported(gt->i915))
198*f005ef32Sjsg 		return 0;
199*f005ef32Sjsg 
200*f005ef32Sjsg 	min.freq = slpc->min_freq;
201*f005ef32Sjsg 	err = measure_power_at_freq(gt, &min.freq, &min.power);
202*f005ef32Sjsg 
203*f005ef32Sjsg 	if (err)
204*f005ef32Sjsg 		return err;
205*f005ef32Sjsg 
206*f005ef32Sjsg 	max.freq = slpc->rp0_freq;
207*f005ef32Sjsg 	err = measure_power_at_freq(gt, &max.freq, &max.power);
208*f005ef32Sjsg 
209*f005ef32Sjsg 	if (err)
210*f005ef32Sjsg 		return err;
211*f005ef32Sjsg 
212*f005ef32Sjsg 	pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
213*f005ef32Sjsg 		engine->name,
214*f005ef32Sjsg 		min.power, min.freq,
215*f005ef32Sjsg 		max.power, max.freq);
216*f005ef32Sjsg 
217*f005ef32Sjsg 	if (10 * min.freq >= 9 * max.freq) {
218*f005ef32Sjsg 		pr_notice("Could not control frequency, ran at [%uMHz, %uMhz]\n",
219*f005ef32Sjsg 			  min.freq, max.freq);
220*f005ef32Sjsg 	}
221*f005ef32Sjsg 
222*f005ef32Sjsg 	if (11 * min.power > 10 * max.power) {
223*f005ef32Sjsg 		pr_err("%s: did not conserve power when setting lower frequency!\n",
224*f005ef32Sjsg 		       engine->name);
225*f005ef32Sjsg 		err = -EINVAL;
226*f005ef32Sjsg 	}
227*f005ef32Sjsg 
228*f005ef32Sjsg 	/* Restore min/max frequencies */
229*f005ef32Sjsg 	slpc_set_max_freq(slpc, slpc->rp0_freq);
230*f005ef32Sjsg 	slpc_set_min_freq(slpc, slpc->min_freq);
231*f005ef32Sjsg 
232*f005ef32Sjsg 	return err;
233*f005ef32Sjsg }
234*f005ef32Sjsg 
max_granted_freq(struct intel_guc_slpc * slpc,struct intel_rps * rps,u32 * max_act_freq)2351bb76ff1Sjsg static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
2361bb76ff1Sjsg {
2371bb76ff1Sjsg 	struct intel_gt *gt = rps_to_gt(rps);
2381bb76ff1Sjsg 	u32 perf_limit_reasons;
2391bb76ff1Sjsg 	int err = 0;
2401bb76ff1Sjsg 
2411bb76ff1Sjsg 	err = slpc_set_min_freq(slpc, slpc->rp0_freq);
2421bb76ff1Sjsg 	if (err)
2431bb76ff1Sjsg 		return err;
2441bb76ff1Sjsg 
2451bb76ff1Sjsg 	*max_act_freq =  intel_rps_read_actual_frequency(rps);
2461bb76ff1Sjsg 	if (*max_act_freq != slpc->rp0_freq) {
2471bb76ff1Sjsg 		/* Check if there was some throttling by pcode */
248*f005ef32Sjsg 		perf_limit_reasons = intel_uncore_read(gt->uncore,
249*f005ef32Sjsg 						       intel_gt_perf_limit_reasons_reg(gt));
2501bb76ff1Sjsg 
2511bb76ff1Sjsg 		/* If not, this is an error */
2521bb76ff1Sjsg 		if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
2531bb76ff1Sjsg 			pr_err("Pcode did not grant max freq\n");
2541bb76ff1Sjsg 			err = -EINVAL;
2551bb76ff1Sjsg 		} else {
2561bb76ff1Sjsg 			pr_info("Pcode throttled frequency 0x%x\n", perf_limit_reasons);
2571bb76ff1Sjsg 		}
2581bb76ff1Sjsg 	}
2591bb76ff1Sjsg 
2601bb76ff1Sjsg 	return err;
2611bb76ff1Sjsg }
2621bb76ff1Sjsg 
run_test(struct intel_gt * gt,int test_type)2631bb76ff1Sjsg static int run_test(struct intel_gt *gt, int test_type)
2641bb76ff1Sjsg {
2655ca02815Sjsg 	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
2665ca02815Sjsg 	struct intel_rps *rps = &gt->rps;
2675ca02815Sjsg 	struct intel_engine_cs *engine;
2685ca02815Sjsg 	enum intel_engine_id id;
2695ca02815Sjsg 	struct igt_spinner spin;
2705ca02815Sjsg 	u32 slpc_min_freq, slpc_max_freq;
2715ca02815Sjsg 	int err = 0;
2725ca02815Sjsg 
2735ca02815Sjsg 	if (!intel_uc_uses_guc_slpc(&gt->uc))
2745ca02815Sjsg 		return 0;
2755ca02815Sjsg 
276*f005ef32Sjsg 	if (slpc->min_freq == slpc->rp0_freq) {
277*f005ef32Sjsg 		pr_err("Min/Max are fused to the same value\n");
278*f005ef32Sjsg 		return -EINVAL;
279*f005ef32Sjsg 	}
280*f005ef32Sjsg 
2815ca02815Sjsg 	if (igt_spinner_init(&spin, gt))
2825ca02815Sjsg 		return -ENOMEM;
2835ca02815Sjsg 
2845ca02815Sjsg 	if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
2855ca02815Sjsg 		pr_err("Could not get SLPC max freq\n");
2865ca02815Sjsg 		return -EIO;
2875ca02815Sjsg 	}
2885ca02815Sjsg 
2895ca02815Sjsg 	if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
2905ca02815Sjsg 		pr_err("Could not get SLPC min freq\n");
2915ca02815Sjsg 		return -EIO;
2925ca02815Sjsg 	}
2935ca02815Sjsg 
2941bb76ff1Sjsg 	/*
295*f005ef32Sjsg 	 * Set min frequency to RPn so that we can test the whole
296*f005ef32Sjsg 	 * range of RPn-RP0.
2971bb76ff1Sjsg 	 */
2981bb76ff1Sjsg 	err = slpc_set_min_freq(slpc, slpc->min_freq);
299*f005ef32Sjsg 	if (err) {
300*f005ef32Sjsg 		pr_err("Unable to update min freq!");
3011bb76ff1Sjsg 		return err;
302*f005ef32Sjsg 	}
3031bb76ff1Sjsg 
304*f005ef32Sjsg 	/*
305*f005ef32Sjsg 	 * Turn off efficient frequency so RPn/RP0 ranges are obeyed.
306*f005ef32Sjsg 	 */
307*f005ef32Sjsg 	err = intel_guc_slpc_set_ignore_eff_freq(slpc, true);
308*f005ef32Sjsg 	if (err) {
309*f005ef32Sjsg 		pr_err("Unable to turn off efficient freq!");
310*f005ef32Sjsg 		return err;
3115ca02815Sjsg 	}
3125ca02815Sjsg 
3135ca02815Sjsg 	intel_gt_pm_wait_for_idle(gt);
3145ca02815Sjsg 	intel_gt_pm_get(gt);
3155ca02815Sjsg 	for_each_engine(engine, gt, id) {
3165ca02815Sjsg 		struct i915_request *rq;
3171bb76ff1Sjsg 		u32 max_act_freq;
3185ca02815Sjsg 
3195ca02815Sjsg 		if (!intel_engine_can_store_dword(engine))
3205ca02815Sjsg 			continue;
3215ca02815Sjsg 
3225ca02815Sjsg 		st_engine_heartbeat_disable(engine);
3235ca02815Sjsg 
3245ca02815Sjsg 		rq = igt_spinner_create_request(&spin,
3255ca02815Sjsg 						engine->kernel_context,
3265ca02815Sjsg 						MI_NOOP);
3275ca02815Sjsg 		if (IS_ERR(rq)) {
3285ca02815Sjsg 			err = PTR_ERR(rq);
3295ca02815Sjsg 			st_engine_heartbeat_enable(engine);
3305ca02815Sjsg 			break;
3315ca02815Sjsg 		}
3325ca02815Sjsg 
3335ca02815Sjsg 		i915_request_add(rq);
3345ca02815Sjsg 
3355ca02815Sjsg 		if (!igt_wait_for_spinner(&spin, rq)) {
3365ca02815Sjsg 			pr_err("%s: Spinner did not start\n",
3375ca02815Sjsg 			       engine->name);
3385ca02815Sjsg 			igt_spinner_end(&spin);
3395ca02815Sjsg 			st_engine_heartbeat_enable(engine);
3405ca02815Sjsg 			intel_gt_set_wedged(engine->gt);
3415ca02815Sjsg 			err = -EIO;
3425ca02815Sjsg 			break;
3435ca02815Sjsg 		}
3445ca02815Sjsg 
3451bb76ff1Sjsg 		switch (test_type) {
3461bb76ff1Sjsg 		case VARY_MIN:
3471bb76ff1Sjsg 			err = vary_min_freq(slpc, rps, &max_act_freq);
3481bb76ff1Sjsg 			break;
3495ca02815Sjsg 
3501bb76ff1Sjsg 		case VARY_MAX:
3511bb76ff1Sjsg 			err = vary_max_freq(slpc, rps, &max_act_freq);
3521bb76ff1Sjsg 			break;
3535ca02815Sjsg 
3541bb76ff1Sjsg 		case MAX_GRANTED:
355*f005ef32Sjsg 		case TILE_INTERACTION:
3561bb76ff1Sjsg 			/* Media engines have a different RP0 */
357*f005ef32Sjsg 			if (gt->type != GT_MEDIA && (engine->class == VIDEO_DECODE_CLASS ||
358*f005ef32Sjsg 						     engine->class == VIDEO_ENHANCEMENT_CLASS)) {
3595ca02815Sjsg 				igt_spinner_end(&spin);
3605ca02815Sjsg 				st_engine_heartbeat_enable(engine);
3611bb76ff1Sjsg 				err = 0;
3621bb76ff1Sjsg 				continue;
3635ca02815Sjsg 			}
3645ca02815Sjsg 
3651bb76ff1Sjsg 			err = max_granted_freq(slpc, rps, &max_act_freq);
3661bb76ff1Sjsg 			break;
367*f005ef32Sjsg 
368*f005ef32Sjsg 		case SLPC_POWER:
369*f005ef32Sjsg 			err = slpc_power(gt, engine);
370*f005ef32Sjsg 			break;
3715ca02815Sjsg 		}
3725ca02815Sjsg 
373*f005ef32Sjsg 		if (test_type != SLPC_POWER) {
3745ca02815Sjsg 			pr_info("Max actual frequency for %s was %d\n",
3755ca02815Sjsg 				engine->name, max_act_freq);
3765ca02815Sjsg 
3775ca02815Sjsg 			/* Actual frequency should rise above min */
378*f005ef32Sjsg 			if (max_act_freq <= slpc->min_freq) {
3795ca02815Sjsg 				pr_err("Actual freq did not rise above min\n");
3801bb76ff1Sjsg 				pr_err("Perf Limit Reasons: 0x%x\n",
381*f005ef32Sjsg 				       intel_uncore_read(gt->uncore,
382*f005ef32Sjsg 							 intel_gt_perf_limit_reasons_reg(gt)));
3835ca02815Sjsg 				err = -EINVAL;
3845ca02815Sjsg 			}
385*f005ef32Sjsg 		}
3865ca02815Sjsg 
3871bb76ff1Sjsg 		igt_spinner_end(&spin);
3881bb76ff1Sjsg 		st_engine_heartbeat_enable(engine);
3891bb76ff1Sjsg 
3905ca02815Sjsg 		if (err)
3915ca02815Sjsg 			break;
3925ca02815Sjsg 	}
3935ca02815Sjsg 
394*f005ef32Sjsg 	/* Restore min/max/efficient frequencies */
395*f005ef32Sjsg 	err = slpc_restore_freq(slpc, slpc_min_freq, slpc_max_freq);
3965ca02815Sjsg 
3975ca02815Sjsg 	if (igt_flush_test(gt->i915))
3985ca02815Sjsg 		err = -EIO;
3995ca02815Sjsg 
4005ca02815Sjsg 	intel_gt_pm_put(gt);
4015ca02815Sjsg 	igt_spinner_fini(&spin);
4025ca02815Sjsg 	intel_gt_pm_wait_for_idle(gt);
4035ca02815Sjsg 
4045ca02815Sjsg 	return err;
4055ca02815Sjsg }
4065ca02815Sjsg 
live_slpc_vary_min(void * arg)4071bb76ff1Sjsg static int live_slpc_vary_min(void *arg)
4085ca02815Sjsg {
4095ca02815Sjsg 	struct drm_i915_private *i915 = arg;
410*f005ef32Sjsg 	struct intel_gt *gt;
411*f005ef32Sjsg 	unsigned int i;
412*f005ef32Sjsg 	int ret;
4135ca02815Sjsg 
414*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
415*f005ef32Sjsg 		ret = run_test(gt, VARY_MIN);
416*f005ef32Sjsg 		if (ret)
417*f005ef32Sjsg 			return ret;
418*f005ef32Sjsg 	}
419*f005ef32Sjsg 
420*f005ef32Sjsg 	return ret;
4215ca02815Sjsg }
4225ca02815Sjsg 
live_slpc_vary_max(void * arg)4231bb76ff1Sjsg static int live_slpc_vary_max(void *arg)
4241bb76ff1Sjsg {
4251bb76ff1Sjsg 	struct drm_i915_private *i915 = arg;
426*f005ef32Sjsg 	struct intel_gt *gt;
427*f005ef32Sjsg 	unsigned int i;
428*f005ef32Sjsg 	int ret;
4291bb76ff1Sjsg 
430*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
431*f005ef32Sjsg 		ret = run_test(gt, VARY_MAX);
432*f005ef32Sjsg 		if (ret)
433*f005ef32Sjsg 			return ret;
434*f005ef32Sjsg 	}
435*f005ef32Sjsg 
436*f005ef32Sjsg 	return ret;
4375ca02815Sjsg }
4385ca02815Sjsg 
4391bb76ff1Sjsg /* check if pcode can grant RP0 */
live_slpc_max_granted(void * arg)4401bb76ff1Sjsg static int live_slpc_max_granted(void *arg)
4411bb76ff1Sjsg {
4421bb76ff1Sjsg 	struct drm_i915_private *i915 = arg;
443*f005ef32Sjsg 	struct intel_gt *gt;
444*f005ef32Sjsg 	unsigned int i;
445*f005ef32Sjsg 	int ret;
4465ca02815Sjsg 
447*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
448*f005ef32Sjsg 		ret = run_test(gt, MAX_GRANTED);
449*f005ef32Sjsg 		if (ret)
450*f005ef32Sjsg 			return ret;
451*f005ef32Sjsg 	}
452*f005ef32Sjsg 
453*f005ef32Sjsg 	return ret;
454*f005ef32Sjsg }
455*f005ef32Sjsg 
live_slpc_power(void * arg)456*f005ef32Sjsg static int live_slpc_power(void *arg)
457*f005ef32Sjsg {
458*f005ef32Sjsg 	struct drm_i915_private *i915 = arg;
459*f005ef32Sjsg 	struct intel_gt *gt;
460*f005ef32Sjsg 	unsigned int i;
461*f005ef32Sjsg 	int ret;
462*f005ef32Sjsg 
463*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
464*f005ef32Sjsg 		ret = run_test(gt, SLPC_POWER);
465*f005ef32Sjsg 		if (ret)
466*f005ef32Sjsg 			return ret;
467*f005ef32Sjsg 	}
468*f005ef32Sjsg 
469*f005ef32Sjsg 	return ret;
470*f005ef32Sjsg }
471*f005ef32Sjsg 
slpc_spinner_thread(struct kthread_work * work)472*f005ef32Sjsg static void slpc_spinner_thread(struct kthread_work *work)
473*f005ef32Sjsg {
474*f005ef32Sjsg 	struct slpc_thread *thread = container_of(work, typeof(*thread), work);
475*f005ef32Sjsg 
476*f005ef32Sjsg 	thread->result = run_test(thread->gt, TILE_INTERACTION);
477*f005ef32Sjsg }
478*f005ef32Sjsg 
live_slpc_tile_interaction(void * arg)479*f005ef32Sjsg static int live_slpc_tile_interaction(void *arg)
480*f005ef32Sjsg {
481*f005ef32Sjsg 	struct drm_i915_private *i915 = arg;
482*f005ef32Sjsg 	struct intel_gt *gt;
483*f005ef32Sjsg 	struct slpc_thread *threads;
484*f005ef32Sjsg 	int i = 0, ret = 0;
485*f005ef32Sjsg 
486*f005ef32Sjsg 	threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);
487*f005ef32Sjsg 	if (!threads)
488*f005ef32Sjsg 		return -ENOMEM;
489*f005ef32Sjsg 
490*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
491*f005ef32Sjsg 		threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
492*f005ef32Sjsg 
493*f005ef32Sjsg 		if (IS_ERR(threads[i].worker)) {
494*f005ef32Sjsg 			ret = PTR_ERR(threads[i].worker);
495*f005ef32Sjsg 			break;
496*f005ef32Sjsg 		}
497*f005ef32Sjsg 
498*f005ef32Sjsg 		threads[i].gt = gt;
499*f005ef32Sjsg 		kthread_init_work(&threads[i].work, slpc_spinner_thread);
500*f005ef32Sjsg 		kthread_queue_work(threads[i].worker, &threads[i].work);
501*f005ef32Sjsg 	}
502*f005ef32Sjsg 
503*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
504*f005ef32Sjsg 		int status;
505*f005ef32Sjsg 
506*f005ef32Sjsg 		if (IS_ERR_OR_NULL(threads[i].worker))
507*f005ef32Sjsg 			continue;
508*f005ef32Sjsg 
509*f005ef32Sjsg 		kthread_flush_work(&threads[i].work);
510*f005ef32Sjsg 		status = READ_ONCE(threads[i].result);
511*f005ef32Sjsg 		if (status && !ret) {
512*f005ef32Sjsg 			pr_err("%s GT %d failed ", __func__, gt->info.id);
513*f005ef32Sjsg 			ret = status;
514*f005ef32Sjsg 		}
515*f005ef32Sjsg 		kthread_destroy_worker(threads[i].worker);
516*f005ef32Sjsg 	}
517*f005ef32Sjsg 
518*f005ef32Sjsg 	kfree(threads);
519*f005ef32Sjsg 	return ret;
5205ca02815Sjsg }
5215ca02815Sjsg 
intel_slpc_live_selftests(struct drm_i915_private * i915)5225ca02815Sjsg int intel_slpc_live_selftests(struct drm_i915_private *i915)
5235ca02815Sjsg {
5245ca02815Sjsg 	static const struct i915_subtest tests[] = {
5251bb76ff1Sjsg 		SUBTEST(live_slpc_vary_max),
5261bb76ff1Sjsg 		SUBTEST(live_slpc_vary_min),
5271bb76ff1Sjsg 		SUBTEST(live_slpc_max_granted),
528*f005ef32Sjsg 		SUBTEST(live_slpc_power),
529*f005ef32Sjsg 		SUBTEST(live_slpc_tile_interaction),
5305ca02815Sjsg 	};
5315ca02815Sjsg 
532*f005ef32Sjsg 	struct intel_gt *gt;
533*f005ef32Sjsg 	unsigned int i;
534*f005ef32Sjsg 
535*f005ef32Sjsg 	for_each_gt(gt, i915, i) {
536*f005ef32Sjsg 		if (intel_gt_is_wedged(gt))
5375ca02815Sjsg 			return 0;
538*f005ef32Sjsg 	}
5395ca02815Sjsg 
5405ca02815Sjsg 	return i915_live_subtests(tests, i915);
5415ca02815Sjsg }
542