// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_perf_types.h"
#include "intel_engine_regs.h"
#include "intel_gt_regs.h"
#include "intel_sseu.h"

void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
			 u8 max_subslices, u8 max_eus_per_subslice)
{
	sseu->max_slices = max_slices;
	sseu->max_subslices = max_subslices;
	sseu->max_eus_per_subslice = max_eus_per_subslice;
}

unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	unsigned int i, total = 0;

	if (sseu->has_xehp_dss)
		return bitmap_weight(sseu->subslice_mask.xehp,
				     XEHP_BITMAP_BITS(sseu->subslice_mask));

	for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask.hsw); i++)
		total += hweight8(sseu->subslice_mask.hsw[i]);

	return total;
}

unsigned int
intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice)
{
	WARN_ON(sseu->has_xehp_dss);
	if (WARN_ON(slice >= sseu->max_slices))
		return 0;

	return sseu->subslice_mask.hsw[slice];
}

static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
			int subslice)
{
	if (sseu->has_xehp_dss) {
		WARN_ON(slice > 0);
		return sseu->eu_mask.xehp[subslice];
	} else {
		return sseu->eu_mask.hsw[slice][subslice];
	}
}

static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
			 u16 eu_mask)
{
	GEM_WARN_ON(eu_mask && __fls(eu_mask) >= sseu->max_eus_per_subslice);
	if (sseu->has_xehp_dss) {
		GEM_WARN_ON(slice > 0);
		sseu->eu_mask.xehp[subslice] = eu_mask;
	} else {
		sseu->eu_mask.hsw[slice][subslice] = eu_mask;
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	int s, ss, total = 0;

	for (s = 0; s < sseu->max_slices; s++)
		for (ss = 0; ss < sseu->max_subslices; ss++)
			if (sseu->has_xehp_dss)
				total += hweight16(sseu->eu_mask.xehp[ss]);
			else
				total += hweight16(sseu->eu_mask.hsw[s][ss]);

	return total;
}

/**
 * intel_sseu_copy_eumask_to_user - Copy EU mask into a userspace buffer
 * @to: Pointer to userspace buffer to copy to
 * @sseu: SSEU structure containing EU mask to copy
 *
 * Copies the EU mask to a userspace buffer in the format expected by
 * the query ioctl's topology queries.
 *
 * Returns the result of the copy_to_user() operation.
 */
int intel_sseu_copy_eumask_to_user(void __user *to,
				   const struct sseu_dev_info *sseu)
{
	u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE] = {};
	int eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
	int len = sseu->max_slices * sseu->max_subslices * eu_stride;
	int s, ss, i;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int uapi_offset =
				s * sseu->max_subslices * eu_stride +
				ss * eu_stride;
			u16 mask = sseu_get_eus(sseu, s, ss);

			for (i = 0; i < eu_stride; i++)
				eu_mask[uapi_offset + i] =
					(mask >> (BITS_PER_BYTE * i)) & 0xff;
		}
	}

	return copy_to_user(to, eu_mask, len);
}
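
/*
 * Illustrative example (editor's note, not from the original source): with
 * max_slices = 1, max_subslices = 6, and max_eus_per_subslice = 16,
 * eu_stride is 2 bytes, so subslice ss's 16-bit EU mask lands at byte
 * offsets [ss * 2, ss * 2 + 1], least-significant byte first; a mask of
 * 0xffff for subslice 1 would be written as 0xff 0xff at offsets 2 and 3.
 */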

/**
 * intel_sseu_copy_ssmask_to_user - Copy subslice mask into a userspace buffer
 * @to: Pointer to userspace buffer to copy to
 * @sseu: SSEU structure containing subslice mask to copy
 *
 * Copies the subslice mask to a userspace buffer in the format expected by
 * the query ioctl's topology queries.
 *
 * Returns the result of the copy_to_user() operation.
 */
int intel_sseu_copy_ssmask_to_user(void __user *to,
				   const struct sseu_dev_info *sseu)
{
	u8 ss_mask[GEN_SS_MASK_SIZE] = {};
	int ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
	int len = sseu->max_slices * ss_stride;
	int s, ss, i;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			i = s * ss_stride * BITS_PER_BYTE + ss;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				continue;

			ss_mask[i / BITS_PER_BYTE] |= BIT(i % BITS_PER_BYTE);
		}
	}

	return copy_to_user(to, ss_mask, len);
}
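
/*
 * Illustrative example (editor's note, not from the original source): for
 * slice 0 with subslices 0, 2, and 3 enabled, the loop above sets bit
 * indices 0, 2, and 3, so ss_mask[0] ends up as 0b00001101 (0x0d).  With
 * multiple slices, slice s starts at bit s * ss_stride * BITS_PER_BYTE.
 */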

static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
				    u32 ss_en, u16 eu_en)
{
	u32 valid_ss_mask = GENMASK(sseu->max_subslices - 1, 0);
	int ss;

	sseu->slice_mask |= BIT(0);
	sseu->subslice_mask.hsw[0] = ss_en & valid_ss_mask;

	for (ss = 0; ss < sseu->max_subslices; ss++)
		if (intel_sseu_has_subslice(sseu, 0, ss))
			sseu_set_eus(sseu, 0, ss, eu_en);

	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

static void xehp_compute_sseu_info(struct sseu_dev_info *sseu,
				   u16 eu_en)
{
	int ss;

	sseu->slice_mask |= BIT(0);

	bitmap_or(sseu->subslice_mask.xehp,
		  sseu->compute_subslice_mask.xehp,
		  sseu->geometry_subslice_mask.xehp,
		  XEHP_BITMAP_BITS(sseu->subslice_mask));

	for (ss = 0; ss < sseu->max_subslices; ss++)
		if (intel_sseu_has_subslice(sseu, 0, ss))
			sseu_set_eus(sseu, 0, ss, eu_en);

	sseu->eu_per_subslice = hweight16(eu_en);
	sseu->eu_total = compute_eu_total(sseu);
}

static void
xehp_load_dss_mask(struct intel_uncore *uncore,
		   intel_sseu_ss_mask_t *ssmask,
		   int numregs,
		   ...)
{
	va_list argp;
	u32 fuse_val[I915_MAX_SS_FUSE_REGS] = {};
	int i;

	if (WARN_ON(numregs > I915_MAX_SS_FUSE_REGS))
		numregs = I915_MAX_SS_FUSE_REGS;

	va_start(argp, numregs);
	for (i = 0; i < numregs; i++)
		fuse_val[i] = intel_uncore_read(uncore, va_arg(argp, i915_reg_t));
	va_end(argp);

	bitmap_from_arr32(ssmask->xehp, fuse_val, numregs * 32);
}
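
/*
 * Illustrative example (editor's note, not from the original source): on
 * Ponte Vecchio the caller below passes numregs = 2 with
 * GEN12_GT_COMPUTE_DSS_ENABLE and XEHPC_GT_COMPUTE_DSS_ENABLE_EXT, so the
 * two 32-bit fuse values are packed into a single 64-bit compute DSS
 * bitmap; other Xe_HP platforms pass one register per mask.
 */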

static void xehp_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	int num_compute_regs, num_geometry_regs;
	int eu;

	if (IS_PONTEVECCHIO(gt->i915)) {
		num_geometry_regs = 0;
		num_compute_regs = 2;
	} else {
		num_geometry_regs = 1;
		num_compute_regs = 1;
	}

	/*
	 * The concept of slice has been removed in Xe_HP.  To be compatible
	 * with prior generations, assume a single slice across the entire
	 * device. Then calculate out the DSS for each workload type within
	 * that software slice.
	 */
	intel_sseu_set_info(sseu, 1,
			    32 * max(num_geometry_regs, num_compute_regs),
			    HAS_ONE_EU_PER_FUSE_BIT(gt->i915) ? 8 : 16);
	sseu->has_xehp_dss = 1;

	xehp_load_dss_mask(uncore, &sseu->geometry_subslice_mask,
			   num_geometry_regs,
			   GEN12_GT_GEOMETRY_DSS_ENABLE);
	xehp_load_dss_mask(uncore, &sseu->compute_subslice_mask,
			   num_compute_regs,
			   GEN12_GT_COMPUTE_DSS_ENABLE,
			   XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);

	eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;

	if (HAS_ONE_EU_PER_FUSE_BIT(gt->i915))
		eu_en = eu_en_fuse;
	else
		for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
			if (eu_en_fuse & BIT(eu))
				eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	xehp_compute_sseu_info(sseu, eu_en);
}

static void gen12_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 g_dss_en;
	u16 eu_en = 0;
	u8 eu_en_fuse;
	u8 s_en;
	int eu;

	/*
	 * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
	 * Instead of splitting these, provide userspace with an array
	 * of DSS to more closely represent the hardware resource.
	 */
	intel_sseu_set_info(sseu, 1, 6, 16);

	/*
	 * Although gen12 architecture supported multiple slices, TGL, RKL,
	 * DG1, and ADL only had a single slice.
	 */
	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
		GEN11_GT_S_ENA_MASK;
	drm_WARN_ON(&gt->i915->drm, s_en != 0x1);

	g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);

	/* one bit per pair of EUs */
	eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
		       GEN11_EU_DIS_MASK);

	for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	gen11_compute_sseu_info(sseu, g_dss_en, eu_en);

	/* TGL only supports slice-level power gating */
	sseu->has_slice_pg = 1;
}
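
/*
 * Illustrative example (editor's note, not from the original source): in
 * the fuse-pair expansion above, bit k of eu_en_fuse enables EUs 2k and
 * 2k+1.  A fuse value of 0b00001111 therefore expands to an eu_en of
 * 0b0000000011111111, i.e. eight enabled EUs out of a possible sixteen
 * per DSS.
 */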

static void gen11_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 ss_en;
	u8 eu_en;
	u8 s_en;

	if (IS_JASPERLAKE(gt->i915) || IS_ELKHARTLAKE(gt->i915))
		intel_sseu_set_info(sseu, 1, 4, 8);
	else
		intel_sseu_set_info(sseu, 1, 8, 8);

	/*
	 * Although gen11 architecture supported multiple slices, ICL and
	 * EHL/JSL only had a single slice in practice.
	 */
	s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
		GEN11_GT_S_ENA_MASK;
	drm_WARN_ON(&gt->i915->drm, s_en != 0x1);

	ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE);

	eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
		  GEN11_EU_DIS_MASK);

	gen11_compute_sseu_info(sseu, ss_en, eu_en);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	u32 fuse;

	fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	intel_sseu_set_info(sseu, 1, 2, 8);

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask.hsw[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask & 0xFF);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask.hsw[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask & 0xFF);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV is expected to always have a uniform distribution of EUs
	 * across subslices.
	 */
	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
		sseu->eu_total /
		intel_sseu_subslice_total(sseu) :
		0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
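
/*
 * Illustrative example (editor's note, not from the original source): each
 * CHV subslice's 8-bit EU-disable mask is split across two fuse fields, so
 * an R0 field of 0x3 and an R1 field of 0xc assemble to a disabled_mask of
 * 0xc3 above, leaving EUs 2-5 enabled (~0xc3 & 0xFF == 0x3c).
 */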

static void gen9_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;
	int s, ss;

	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3,
			    IS_GEN9_LP(i915) ? 3 : 4, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask.hsw[s] = subslice_mask;

		eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & eu_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) have 7 EUs. We can tune
			 * the hash used to spread work among subslices if
			 * they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice =
		intel_sseu_subslice_total(sseu) ?
		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
		0;

	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(i915)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask.hsw[0] & BIT(ss)))
		RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (HAS_POOLED_EU(i915)) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

static void bdw_sseu_info_init(struct intel_gt *gt)
{
	struct sseu_dev_info *sseu = &gt->info.sseu;
	struct intel_uncore *uncore = gt->uncore;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
	u32 eu_disable0, eu_disable1, eu_disable2;

	fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	intel_sseu_set_info(sseu, 3, 3, 8);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);
	eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0);
	eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1);
	eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2);
	eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) |
		((eu_disable1 & GEN8_EU_DIS1_S1_MASK) <<
		 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) |
		((eu_disable2 & GEN8_EU_DIS2_S2_MASK) <<
		 (32 - GEN8_EU_DIS1_S2_SHIFT));
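
	/*
	 * Illustrative note (editor's note, not from the original source):
	 * each slice needs a 24-bit EU-disable field (3 subslices x 8 EUs),
	 * so the fields for slices 1 and 2 straddle the 32-bit fuse
	 * registers; the shifts above stitch the low bits from one register
	 * together with the high bits from the next.
	 */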

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask.hsw[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!intel_sseu_has_subslice(sseu, s, ss))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask & 0xFF);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice =
		intel_sseu_subslice_total(sseu) ?
		DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
		0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void hsw_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct sseu_dev_info *sseu = &gt->info.sseu;
	u32 fuse1;
	u8 subslice_mask = 0;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (INTEL_INFO(i915)->gt) {
	default:
		MISSING_CASE(INTEL_INFO(i915)->gt);
		fallthrough;
	case 1:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		subslice_mask = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		subslice_mask = BIT(0) | BIT(1);
		break;
	}

	fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
	switch (REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)) {
	default:
		MISSING_CASE(REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1));
		fallthrough;
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}

	intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
			    hweight8(subslice_mask),
			    sseu->eu_per_subslice);

	for (s = 0; s < sseu->max_slices; s++) {
		sseu->subslice_mask.hsw[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

void intel_sseu_info_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		xehp_sseu_info_init(gt);
	else if (GRAPHICS_VER(i915) >= 12)
		gen12_sseu_info_init(gt);
	else if (GRAPHICS_VER(i915) >= 11)
		gen11_sseu_info_init(gt);
	else if (GRAPHICS_VER(i915) >= 9)
		gen9_sseu_info_init(gt);
	else if (IS_BROADWELL(i915))
		bdw_sseu_info_init(gt);
	else if (IS_CHERRYVIEW(i915))
		cherryview_sseu_info_init(gt);
	else if (IS_HASWELL(i915))
		hsw_sseu_info_init(gt);
}

u32 intel_sseu_make_rpcs(struct intel_gt *gt,
			 const struct intel_sseu *req_sseu)
{
	struct drm_i915_private *i915 = gt->i915;
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	bool subslice_pg = sseu->has_subslice_pg;
	u8 slices, subslices;
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (GRAPHICS_VER(i915) < 9)
		return 0;

	/*
	 * If i915/perf is active, we want a stable powergating configuration
	 * on the system. Use the configuration pinned by i915/perf.
	 */
	if (gt->perf.group && gt->perf.group[PERF_GROUP_OAG].exclusive_stream)
		req_sseu = &gt->perf.sseu;

	slices = hweight8(req_sseu->slice_mask);
	subslices = hweight8(req_sseu->subslice_mask);

	/*
	 * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
	 * wide and Icelake has up to eight subslices, special programming is
	 * needed in order to correctly enable all subslices.
	 *
	 * According to documentation software must consider the configuration
	 * as 2x4x8 and hardware will translate this to 1x8x8.
	 *
	 * Furthermore, even though SScount is three bits, the maximum
	 * documented value for it is four. From this some rules/restrictions
	 * follow:
	 *
	 * 1.
	 * If enabled subslice count is greater than four, two whole slices must
	 * be enabled instead.
	 *
	 * 2.
	 * When more than one slice is enabled, hardware ignores the subslice
	 * count altogether.
	 *
	 * From these restrictions it follows that it is not possible to enable
	 * a subslice count between the SScount maximum of four and the maximum
	 * available number on a particular SKU. Either all subslices are
	 * enabled, or a count between one and four on the first slice.
	 */
	if (GRAPHICS_VER(i915) == 11 &&
	    slices == 1 &&
	    subslices > min_t(u8, 4, hweight8(sseu->subslice_mask.hsw[0]) / 2)) {
		GEM_BUG_ON(subslices & 1);

		subslice_pg = false;
		slices *= 2;
	}

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (sseu->has_slice_pg) {
		u32 mask, val = slices;

		if (GRAPHICS_VER(i915) >= 11) {
			mask = GEN11_RPCS_S_CNT_MASK;
			val <<= GEN11_RPCS_S_CNT_SHIFT;
		} else {
			mask = GEN8_RPCS_S_CNT_MASK;
			val <<= GEN8_RPCS_S_CNT_SHIFT;
		}

		GEM_BUG_ON(val & ~mask);
		val &= mask;

		rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
	}

	if (subslice_pg) {
		u32 val = subslices;

		val <<= GEN8_RPCS_SS_CNT_SHIFT;

		GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
		val &= GEN8_RPCS_SS_CNT_MASK;

		rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
	}

	if (sseu->has_eu_pg) {
		u32 val;

		val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
		val &= GEN8_RPCS_EU_MIN_MASK;

		rpcs |= val;

		val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
		val &= GEN8_RPCS_EU_MAX_MASK;

		rpcs |= val;

		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}
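
/*
 * Illustrative example (editor's note, not from the original source): on an
 * ICL part with all eight subslices present, a request for 1 slice x 8
 * subslices exceeds the SScount limit above (8 > min(4, 8 / 2)), so
 * subslice_pg is dropped and slices becomes 2; the RPCS then programs
 * S_CNT = 2 with no SS_CNT, which the hardware translates back to the full
 * 1x8x8 configuration.
 */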

void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	if (sseu->has_xehp_dss) {
		drm_printf(p, "subslice total: %u\n",
			   intel_sseu_subslice_total(sseu));
		drm_printf(p, "geometry dss mask=%*pb\n",
			   XEHP_BITMAP_BITS(sseu->geometry_subslice_mask),
			   sseu->geometry_subslice_mask.xehp);
		drm_printf(p, "compute dss mask=%*pb\n",
			   XEHP_BITMAP_BITS(sseu->compute_subslice_mask),
			   sseu->compute_subslice_mask.xehp);
	} else {
		drm_printf(p, "slice total: %u, mask=%04x\n",
			   hweight8(sseu->slice_mask), sseu->slice_mask);
		drm_printf(p, "subslice total: %u\n",
			   intel_sseu_subslice_total(sseu));

		for (s = 0; s < sseu->max_slices; s++) {
			u8 ss_mask = sseu->subslice_mask.hsw[s];

			drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
				   s, hweight8(ss_mask), ss_mask);
		}
	}

	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   str_yes_no(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   str_yes_no(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n",
		   str_yes_no(sseu->has_eu_pg));
}

static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
				    struct drm_printer *p)
{
	int s, ss;

	for (s = 0; s < sseu->max_slices; s++) {
		u8 ss_mask = sseu->subslice_mask.hsw[s];

		drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
			   s, hweight8(ss_mask), ss_mask);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int dss;

	for (dss = 0; dss < sseu->max_subslices; dss++) {
		u16 enabled_eus = sseu_get_eus(sseu, 0, dss);

		drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
			   str_yes_no(test_bit(dss, sseu->geometry_subslice_mask.xehp)),
			   str_yes_no(test_bit(dss, sseu->compute_subslice_mask.xehp)),
			   hweight16(enabled_eus), enabled_eus);
	}
}

void intel_sseu_print_topology(struct drm_i915_private *i915,
			       const struct sseu_dev_info *sseu,
			       struct drm_printer *p)
{
	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		sseu_print_xehp_topology(sseu, p);
	} else {
		sseu_print_hsw_topology(sseu, p);
	}
}

void intel_sseu_print_ss_info(const char *type,
			      const struct sseu_dev_info *sseu,
			      struct seq_file *m)
{
	int s;

	if (sseu->has_xehp_dss) {
		seq_printf(m, "  %s Geometry DSS: %u\n", type,
			   bitmap_weight(sseu->geometry_subslice_mask.xehp,
					 XEHP_BITMAP_BITS(sseu->geometry_subslice_mask)));
		seq_printf(m, "  %s Compute DSS: %u\n", type,
			   bitmap_weight(sseu->compute_subslice_mask.xehp,
					 XEHP_BITMAP_BITS(sseu->compute_subslice_mask)));
	} else {
		for (s = 0; s < fls(sseu->slice_mask); s++)
			seq_printf(m, "  %s Slice%i subslices: %u\n", type,
				   s, hweight8(sseu->subslice_mask.hsw[s]));
	}
}

u16 intel_slicemask_from_xehp_dssmask(intel_sseu_ss_mask_t dss_mask,
				      int dss_per_slice)
{
	STUB();
	return 0;
#ifdef notyet
	intel_sseu_ss_mask_t per_slice_mask = {};
	unsigned long slice_mask = 0;
	int i;

	WARN_ON(DIV_ROUND_UP(XEHP_BITMAP_BITS(dss_mask), dss_per_slice) >
		8 * sizeof(slice_mask));

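	/*
	 * Illustrative example (editor's note, not from the original
	 * source): with dss_per_slice = 4, a dss_mask of 0x0f0 has no DSS
	 * in bits 0-3, so the first pass below skips slice 0; after
	 * shifting right by four, bits 4-7 land in the per-slice window and
	 * slice_mask becomes 0x2.
	 */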
	bitmap_fill(per_slice_mask.xehp, dss_per_slice);
	for (i = 0; !bitmap_empty(dss_mask.xehp, XEHP_BITMAP_BITS(dss_mask)); i++) {
		if (bitmap_intersects(dss_mask.xehp, per_slice_mask.xehp, dss_per_slice))
			slice_mask |= BIT(i);

		bitmap_shift_right(dss_mask.xehp, dss_mask.xehp, dss_per_slice,
				   XEHP_BITMAP_BITS(dss_mask));
	}

	return slice_mask;
#endif
}