// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset.  MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.).  The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s).  MMIO writes to MCR
 * registers can be done in either multicast form (i.e., a single write
 * updates all instances of the register to the same value) or unicast form
 * (a write updates only one specific instance).  Reads of MCR registers
 * always operate in a unicast manner regardless of how the multicast/unicast
 * bit is set in MCR_SELECTOR.
 * Selection of a specific MCR instance for unicast operations is referred to
 * as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware.  Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored.
 */
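
/*
 * Usage sketch (illustrative only; SOME_MCR_REG stands in for any real
 * i915_mcr_reg_t definition).  Locking and forcewake are handled internally
 * by these helpers:
 *
 *	// Update every instance of the register to the same value.
 *	intel_gt_mcr_multicast_write(gt, SOME_MCR_REG, val);
 *
 *	// Read the copy held by one specific group/instance pair.
 *	val = intel_gt_mcr_read(gt, SOME_MCR_REG, group, instance);
 *
 *	// Read from whichever instance is known to be non-terminated.
 *	val = intel_gt_mcr_read_any(gt, SOME_MCR_REG);
 */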

#define HAS_MSLICE_STEERING(i915)	(INTEL_INFO(i915)->has_mslice_steering)

static const char * const intel_steering_types[] = {
	"L3BANK",
	"MSLICE",
	"LNCF",
	"GAM",
	"DSS",
	"OADDRM",
	"INSTANCE 0",
};

static const struct intel_mmio_range icl_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules.  Thus we use a separate
 * GAM table farther down for those.
 */
static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
	{ 0x004000, 0x004AFF },
	{ 0x00C800, 0x00CFFF },
	{},
};

static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D800, 0x00D8FF },
	{},
};

static const struct intel_mmio_range dg2_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

/*
 * We have several types of MCR registers on PVC where steering to (0,0)
 * will always provide us with a non-terminated value.  We'll stick them
 * all in the same table for simplicity.
 */
static const struct intel_mmio_range pvc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },		/* HALF-BSLICE */
	{ 0x008800, 0x00887F },		/* CC */
	{ 0x008A80, 0x008AFF },		/* TILEPSMI */
	{ 0x00B000, 0x00B0FF },		/* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF },		/* L3BANK */
	{ 0x00C800, 0x00CFFF },		/* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF },		/* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF },		/* BSLICE */
	{ 0x00E900, 0x00E9FF },		/* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF },		/* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF },		/* HALF-BSLICE */
	{ 0x024180, 0x0241FF },		/* HALF-BSLICE */
	{},
};

static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF },         /* SQIDI */
	{ 0x001000, 0x001FFF },         /* SQIDI */
	{ 0x004000, 0x0048FF },         /* GAM */
	{ 0x008700, 0x0087FF },         /* SQIDI */
	{ 0x00B000, 0x00B0FF },         /* NODE */
	{ 0x00C800, 0x00CFFF },         /* GAM */
	{ 0x00D880, 0x00D8FF },         /* NODE */
	{ 0x00DD00, 0x00DDFF },         /* OAAL2 */
	{},
};

static const struct intel_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct intel_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* SLICE */
	{ 0x005500, 0x007FFF },		/* SLICE */
	{ 0x008140, 0x00815F },		/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* SLICE */
	{ 0x00DC00, 0x00DCFF },		/* SLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct intel_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};

void intel_gt_mcr_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned long fuse;
	int i;

	mtx_init(&gt->mcr_lock, IPL_TTY);

	/*
	 * An mslice is unavailable only if both the meml3 for the slice is
	 * disabled *and* all of the DSS in the slice (quadrant) are disabled.
	 */
	if (HAS_MSLICE_STEERING(i915)) {
		gt->info.mslice_mask =
			intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
							  GEN_DSS_PER_MSLICE);
		gt->info.mslice_mask |=
			(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			 GEN12_MEML3_EN_MASK);

		if (!gt->info.mslice_mask) /* should be impossible! */
			gt_warn(gt, "mslice mask all zero!\n");
	}

	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
		gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		/* Wa_14016747170 */
		if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
		    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
			fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
					     intel_uncore_read(gt->uncore,
							       MTL_GT_ACTIVITY_FACTOR));
		else
			fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
					     intel_uncore_read(gt->uncore, XEHP_FUSE4));

		/*
		 * Despite the register field being named "exclude mask" the
		 * bits actually represent enabled banks (two banks per bit).
		 */
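		/*
		 * Worked example (illustrative): fuse == 0b101 enables bank
		 * pairs 0 and 2, so the loop below yields
		 * l3bank_mask == 0b110011 (banks 0, 1, 4 and 5).
		 */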
		for_each_set_bit(i, &fuse, 3)
			gt->info.l3bank_mask |= 0x3 << 2 * i;

		gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
		gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
		gt->steering_table[DSS] = xelpg_dss_steering_table;
	} else if (IS_PONTEVECCHIO(i915)) {
		gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
	} else if (IS_DG2(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = dg2_lncf_steering_table;
		/*
		 * No need to hook up the GAM table since it has a dedicated
		 * steering control register on DG2 and can use implicit
		 * steering.
		 */
	} else if (IS_XEHPSDV(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
		gt->steering_table[GAM] = xehpsdv_gam_steering_table;
	} else if (GRAPHICS_VER(i915) >= 11 &&
		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
		gt->info.l3bank_mask =
			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;
		if (!gt->info.l3bank_mask) /* should be impossible! */
			gt_warn(gt, "L3 bank mask is all zero!\n");
	} else if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * We expect all modern platforms to have at least some
		 * type of steering that needs to be initialized.
		 */
		MISSING_CASE(INTEL_INFO(i915)->platform);
	}
}

/*
 * Although the rest of the driver should use MCR-specific functions to
 * read/write MCR registers, we still use the regular intel_uncore_* functions
 * internally to implement those, so we need a way for the functions in this
 * file to "cast" an i915_mcr_reg_t into an i915_reg_t.
 */
static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
{
	i915_reg_t r = { .reg = mcr.reg };

	return r;
}

/*
 * rw_with_mcr_steering_fw - Access a register with specific MCR steering
 * @gt: GT to read register from
 * @reg: register being accessed
 * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
 * @group: group number (documented as "sliceid" on older platforms)
 * @instance: instance number (documented as "subsliceid" on older platforms)
 * @value: register value to be written (ignored for read)
 *
 * Context: The caller must hold the MCR lock
 * Return: 0 for write access, register value for read access.
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
				   i915_mcr_reg_t reg, u8 rw_flag,
				   int group, int instance, u32 value)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;

	lockdep_assert_held(&gt->mcr_lock);

	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
		/*
		 * Always leave the hardware in multicast mode when doing reads
		 * (see comment about Wa_22013088509 below) and only change it
		 * to unicast mode when doing writes of a specific instance.
		 *
		 * No need to save old steering reg value.
		 */
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR,
				      REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
				      REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance) |
				      (rw_flag == FW_REG_READ ? GEN11_MCR_MULTICAST : 0));
	} else if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);

		/*
		 * Wa_22013088509
		 *
		 * The setting of the multicast/unicast bit usually wouldn't
		 * matter for read operations (which always return the value
		 * from a single register instance regardless of how that bit
		 * is set), but some platforms have a workaround requiring us
		 * to remain in multicast mode for reads.  There's no real
		 * downside to this, so we'll just go ahead and do so on all
		 * platforms; we'll only clear the multicast bit from the mask
		 * when explicitly doing a write operation.
		 */
		if (rw_flag == FW_REG_WRITE)
			mcr_mask |= GEN11_MCR_MULTICAST;

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	}

	if (rw_flag == FW_REG_READ)
		val = intel_uncore_read_fw(uncore, mcr_reg_cast(reg));
	else
		intel_uncore_write_fw(uncore, mcr_reg_cast(reg), value);

	/*
	 * For pre-MTL platforms, we need to restore the old value of the
	 * steering control register to ensure that implicit steering continues
	 * to behave as expected.  For MTL and beyond, we need only reinstate
	 * the 'multicast' bit (and only if we did a write that cleared it).
	 */
	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70) && rw_flag == FW_REG_WRITE)
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
	else if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 70))
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, old_mcr);

	return val;
}

static u32 rw_with_mcr_steering(struct intel_gt *gt,
				i915_mcr_reg_t reg, u8 rw_flag,
				int group, int instance,
				u32 value)
{
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw_domains;
	unsigned long flags;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
						    rw_flag);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return val;
}

/**
 * intel_gt_mcr_lock - Acquire MCR steering lock
 * @gt: GT structure
 * @flags: storage to save IRQ flags to
 *
 * Performs locking to protect the steering for the duration of an MCR
 * operation.  On MTL and beyond, a hardware lock will also be taken to
 * serialize access not only for the driver, but also for external hardware and
 * firmware agents.
 *
 * Context: Takes gt->mcr_lock.  uncore->lock should *not* be held when this
 *          function is called, although it may be acquired after this
 *          function call.
 */
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
	__acquires(&gt->mcr_lock)
{
	unsigned long __flags;
	int err = 0;

	lockdep_assert_not_held(&gt->uncore->lock);

	/*
	 * Starting with MTL, we need to coordinate not only with other
	 * driver threads, but also with hardware/firmware agents.  A dedicated
	 * locking register is used.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		/*
		 * The steering control and semaphore registers are inside an
		 * "always on" power domain with respect to RC6.  However there
		 * are some issues if higher-level platform sleep states are
		 * entering/exiting at the same time these registers are
		 * accessed.  Grabbing GT forcewake and holding it over the
		 * entire lock/steer/unlock cycle ensures that those sleep
		 * states have been fully exited before we access these
		 * registers.  This wakeref will be released in the unlock
		 * routine.
		 *
		 * This is expected to become a formally documented/numbered
		 * workaround soon.
		 */
		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);

		err = wait_for(intel_uncore_read_fw(gt->uncore,
						    MTL_STEER_SEMAPHORE) == 0x1, 100);
	}

	/*
	 * Even on platforms with a hardware lock, we'll continue to grab
	 * a software spinlock too for lockdep purposes.  If the hardware lock
	 * was already acquired, there should never be contention on the
	 * software lock.
	 */
	spin_lock_irqsave(&gt->mcr_lock, __flags);

	*flags = __flags;

	/*
	 * In theory we should never fail to acquire the HW semaphore; this
	 * would indicate some hardware/firmware is misbehaving and not
	 * releasing it properly.
	 */
	if (err == -ETIMEDOUT) {
		gt_err_ratelimited(gt, "hardware MCR steering semaphore timed out");
		add_taint_for_CI(gt->i915, TAINT_WARN);  /* CI is now unreliable */
	}
}

/**
 * intel_gt_mcr_unlock - Release MCR steering lock
 * @gt: GT structure
 * @flags: IRQ flags to restore
 *
 * Releases the lock acquired by intel_gt_mcr_lock().
 *
 * Context: Releases gt->mcr_lock
 */
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
	__releases(&gt->mcr_lock)
{
	spin_unlock_irqrestore(&gt->mcr_lock, flags);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);

		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
	}
}
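
/*
 * Usage sketch (illustrative): a lock/steer/unlock cycle wrapping a _fw
 * access; SOME_MCR_REG is a stand-in name:
 *
 *	unsigned long flags;
 *	u32 val;
 *
 *	intel_gt_mcr_lock(gt, &flags);
 *	val = intel_gt_mcr_read_any_fw(gt, SOME_MCR_REG);
 *	intel_gt_mcr_unlock(gt, flags);
 *
 * Callers are still responsible for holding any forcewake domains the
 * register itself requires; see rw_with_mcr_steering() above for the
 * pattern that handles both.
 */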

/**
 * intel_gt_mcr_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Context: Takes and releases gt->mcr_lock
 *
 * Returns the value read from an MCR register after steering toward a specific
 * group/instance.
 */
u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance)
{
	return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0);
}

/**
 * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 *
 * Context: Calls a function that takes and releases gt->mcr_lock
 */
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
				int group, int instance)
{
	rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value);
}

/**
 * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 *
 * Context: Takes and releases gt->mcr_lock
 */
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value)
{
	unsigned long flags;

	intel_gt_mcr_lock(gt, &flags);

	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);

	intel_gt_mcr_unlock(gt, flags);
}

/**
 * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.  This
 * function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
 * be obtained automatically.
 *
 * Context: The caller must hold gt->mcr_lock.
 */
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
	lockdep_assert_held(&gt->mcr_lock);

	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write_fw(gt->uncore, mcr_reg_cast(reg), value);
}
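
/*
 * Usage sketch (illustrative): batching several multicast writes under a
 * single lock/forcewake cycle, mirroring the pattern used when applying
 * workaround lists; fw and the register names are stand-ins:
 *
 *	intel_gt_mcr_lock(gt, &flags);
 *	spin_lock(&gt->uncore->lock);
 *	intel_uncore_forcewake_get__locked(gt->uncore, fw);
 *
 *	intel_gt_mcr_multicast_write_fw(gt, SOME_MCR_REG, val1);
 *	intel_gt_mcr_multicast_write_fw(gt, OTHER_MCR_REG, val2);
 *
 *	intel_uncore_forcewake_put__locked(gt->uncore, fw);
 *	spin_unlock(&gt->uncore->lock);
 *	intel_gt_mcr_unlock(gt, flags);
 */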

/**
 * intel_gt_mcr_multicast_rmw - Performs a multicast RMW operation
 * @gt: GT structure
 * @reg: the MCR register to read and write
 * @clear: bits to clear during RMW
 * @set: bits to set during RMW
 *
 * Performs a read-modify-write on an MCR register in a multicast manner.
 * This operation only makes sense on MCR registers where all instances are
 * expected to have the same value.  The read will target any non-terminated
 * instance and the write will be applied to all instances.
 *
 * Forcewake and the MCR lock are acquired automatically by the read and
 * write helpers this function calls.
 *
 * Context: Calls functions that take and release gt->mcr_lock
 *
 * Returns the old (unmodified) value read.
 */
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set)
{
	u32 val = intel_gt_mcr_read_any(gt, reg);

	intel_gt_mcr_multicast_write(gt, reg, (val & ~clear) | set);

	return val;
}
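
/*
 * Usage sketch (illustrative): set SOME_BIT in every instance of
 * SOME_MCR_REG while leaving the other bits untouched; both names are
 * stand-ins:
 *
 *	intel_gt_mcr_multicast_rmw(gt, SOME_MCR_REG, 0, SOME_BIT);
 */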

/*
 * reg_needs_read_steering - determine whether a register read requires
 *     explicit steering
 * @gt: GT structure
 * @reg: the register to check steering requirements for
 * @type: type of multicast steering to check
 *
 * Determines whether @reg needs explicit steering of a specific type for
 * reads.
 *
 * Returns false if @reg does not belong to a register range of the given
 * steering type, or if the default (subslice-based) steering IDs are suitable
 * for @type steering too.
 */
static bool reg_needs_read_steering(struct intel_gt *gt,
				    i915_mcr_reg_t reg,
				    enum intel_steering_type type)
{
	u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_mmio_range *entry;

	if (likely(!gt->steering_table[type]))
		return false;

	if (IS_GSI_REG(offset))
		offset += gt->uncore->gsi_offset;

	for (entry = gt->steering_table[type]; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}

/*
 * get_nonterminated_steering - determines valid IDs for a class of MCR steering
 * @gt: GT structure
 * @type: multicast register type
 * @group: Group ID returned
 * @instance: Instance ID returned
 *
 * Determines group and instance values that will steer reads of the specified
 * MCR class to a non-terminated instance.
 */
static void get_nonterminated_steering(struct intel_gt *gt,
				       enum intel_steering_type type,
				       u8 *group, u8 *instance)
{
	u32 dss;

	switch (type) {
	case L3BANK:
		*group = 0;		/* unused */
		*instance = __ffs(gt->info.l3bank_mask);
		break;
	case MSLICE:
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask);
		*instance = 0;	/* unused */
		break;
	case LNCF:
		/*
		 * An LNCF is always present if its mslice is present, so we
		 * can safely just steer to LNCF 0 in all cases.
		 */
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask) << 1;
		*instance = 0;	/* unused */
		break;
	case GAM:
		*group = IS_DG2(gt->i915) ? 1 : 0;
		*instance = 0;
		break;
	case DSS:
		dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
		break;
	case INSTANCE0:
		/*
		 * There are a lot of MCR types for which instance (0, 0)
		 * will always provide a non-terminated value.
		 */
		*group = 0;
		*instance = 0;
		break;
	case OADDRM:
		if ((VDBOX_MASK(gt) | VEBOX_MASK(gt) | gt->info.sfc_mask) & BIT(0))
			*group = 0;
		else
			*group = 1;
		*instance = 0;
		break;
	default:
		MISSING_CASE(type);
		*group = 0;
		*instance = 0;
	}
}

/**
 * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register. Note that a value will be returned even
 * if the register is not replicated and therefore does not actually require
 * steering.
 */
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance)
{
	int type;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, group, instance);
			return;
		}
	}

	*group = gt->default_steering.groupid;
	*instance = gt->default_steering.instanceid;
}
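
/*
 * Usage sketch (illustrative): resolve a steering target once, then use it
 * for an explicitly steered access; SOME_MCR_REG is a stand-in name:
 *
 *	u8 group, instance;
 *
 *	intel_gt_mcr_get_nonterminated_steering(gt, SOME_MCR_REG,
 *						&group, &instance);
 *	val = intel_gt_mcr_read(gt, SOME_MCR_REG, group, instance);
 */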

/**
 * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register.  The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
 * obtained automatically.
 *
 * Context: The caller must hold gt->mcr_lock.
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	lockdep_assert_held(&gt->mcr_lock);

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering_fw(gt, reg,
						       FW_REG_READ,
						       group, instance, 0);
		}
	}

	return intel_uncore_read_fw(gt->uncore, mcr_reg_cast(reg));
}

/**
 * intel_gt_mcr_read_any - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register.  The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 *
 * Context: Calls a function that takes and releases gt->mcr_lock.
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering(gt, reg,
						    FW_REG_READ,
						    group, instance, 0);
		}
	}

	return intel_uncore_read(gt->uncore, mcr_reg_cast(reg));
}

static void report_steering_type(struct drm_printer *p,
				 struct intel_gt *gt,
				 enum intel_steering_type type,
				 bool dump_table)
{
	const struct intel_mmio_range *entry;
	u8 group, instance;

	BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);

	if (!gt->steering_table[type]) {
		drm_printf(p, "%s steering: uses default steering\n",
			   intel_steering_types[type]);
		return;
	}

	get_nonterminated_steering(gt, type, &group, &instance);
	drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
		   intel_steering_types[type], group, instance);

	if (!dump_table)
		return;

	for (entry = gt->steering_table[type]; entry->end; entry++)
		drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
}

void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table)
{
	/*
	 * Starting with MTL we no longer have default steering;
	 * all ranges are explicitly steered.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70))
		drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
			   gt->default_steering.groupid,
			   gt->default_steering.instanceid);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		for (int i = 0; i < NUM_STEERING_TYPES; i++)
			if (gt->steering_table[i])
				report_steering_type(p, gt, i, dump_table);
	} else if (IS_PONTEVECCHIO(gt->i915)) {
		report_steering_type(p, gt, INSTANCE0, dump_table);
	} else if (HAS_MSLICE_STEERING(gt->i915)) {
		report_steering_type(p, gt, MSLICE, dump_table);
		report_steering_type(p, gt, LNCF, dump_table);
	}
}

/**
 * intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
 * @gt: GT structure
 * @dss: DSS ID to obtain steering for
 * @group: pointer to storage for steering group ID
 * @instance: pointer to storage for steering instance ID
 *
 * Returns the steering IDs (via the @group and @instance parameters) that
 * correspond to a specific subslice/DSS ID.
 */
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				   unsigned int *group, unsigned int *instance)
{
	if (IS_PONTEVECCHIO(gt->i915)) {
		*group = dss / GEN_DSS_PER_CSLICE;
		*instance = dss % GEN_DSS_PER_CSLICE;
	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
	} else {
		*group = dss / GEN_MAX_SS_PER_HSW_SLICE;
		*instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
		return;
	}
}
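
/*
 * Usage sketch (illustrative): convert a DSS ID into a steering target and
 * read that subslice's copy of a per-DSS register; SOME_DSS_MCR_REG is a
 * stand-in name:
 *
 *	unsigned int group, instance;
 *
 *	intel_gt_mcr_get_ss_steering(gt, dss, &group, &instance);
 *	val = intel_gt_mcr_read(gt, SOME_DSS_MCR_REG, group, instance);
 */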

/**
 * intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
 * @gt: GT structure
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: value to wait for
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_gt_mcr_read_any_fw(gt, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * This function is basically an MCR-friendly version of
 * __intel_wait_for_register_fw().  Generally this function will only be used
 * on GAM registers which are a bit special --- although they're MCR registers,
 * reads (e.g., waiting for status updates) are always directed to the primary
 * instance.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits.
 *
 * Context: Calls a function that takes and releases gt->mcr_lock
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
			      i915_mcr_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms)
{
	int ret;

	lockdep_assert_not_held(&gt->mcr_lock);

#define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value)

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	return ret;
#undef done
}
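
/*
 * Usage sketch (illustrative): spin briefly for a busy bit to clear in a
 * GAM register, with forcewake already held by the caller; the register
 * and bit names are stand-ins:
 *
 *	err = intel_gt_mcr_wait_for_reg(gt, SOME_GAM_MCR_REG,
 *					SOME_BUSY_BIT, 0,
 *					100, 10);
 *	if (err)
 *		gt_err(gt, "register never became idle\n");
 */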