xref: /openbsd-src/sys/dev/pci/drm/i915/gt/intel_gt_mcr.h (revision f005ef32267c16bdb134f0e9fa4477dbe07c263a)
11bb76ff1Sjsg /* SPDX-License-Identifier: MIT */
21bb76ff1Sjsg /*
31bb76ff1Sjsg  * Copyright © 2022 Intel Corporation
41bb76ff1Sjsg  */
51bb76ff1Sjsg 
61bb76ff1Sjsg #ifndef __INTEL_GT_MCR__
71bb76ff1Sjsg #define __INTEL_GT_MCR__
81bb76ff1Sjsg 
91bb76ff1Sjsg #include "intel_gt_types.h"
101bb76ff1Sjsg 
void intel_gt_mcr_init(struct intel_gt *gt);

/*
 * Serialize steered (MCR) register accesses.  flags is an opaque in/out
 * cookie in the spin_lock_irqsave() style — NOTE(review): confirm exact
 * locking semantics against the implementation in intel_gt_mcr.c.
 */
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);

/* Read an MCR register steered to a specific group/instance. */
u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance);
/*
 * Read an MCR register from any non-terminated instance; the _fw variant
 * presumably requires the caller to already hold forcewake — TODO confirm.
 */
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg);
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg);

/* Write a single steered instance of an MCR register. */
void intel_gt_mcr_unicast_write(struct intel_gt *gt,
				i915_mcr_reg_t reg, u32 value,
				int group, int instance);
/* Write all instances of an MCR register (multicast form). */
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value);
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt,
				     i915_mcr_reg_t reg, u32 value);

/*
 * Multicast read-modify-write; presumably applies (old & ~clear) | set —
 * NOTE(review): confirm clear/set ordering in intel_gt_mcr.c.
 */
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set);

/* Return a group/instance pair whose reads of reg are not terminated. */
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance);

/* Dump steering information to a drm_printer (full table if dump_table). */
void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table);

/* Translate a DSS index into the group/instance values used for steering. */
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				  unsigned int *group, unsigned int *instance);

/*
 * Wait on a steered register.  The fast/slow timeout pair mirrors the
 * __intel_wait_for_register() convention (busy-wait up to fast_timeout_us,
 * then sleep up to slow_timeout_ms) — NOTE(review): confirm against the
 * definition in intel_gt_mcr.c.
 */
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
			      i915_mcr_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms);
48*f005ef32Sjsg 
491bb76ff1Sjsg /*
501bb76ff1Sjsg  * Helper for for_each_ss_steering loop.  On pre-Xe_HP platforms, subslice
511bb76ff1Sjsg  * presence is determined by using the group/instance as direct lookups in the
521bb76ff1Sjsg  * slice/subslice topology.  On Xe_HP and beyond, the steering is unrelated to
531bb76ff1Sjsg  * the topology, so we lookup the DSS ID directly in "slice 0."
541bb76ff1Sjsg  */
/*
 * All macro arguments are fully parenthesized in the expansion (the
 * original left gt_ bare inside GRAPHICS_VER_FULL(), unlike the other two
 * uses), so complex expressions expand safely.  Note gt_ is still
 * evaluated more than once; pass a side-effect-free expression.
 */
#define _HAS_SS(ss_, gt_, group_, instance_) ( \
	GRAPHICS_VER_FULL((gt_)->i915) >= IP_VER(12, 50) ? \
		intel_sseu_has_subslice(&(gt_)->info.sseu, 0, (ss_)) : \
		intel_sseu_has_subslice(&(gt_)->info.sseu, (group_), (instance_)))
591bb76ff1Sjsg 
601bb76ff1Sjsg /*
611bb76ff1Sjsg  * Loop over each subslice/DSS and determine the group and instance IDs that
621bb76ff1Sjsg  * should be used to steer MCR accesses toward this DSS.
631bb76ff1Sjsg  */
/*
 * group_ and instance_ are (re)written by intel_gt_mcr_get_ss_steering()
 * on every iteration — including the priming call for ss_ == 0 — and the
 * loop body only executes for subslices that _HAS_SS reports present.
 * Like _HAS_SS, arguments may be evaluated multiple times.
 */
#define for_each_ss_steering(ss_, gt_, group_, instance_) \
	for (ss_ = 0, intel_gt_mcr_get_ss_steering(gt_, 0, &group_, &instance_); \
	     ss_ < I915_MAX_SS_FUSE_BITS; \
	     ss_++, intel_gt_mcr_get_ss_steering(gt_, ss_, &group_, &instance_)) \
		for_each_if(_HAS_SS(ss_, gt_, group_, instance_))
691bb76ff1Sjsg 
701bb76ff1Sjsg #endif /* __INTEL_GT_MCR__ */
71