xref: /openbsd-src/sys/dev/pci/drm/i915/gt/intel_gt.h (revision 09e4f8a57cbb964481b065a211c47c97f5179a0a)
1c349dbc7Sjsg /* SPDX-License-Identifier: MIT */
2c349dbc7Sjsg /*
3c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
4c349dbc7Sjsg  */
5c349dbc7Sjsg 
6c349dbc7Sjsg #ifndef __INTEL_GT__
7c349dbc7Sjsg #define __INTEL_GT__
8c349dbc7Sjsg 
9f005ef32Sjsg #include "i915_drv.h"
10c349dbc7Sjsg #include "intel_engine_types.h"
11c349dbc7Sjsg #include "intel_gt_types.h"
12c349dbc7Sjsg #include "intel_reset.h"
13c349dbc7Sjsg 
14c349dbc7Sjsg struct drm_i915_private;
15ad8b1aafSjsg struct drm_printer;
16c349dbc7Sjsg 
/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 *
 * The BUILD_BUG_ON_ZERO() terms make misuse a compile error rather than
 * a silent runtime mismatch: "from" must be at least IP_VER(2, 0) and
 * "until" must not be below "from".
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
2713f2a72cSjsg 
/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until).  The lower stepping bound is
 * inclusive, the upper bound is exclusive.  The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary.  E.g.,
 *
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 *
 * An empty range ("until" <= "from") is rejected at compile time via
 * BUILD_BUG_ON_ZERO().
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))
47596b6869Sjsg 
/*
 * Emit a GEM trace message prefixed with the device name of the GT's
 * owning i915 device.  "gt" is evaluated exactly once into a local
 * (gt__), so passing an expression with side effects is safe.
 */
#define GT_TRACE(gt, fmt, ...) do {					\
	const struct intel_gt *gt__ __maybe_unused = (gt);		\
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev),		\
		  ##__VA_ARGS__);					\
} while (0)
53c349dbc7Sjsg 
541bb76ff1Sjsg static inline bool gt_is_root(struct intel_gt *gt)
551bb76ff1Sjsg {
561bb76ff1Sjsg 	return !gt->info.id;
571bb76ff1Sjsg }
581bb76ff1Sjsg 
59f005ef32Sjsg static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
60f005ef32Sjsg {
61f005ef32Sjsg 	return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
62f005ef32Sjsg }
63f005ef32Sjsg 
/* Backpointer from an embedded intel_uc to its containing GT. */
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}
68c349dbc7Sjsg 
/* Backpointer from an embedded intel_guc (inside uc) to its containing GT. */
static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}
73c349dbc7Sjsg 
/* Backpointer from an embedded intel_huc (inside uc) to its containing GT. */
static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}
78c349dbc7Sjsg 
/* Backpointer from an embedded intel_gsc_uc (inside uc) to its containing GT. */
static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}
83f005ef32Sjsg 
/* Backpointer from an embedded intel_gsc to its containing GT. */
static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}
881bb76ff1Sjsg 
891bb76ff1Sjsg void intel_gt_common_init_early(struct intel_gt *gt);
901bb76ff1Sjsg int intel_root_gt_init_early(struct drm_i915_private *i915);
911bb76ff1Sjsg int intel_gt_assign_ggtt(struct intel_gt *gt);
92ad8b1aafSjsg int intel_gt_init_mmio(struct intel_gt *gt);
93c349dbc7Sjsg int __must_check intel_gt_init_hw(struct intel_gt *gt);
94c349dbc7Sjsg int intel_gt_init(struct intel_gt *gt);
95c349dbc7Sjsg void intel_gt_driver_register(struct intel_gt *gt);
96c349dbc7Sjsg 
97c349dbc7Sjsg void intel_gt_driver_unregister(struct intel_gt *gt);
98c349dbc7Sjsg void intel_gt_driver_remove(struct intel_gt *gt);
99c349dbc7Sjsg void intel_gt_driver_release(struct intel_gt *gt);
1001bb76ff1Sjsg void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
101c349dbc7Sjsg 
1025ca02815Sjsg int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
1035ca02815Sjsg 
104c349dbc7Sjsg void intel_gt_check_and_clear_faults(struct intel_gt *gt);
105f005ef32Sjsg i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
106c349dbc7Sjsg void intel_gt_clear_error_registers(struct intel_gt *gt,
107c349dbc7Sjsg 				    intel_engine_mask_t engine_mask);
108c349dbc7Sjsg 
109c349dbc7Sjsg void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
110c349dbc7Sjsg void intel_gt_chipset_flush(struct intel_gt *gt);
111c349dbc7Sjsg 
112c349dbc7Sjsg static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
113c349dbc7Sjsg 					  enum intel_gt_scratch_field field)
114c349dbc7Sjsg {
115c349dbc7Sjsg 	return i915_ggtt_offset(gt->scratch) + field;
116c349dbc7Sjsg }
117c349dbc7Sjsg 
118ad8b1aafSjsg static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
119c349dbc7Sjsg {
120ad8b1aafSjsg 	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
121ad8b1aafSjsg 	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
122c349dbc7Sjsg }
123c349dbc7Sjsg 
124ad8b1aafSjsg static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
125c349dbc7Sjsg {
126ad8b1aafSjsg 	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
127ad8b1aafSjsg 		   !test_bit(I915_WEDGED, &gt->reset.flags));
128ad8b1aafSjsg 
129ad8b1aafSjsg 	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
130c349dbc7Sjsg }
131c349dbc7Sjsg 
1321bb76ff1Sjsg int intel_gt_probe_all(struct drm_i915_private *i915);
1331bb76ff1Sjsg int intel_gt_tiles_init(struct drm_i915_private *i915);
1341bb76ff1Sjsg void intel_gt_release_all(struct drm_i915_private *i915);
1355ca02815Sjsg 
/*
 * Iterate over every populated GT slot of an i915 device: walks ids
 * 0..I915_MAX_GT-1 and skips slots whose i915->gt[id] pointer is NULL.
 */
#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))
1415ca02815Sjsg 
142ad8b1aafSjsg void intel_gt_info_print(const struct intel_gt_info *info,
143ad8b1aafSjsg 			 struct drm_printer *p);
144ad8b1aafSjsg 
1455ca02815Sjsg void intel_gt_watchdog_work(struct work_struct *work);
1465ca02815Sjsg 
147f005ef32Sjsg enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
148f005ef32Sjsg 					      struct drm_i915_gem_object *obj,
149f005ef32Sjsg 					      bool always_coherent);
1501fd8e27eSjsg 
151*09e4f8a5Sjsg void intel_gt_bind_context_set_ready(struct intel_gt *gt);
152*09e4f8a5Sjsg void intel_gt_bind_context_set_unready(struct intel_gt *gt);
153*09e4f8a5Sjsg bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT__ */
155