xref: /openbsd-src/sys/dev/pci/drm/i915/gt/intel_gt.c (revision 09e4f8a57cbb964481b065a211c47c97f5179a0a)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

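/*
 * Set up the software state shared by all GTs: locks, the closed-vma
 * list, the watchdog worker, buffer pool, reset/request/timeline
 * tracking, TLB state, power management, WOPCM, uC and RPS. Note that
 * mtx_init(..., IPL_TTY) is the OpenBSD replacement for the
 * spin_lock_init() used upstream.
 */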
void intel_gt_common_init_early(struct intel_gt *gt)
{
	mtx_init(gt->irq_lock, IPL_TTY);

	INIT_LIST_HEAD(&gt->closed_vma);
	mtx_init(&gt->closed_lock, IPL_TTY);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_init_tlb(gt);
	intel_gt_pm_init_early(gt);

	intel_wopcm_init_early(&gt->wopcm);
	intel_uc_init_early(&gt->uc);
	intel_rps_init_early(&gt->rps);
}

/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->i915 = i915;
	gt->uncore = &i915->uncore;
	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;

	intel_gt_common_init_early(gt);

	return 0;
}

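/*
 * Probe for device-local memory (LMEM) on this GT and, if present,
 * register it as an intel_memory_region. -ENODEV from the setup call
 * simply means the device has no local memory and is not treated as an
 * error.
 */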
static int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int instance = gt->info.id;
	int id = INTEL_REGION_LMEM_0 + instance;
	struct intel_memory_region *mem;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		gt_err(gt, "Failed to setup region(%d) type=%d\n",
		       err, INTEL_MEMORY_LOCAL);
		return err;
	}

	mem->id = id;
	mem->instance = instance;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}

int intel_gt_assign_ggtt(struct intel_gt *gt)
{
	/* Media GT shares primary GT's GGTT */
	if (gt->type == GT_MEDIA) {
		gt->ggtt = to_gt(gt->i915)->ggtt;
	} else {
		gt->ggtt = i915_ggtt_create(gt->i915);
		if (IS_ERR(gt->ggtt))
			return PTR_ERR(gt->ggtt);
	}

	list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);

	return 0;
}

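/*
 * Early MMIO-backed initialization: derive the GT clock frequency, read
 * the uC and SSEU configuration, set up MCR steering, and discover which
 * engines are actually present.
 */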
int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);
	intel_gt_mcr_init(gt);

	return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   HSW_MI_PREDICATE_RESULT_2,
				   IS_HASWELL_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume, which
	 * will prevent C3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

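/*
 * Clear the per-engine RING_FAULT_VALID bit on gen6-style hardware; the
 * posting read flushes the write before the caller proceeds.
 */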
static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
	if (GRAPHICS_VER(gt->i915) < 11)
		return INVALID_MMIO_REG;

	return gt->type == GT_MEDIA ?
		MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}

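/*
 * Reset the sticky error state (PGTBL_ER, IPEIR, EIR and the per-gen
 * ring fault registers) so stale faults do not confuse the next error
 * capture. Any error bits that remain set in EIR are masked via EMR
 * instead.
 */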
void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		intel_uncore_write(uncore, PGTBL_ER, 0);

	if (GRAPHICS_VER(i915) < 4)
		intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
	else
		intel_uncore_write(uncore, IPEIR_I965, 0);

	intel_uncore_write(uncore, EIR, 0);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
		intel_uncore_rmw(uncore, EMR, 0, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
					   RING_FAULT_VALID, 0);
		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 12) {
		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen6_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			gt_dbg(gt, "Unexpected fault\n"
			       "\tAddr: 0x%08lx\n"
			       "\tAddress space: %s\n"
			       "\tSource ID: %d\n"
			       "\tType: %d\n",
			       (unsigned long)(fault & LINUX_PAGE_MASK),
			       fault & RING_FAULT_GTTSEL_MASK ?
			       "GGTT" : "PPGTT",
			       RING_FAULT_SRCID(fault),
			       RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void xehp_check_faults(struct intel_gt *gt)
{
	u32 fault;

	/*
	 * Although the fault register now lives in an MCR register range,
	 * the GAM registers are special and we only truly need to read
	 * the "primary" GAM instance rather than handling each instance
	 * individually.  intel_gt_mcr_read_any() will automatically steer
	 * toward the primary instance.
	 */
	fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
		fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		gt_dbg(gt, "Unexpected fault\n"
		       "\tAddr: 0x%08x_%08x\n"
		       "\tAddress space: %s\n"
		       "\tEngine ID: %d\n"
		       "\tSource ID: %d\n"
		       "\tType: %d\n",
		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
		       GEN8_RING_FAULT_ENGINE_ID(fault),
		       RING_FAULT_SRCID(fault),
		       RING_FAULT_FAULT_TYPE(fault));
	}
}

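/*
 * Gen8+ reports faults through a single fault register pair. The
 * faulting virtual address is reassembled from the two TLB data words:
 * DATA0 shifted up by 12 supplies bits 43:12, while the high bits come
 * from DATA1 shifted up by 44.
 */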
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		gt_dbg(gt, "Unexpected fault\n"
		       "\tAddr: 0x%08x_%08x\n"
		       "\tAddress space: %s\n"
		       "\tEngine ID: %d\n"
		       "\tSource ID: %d\n"
		       "\tType: %d\n",
		       upper_32_bits(fault_addr), lower_32_bits(fault_addr),
		       fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
		       GEN8_RING_FAULT_ENGINE_ID(fault),
		       RING_FAULT_SRCID(fault),
		       RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		xehp_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_ggtt_gmch_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_gsc_init(&gt->gsc, gt->i915);

	intel_rps_driver_register(&gt->rps);

	intel_gt_debugfs_register(gt);
	intel_gt_sysfs_register(gt);
}

static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		gt_err(gt, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

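/*
 * Pick the address space used for the kernel context: a full PPGTT when
 * the hardware supports more than aliasing PPGTT, otherwise a reference
 * to the global GTT.
 */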
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct uvm_object *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
#ifdef __linux__
		state = shmem_create_from_object(rq->context->state->obj);
#else
		state = uao_create_from_object(rq->context->state->obj);
#endif
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

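/*
 * Quiesce the GT for teardown: wedge it so no new requests can be
 * submitted, then run the suspend sequence to park the engines. The GT
 * must be asleep by the time this returns.
 */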
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

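/*
 * Wait for outstanding requests on the GT to retire, then hand whatever
 * budget remains to the uC idle wait. Returns 0 if the device is already
 * asleep, -EINTR if a signal is pending, or the negative error left over
 * from request retirement.
 */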
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	long remaining_timeout;

	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
							   &remaining_timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	if (timeout)
		return timeout;

	if (remaining_timeout < 0)
		remaining_timeout = 0;

	return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}

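/*
 * Full one-time GT initialization: workarounds, scratch page, power
 * management, the kernel address space, engines, uC firmware and the
 * default context image. On failure the GT is wedged so the driver can
 * still finish loading and be unloaded cleanly.
 */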
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	intel_gt_init_workarounds(gt);

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	intel_set_mocs_index(gt);

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = intel_gt_init_hwconfig(gt);
	if (err)
		gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_uc_init_late(&gt->uc);

	intel_migrate_init(&gt->migrate, gt);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);

	intel_gt_flush_buffer_pool(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_gt_sysfs_unregister(gt);
	intel_rps_driver_unregister(&gt->rps);
	intel_gsc_fini(&gt->gsc);

	/*
	 * If we unload the driver and wedge before the GSC worker is complete,
	 * the worker will hit an error on its submission to the GSC engine and
	 * then exit. This is hard to hit for a user, but it is reproducible
	 * when skipping selftests. The error is handled gracefully by the
	 * worker, so there are no functional issues, but we still end up with
	 * an error message in dmesg, which is something we want to avoid as
	 * this is a supported scenario. We could modify the worker to better
	 * handle a wedging occurring during its execution, but that gets
	 * complicated for a couple of reasons:
	 * - We do want the error on runtime wedging, because there are
	 *   implications for subsystems outside of GT (i.e., PXP, HDCP), it's
	 *   only the error on driver unload that we want to silence.
	 * - The worker is responsible for multiple submissions (GSC FW load,
	 *   HuC auth, SW proxy), so all of those will have to be adapted to
	 *   handle the wedged_on_fini scenario.
	 * Therefore, it's much simpler to just wait for the worker to be done
	 * before wedging on driver removal, also considering that the worker
	 * will likely already be idle in the great majority of non-selftest
	 * scenarios.
	 */
	intel_gsc_uc_flush_work(&gt->uc.gsc);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged_on_fini(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_wa_list_free(&gt->wa_list);
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
	intel_gt_fini_hwconfig(gt);
}

void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;

	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	for_each_gt(gt, i915, id) {
		intel_uc_driver_late_release(&gt->uc);
		intel_gt_fini_requests(gt);
		intel_gt_fini_reset(gt);
		intel_gt_fini_timelines(gt);
		intel_gt_fini_tlb(gt);
		intel_engines_free(gt);
	}
}

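/*
 * Per-tile setup: non-root GTs need their own uncore and irq_lock before
 * the common early init runs, then the tile's MMIO window is mapped at
 * the given physical address.
 */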
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
	int ret;

	if (!gt_is_root(gt)) {
		struct intel_uncore *uncore;
		spinlock_t *irq_lock;

		uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
		if (!uncore)
			return -ENOMEM;

		irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
		if (!irq_lock)
			return -ENOMEM;

		gt->uncore = uncore;
		gt->irq_lock = irq_lock;

		intel_gt_common_init_early(gt);
	}

	intel_uncore_init_early(gt->uncore, gt);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
		return ret;

	gt->phys_addr = phys_addr;

	return 0;
}

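/*
 * Two variants of intel_gt_probe_all() follow: the Linux version reads
 * the MMIO BAR via pci_resource_start()/pci_resource_len(), while the
 * OpenBSD version obtains the same information through
 * pci_mapreg_type()/pci_mapreg_info(). The GT enumeration logic is
 * otherwise identical.
 */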
#ifdef __linux__

int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_gt *gt = to_gt(i915);
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
	phys_addr = pci_resource_start(pdev, mmio_bar);

	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	gt_dbg(gt, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		gt_dbg(gt, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  pci_resource_len(pdev, mmio_bar)))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	return ret;
}

#else

int intel_gt_probe_all(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_gt *gt = to_gt(i915);
	const struct intel_gt_definition *gtdef;
	phys_addr_t phys_addr;
	bus_size_t len;
	pcireg_t type;
	int flags;
	unsigned int mmio_bar;
	unsigned int i;
	int ret;

	mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
	type = pci_mapreg_type(i915->pc, i915->tag, 0x10 + (mmio_bar * 4));
	ret = -pci_mapreg_info(i915->pc, i915->tag, 0x10 + (mmio_bar * 4), type,
	    &phys_addr, &len, NULL);
	if (ret)
		return ret;

	/*
	 * We always have at least one primary GT on any device
	 * and it has been already initialized early during probe
	 * in i915_driver_probe()
	 */
	gt->i915 = i915;
	gt->name = "Primary GT";
	gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;

	gt_dbg(gt, "Setting up %s\n", gt->name);
	ret = intel_gt_tile_setup(gt, phys_addr);
	if (ret)
		return ret;

	i915->gt[0] = gt;

	if (!HAS_EXTRA_GT_LIST(i915))
		return 0;

	for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
	     gtdef->name != NULL;
	     i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
		gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
		if (!gt) {
			ret = -ENOMEM;
			goto err;
		}

		gt->i915 = i915;
		gt->name = gtdef->name;
		gt->type = gtdef->type;
		gt->info.engine_mask = gtdef->engine_mask;
		gt->info.id = i;

		gt_dbg(gt, "Setting up %s\n", gt->name);
		if (GEM_WARN_ON(range_overflows_t(resource_size_t,
						  gtdef->mapping_base,
						  SZ_16M,
						  len))) {
			ret = -ENODEV;
			goto err;
		}

		switch (gtdef->type) {
		case GT_TILE:
			ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
			break;

		case GT_MEDIA:
			ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
						     gtdef->gsi_offset);
			break;

		case GT_PRIMARY:
			/* Primary GT should not appear in extra GT list */
		default:
			MISSING_CASE(gtdef->type);
			ret = -ENODEV;
		}

		if (ret)
			goto err;

		i915->gt[i] = gt;
	}

	return 0;

err:
	i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
	return ret;
}

#endif

static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine && engine->bind_context)
		engine->bind_context_ready = ready;
}

/**
 * intel_gt_bind_context_set_ready - Set the context binding as ready
 *
 * @gt: GT structure
 *
 * This function marks the binder context as ready.
 */
void intel_gt_bind_context_set_ready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, true);
}

/**
 * intel_gt_bind_context_set_unready - Set the context binding as not ready
 * @gt: GT structure
 *
 * This function marks the binder context as not ready.
 */
void intel_gt_bind_context_set_unready(struct intel_gt *gt)
{
	__intel_gt_bind_context_set_ready(gt, false);
}

/**
 * intel_gt_is_bind_context_ready - Check if context binding is ready
 *
 * @gt: GT structure
 *
 * This function returns binder context's ready status.
 */
bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
{
	struct intel_engine_cs *engine = gt->engine[BCS0];

	if (engine)
		return engine->bind_context_ready;

	return false;
}

int intel_gt_tiles_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	unsigned int id;
	int ret;

	for_each_gt(gt, i915, id) {
		ret = intel_gt_probe_lmem(gt);
		if (ret)
			return ret;
	}

	return 0;
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent)
{
	/*
	 * Wa_22016122933: always return I915_MAP_WC for Media
	 * version 13.0 when the object is on the Media GT
	 */
	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
		return I915_MAP_WC;
	if (HAS_LLC(gt->i915) || always_coherent)
		return I915_MAP_WB;
	else
		return I915_MAP_WC;
}