/*	$NetBSD: intel_guc.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $	*/

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_guc.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $");

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

#include <linux/nbsd-namespace.h>

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC (an example follows this comment block).
 * Note that not all the operations are supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

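/*
 * Example (editorial note, not from the upstream documentation): at
 * this point in the driver's history, enable_guc is a bitmask in which
 * bit 0 (value 1) selects GuC submission and bit 1 (value 2) selects
 * HuC loading, so e.g. enable_guc=2 loads the firmware for HuC
 * authentication only, while enable_guc=3 enables both. Whether and
 * how the NetBSD port exposes this knob may differ.
 */
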
void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
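
/*
 * Note (editorial): the send registers are consecutive 32-bit MMIO
 * slots, so message dword i lives at send_regs.base + 4 * i. On
 * Gen11+ the base is GEN11_SOFT_SCRATCH(0), and on older gens it is
 * SOFT_SCRATCH(0), as set up in intel_guc_init_send_regs() below.
 */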

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}
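
/*
 * Note (editorial): both the gen9 and gen11 disable paths above follow
 * the same ordering: mark interrupts disabled and mask the source
 * under the irq lock, drop the lock, synchronize against a handler
 * that may already be running, and only then clear any stale IIR bits
 * via the corresponding reset helper.
 */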

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (INTEL_GEN(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}
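
/*
 * Worked example (editorial, assuming the log-level macros in
 * intel_guc_log.h): levels 0 (disabled) and 1 (non-verbose) both take
 * the GUC_LOG_DISABLED branch here, because the verbosity field only
 * governs verbose logging; level 2 then maps to verbosity 0, level 3
 * to verbosity 1, and so on, via
 * GUC_LOG_LEVEL_TO_VERBOSITY(level) == level - 2.
 */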

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}
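
/*
 * Worked example (editorial): the descriptor count is encoded in
 * units of 16, so assuming GUC_MAX_STAGE_DESCRIPTORS is 1024 (its
 * value in the fwif header at this time), the CTXNUM field holds 64,
 * while the pool base is encoded as a page frame number (hence the
 * PAGE_SHIFT) before both fields are packed into the single CTXINFO
 * dword.
 */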

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
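
/*
 * Worked example (editorial): each buffer size is encoded as "units
 * minus one", which is what the BUILD_BUG_ONs above range-check
 * against the field masks. If, say, CRASH_BUFFER_SIZE were SZ_8K (not
 * a multiple of 1M), UNIT would be SZ_4K, FLAG would be 0, and the
 * crash field would encode 8K / 4K - 1 == 1; with megabyte-sized
 * buffers, UNIT is SZ_1M and GUC_LOG_ALLOC_IN_MEGABYTE tells the FW
 * to interpret the fields accordingly.
 */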

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the SOFT_SCRATCH registers
 * before starting the firmware transfer. The firmware reads these
 * parameters on startup and they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
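
/*
 * Layout note (editorial): SOFT_SCRATCH(0) is cleared above because
 * it doubles as the MMIO send/status register on pre-gen11 hardware
 * (see intel_guc_init_send_regs()); param dword i is written to
 * SOFT_SCRATCH(1 + i), so the block occupies scratch registers 1
 * through GUC_CTL_MAX_DWORDS.
 */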

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);

	spin_lock_destroy(&guc->irq_lock);
	mutex_destroy(&guc->send_mutex);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
							    guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
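
/*
 * Protocol note (editorial): the status word polled above uses the
 * GuC MMIO message layout from intel_guc_fwif.h, with the message
 * TYPE in the top bits, a DATA field in the middle and the action
 * CODE in the low 16 bits; that is why the GEM_BUG_ON above insists
 * the action fits in INTEL_GUC_MSG_CODE_MASK, and why the return
 * value on success is extracted with INTEL_GUC_MSG_TO_DATA().
 */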

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bits 0 and 1 select the Render and Media domains respectively */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: RSA offset w.r.t. the GGTT base of the HuC vma
 *
 * Triggers a HuC firmware authentication request to the GuC via the
 * intel_guc_send INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function
 * is invoked by intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC but we do need to enable the
	 * GuC communication on resume (above).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to the memory location of the FW itself
 * (WOPCM) or other parts of the HW. The driver must take care not to place
 * objects that the GuC is going to access in these reserved ranges. The layout
 * of the GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |                    |
 *     |      GuC    |        GuC         |
 *     |     WOPCM   |       WOPCM        |
 *     |      Size   |                    |
 *     |       |     |                    |
 *     v       v     |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of the GuC address space, [0, ggtt_pin_bias), is mapped to the
 * GuC WOPCM, while the upper part, [ggtt_pin_bias, GUC_GGTT_TOP), is mapped to
 * DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
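
/*
 * Example (editorial, with a hypothetical WOPCM size): if the WOPCM
 * were 1MB, ggtt_pin_bias would be 1MB and every object the GuC needs
 * to access would have to be pinned at a GGTT offset in
 * [1MB, GUC_GGTT_TOP); intel_guc_allocate_vma() below enforces this
 * with PIN_OFFSET_BIAS.
 */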

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}
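
/*
 * Usage sketch (editorial; a hypothetical caller allocating one page
 * of GuC-visible memory and getting a CPU pointer to it):
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */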