// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_pcode.h"

static int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

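/*
 * Core mailbox handshake shared by the helpers below: bail out with
 * -EAGAIN if the mailbox is still busy, write the payload (and the
 * optional second dword), kick off the command by setting
 * GEN6_PCODE_READY, poll for that bit to clear, then decode the status
 * field with the gen-appropriate helper above. Callers must hold
 * sb_lock.
 */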
static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	lockdep_assert_held(&uncore->i915->sb_lock);
	/*
	 * GEN6_PCODE_* registers are outside of the forcewake domain, so we
	 * can use the intel_uncore_read/write_fw variants to reduce the
	 * amount of work required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}

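/*
 * Illustrative sketch (not driver code): a read typically wraps one of
 * the mailbox command IDs from i915_reg.h, e.g. assuming the
 * GEN6_READ_OC_PARAMS command:
 *
 *	u32 params = 0;
 *	int err = snb_pcode_read(uncore, GEN6_READ_OC_PARAMS, &params, NULL);
 *	if (err)
 *		return err;
 */
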
int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int fast_timeout_us, int slow_timeout_ms)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, &val, NULL,
			     fast_timeout_us, slow_timeout_ms, false);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}

static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);

	return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if this times out, polling is retried for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, or some other
 * negative error code as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * for the first time, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	return status ? status : ret;
#undef COND
}

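/*
 * Illustrative sketch (not driver code): display code uses this pattern
 * to, e.g., ask PCODE to prepare for a CDCLK change and wait for the
 * acknowledgment, along the lines of:
 *
 *	ret = skl_pcode_request(uncore, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 */
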
static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
{
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 500, timeout_ms,
					 NULL))
		return -EPROBE_DEFER;

	return skl_pcode_request(uncore,
				 DG1_PCODE_STATUS,
				 DG1_UNCORE_GET_INIT_STATUS,
				 DG1_UNCORE_INIT_STATUS_COMPLETE,
				 DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
}

int intel_pcode_init(struct intel_uncore *uncore)
{
	int err;

	if (!IS_DGFX(uncore->i915))
		return 0;

	/*
	 * Wait 10 seconds for the punit to settle and complete any
	 * outstanding transactions upon module load.
	 */
	err = pcode_init_wait(uncore, 10000);

	if (err) {
		drm_notice(&uncore->i915->drm,
			   "Waiting for HW initialisation...\n");
		err = pcode_init_wait(uncore, 180000);
	}

	return err;
}

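/*
 * Illustrative note (not driver code): pcode_init_wait() maps a
 * still-busy mailbox to -EPROBE_DEFER, so a probe path would typically
 * just propagate the error and let the driver core retry later:
 *
 *	err = intel_pcode_init(uncore);
 *	if (err)
 *		return err;
 */
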
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_read(uncore, mbox, val, NULL);

	return err;
}

int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_write(uncore, mbox, val);

	return err;
}