xref: /openbsd-src/sys/dev/pci/drm/i915/display/intel_display_power.c (revision f005ef32267c16bdb134f0e9fa4477dbe07c263a)
1c349dbc7Sjsg /* SPDX-License-Identifier: MIT */
2c349dbc7Sjsg /*
3c349dbc7Sjsg  * Copyright © 2019 Intel Corporation
4c349dbc7Sjsg  */
5c349dbc7Sjsg 
61bb76ff1Sjsg #include <linux/string_helpers.h>
7c349dbc7Sjsg 
8c349dbc7Sjsg #include "i915_drv.h"
9c349dbc7Sjsg #include "i915_irq.h"
10*f005ef32Sjsg #include "i915_reg.h"
111bb76ff1Sjsg #include "intel_backlight_regs.h"
12c349dbc7Sjsg #include "intel_cdclk.h"
13*f005ef32Sjsg #include "intel_clock_gating.h"
14c349dbc7Sjsg #include "intel_combo_phy.h"
155ca02815Sjsg #include "intel_de.h"
161bb76ff1Sjsg #include "intel_display_power.h"
171bb76ff1Sjsg #include "intel_display_power_map.h"
181bb76ff1Sjsg #include "intel_display_power_well.h"
19c349dbc7Sjsg #include "intel_display_types.h"
205ca02815Sjsg #include "intel_dmc.h"
211bb76ff1Sjsg #include "intel_mchbar_regs.h"
221bb76ff1Sjsg #include "intel_pch_refclk.h"
231bb76ff1Sjsg #include "intel_pcode.h"
24*f005ef32Sjsg #include "intel_pmdemand.h"
25*f005ef32Sjsg #include "intel_pps_regs.h"
265ca02815Sjsg #include "intel_snps_phy.h"
271bb76ff1Sjsg #include "skl_watermark.h"
28*f005ef32Sjsg #include "skl_watermark_regs.h"
291bb76ff1Sjsg #include "vlv_sideband.h"
30c349dbc7Sjsg 
311bb76ff1Sjsg #define for_each_power_domain_well(__dev_priv, __power_well, __domain)	\
321bb76ff1Sjsg 	for_each_power_well(__dev_priv, __power_well)				\
331bb76ff1Sjsg 		for_each_if(test_bit((__domain), (__power_well)->domains.bits))
341bb76ff1Sjsg 
351bb76ff1Sjsg #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
361bb76ff1Sjsg 	for_each_power_well_reverse(__dev_priv, __power_well)		        \
371bb76ff1Sjsg 		for_each_if(test_bit((__domain), (__power_well)->domains.bits))
38c349dbc7Sjsg 
39c349dbc7Sjsg const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)40c349dbc7Sjsg intel_display_power_domain_str(enum intel_display_power_domain domain)
41c349dbc7Sjsg {
42c349dbc7Sjsg 	switch (domain) {
43c349dbc7Sjsg 	case POWER_DOMAIN_DISPLAY_CORE:
44c349dbc7Sjsg 		return "DISPLAY_CORE";
45c349dbc7Sjsg 	case POWER_DOMAIN_PIPE_A:
46c349dbc7Sjsg 		return "PIPE_A";
47c349dbc7Sjsg 	case POWER_DOMAIN_PIPE_B:
48c349dbc7Sjsg 		return "PIPE_B";
49c349dbc7Sjsg 	case POWER_DOMAIN_PIPE_C:
50c349dbc7Sjsg 		return "PIPE_C";
51c349dbc7Sjsg 	case POWER_DOMAIN_PIPE_D:
52c349dbc7Sjsg 		return "PIPE_D";
531bb76ff1Sjsg 	case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
541bb76ff1Sjsg 		return "PIPE_PANEL_FITTER_A";
551bb76ff1Sjsg 	case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
561bb76ff1Sjsg 		return "PIPE_PANEL_FITTER_B";
571bb76ff1Sjsg 	case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
581bb76ff1Sjsg 		return "PIPE_PANEL_FITTER_C";
591bb76ff1Sjsg 	case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
601bb76ff1Sjsg 		return "PIPE_PANEL_FITTER_D";
61c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_A:
62c349dbc7Sjsg 		return "TRANSCODER_A";
63c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_B:
64c349dbc7Sjsg 		return "TRANSCODER_B";
65c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_C:
66c349dbc7Sjsg 		return "TRANSCODER_C";
67c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_D:
68c349dbc7Sjsg 		return "TRANSCODER_D";
69c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_EDP:
70c349dbc7Sjsg 		return "TRANSCODER_EDP";
71c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_DSI_A:
72c349dbc7Sjsg 		return "TRANSCODER_DSI_A";
73c349dbc7Sjsg 	case POWER_DOMAIN_TRANSCODER_DSI_C:
74c349dbc7Sjsg 		return "TRANSCODER_DSI_C";
751bb76ff1Sjsg 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
761bb76ff1Sjsg 		return "TRANSCODER_VDSC_PW2";
771bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_A:
781bb76ff1Sjsg 		return "PORT_DDI_LANES_A";
791bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_B:
801bb76ff1Sjsg 		return "PORT_DDI_LANES_B";
811bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_C:
821bb76ff1Sjsg 		return "PORT_DDI_LANES_C";
831bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_D:
841bb76ff1Sjsg 		return "PORT_DDI_LANES_D";
851bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_E:
861bb76ff1Sjsg 		return "PORT_DDI_LANES_E";
871bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_F:
881bb76ff1Sjsg 		return "PORT_DDI_LANES_F";
891bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC1:
901bb76ff1Sjsg 		return "PORT_DDI_LANES_TC1";
911bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC2:
921bb76ff1Sjsg 		return "PORT_DDI_LANES_TC2";
931bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC3:
941bb76ff1Sjsg 		return "PORT_DDI_LANES_TC3";
951bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC4:
961bb76ff1Sjsg 		return "PORT_DDI_LANES_TC4";
971bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC5:
981bb76ff1Sjsg 		return "PORT_DDI_LANES_TC5";
991bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_LANES_TC6:
1001bb76ff1Sjsg 		return "PORT_DDI_LANES_TC6";
1011bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_A:
1021bb76ff1Sjsg 		return "PORT_DDI_IO_A";
1031bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_B:
1041bb76ff1Sjsg 		return "PORT_DDI_IO_B";
1051bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_C:
1061bb76ff1Sjsg 		return "PORT_DDI_IO_C";
1071bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_D:
1081bb76ff1Sjsg 		return "PORT_DDI_IO_D";
1091bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_E:
1101bb76ff1Sjsg 		return "PORT_DDI_IO_E";
1111bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_F:
1121bb76ff1Sjsg 		return "PORT_DDI_IO_F";
1131bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC1:
1141bb76ff1Sjsg 		return "PORT_DDI_IO_TC1";
1151bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC2:
1161bb76ff1Sjsg 		return "PORT_DDI_IO_TC2";
1171bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC3:
1181bb76ff1Sjsg 		return "PORT_DDI_IO_TC3";
1191bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC4:
1201bb76ff1Sjsg 		return "PORT_DDI_IO_TC4";
1211bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC5:
1221bb76ff1Sjsg 		return "PORT_DDI_IO_TC5";
1231bb76ff1Sjsg 	case POWER_DOMAIN_PORT_DDI_IO_TC6:
1241bb76ff1Sjsg 		return "PORT_DDI_IO_TC6";
125c349dbc7Sjsg 	case POWER_DOMAIN_PORT_DSI:
126c349dbc7Sjsg 		return "PORT_DSI";
127c349dbc7Sjsg 	case POWER_DOMAIN_PORT_CRT:
128c349dbc7Sjsg 		return "PORT_CRT";
129c349dbc7Sjsg 	case POWER_DOMAIN_PORT_OTHER:
130c349dbc7Sjsg 		return "PORT_OTHER";
131c349dbc7Sjsg 	case POWER_DOMAIN_VGA:
132c349dbc7Sjsg 		return "VGA";
1335ca02815Sjsg 	case POWER_DOMAIN_AUDIO_MMIO:
1345ca02815Sjsg 		return "AUDIO_MMIO";
1355ca02815Sjsg 	case POWER_DOMAIN_AUDIO_PLAYBACK:
1365ca02815Sjsg 		return "AUDIO_PLAYBACK";
137*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_A:
138*f005ef32Sjsg 		return "AUX_IO_A";
139*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_B:
140*f005ef32Sjsg 		return "AUX_IO_B";
141*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_C:
142*f005ef32Sjsg 		return "AUX_IO_C";
143*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_D:
144*f005ef32Sjsg 		return "AUX_IO_D";
145*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_E:
146*f005ef32Sjsg 		return "AUX_IO_E";
147*f005ef32Sjsg 	case POWER_DOMAIN_AUX_IO_F:
148*f005ef32Sjsg 		return "AUX_IO_F";
149c349dbc7Sjsg 	case POWER_DOMAIN_AUX_A:
150c349dbc7Sjsg 		return "AUX_A";
151c349dbc7Sjsg 	case POWER_DOMAIN_AUX_B:
152c349dbc7Sjsg 		return "AUX_B";
153c349dbc7Sjsg 	case POWER_DOMAIN_AUX_C:
154c349dbc7Sjsg 		return "AUX_C";
155c349dbc7Sjsg 	case POWER_DOMAIN_AUX_D:
156c349dbc7Sjsg 		return "AUX_D";
157c349dbc7Sjsg 	case POWER_DOMAIN_AUX_E:
158c349dbc7Sjsg 		return "AUX_E";
159c349dbc7Sjsg 	case POWER_DOMAIN_AUX_F:
160c349dbc7Sjsg 		return "AUX_F";
1611bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC1:
1621bb76ff1Sjsg 		return "AUX_USBC1";
1631bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC2:
1641bb76ff1Sjsg 		return "AUX_USBC2";
1651bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC3:
1661bb76ff1Sjsg 		return "AUX_USBC3";
1671bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC4:
1681bb76ff1Sjsg 		return "AUX_USBC4";
1691bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC5:
1701bb76ff1Sjsg 		return "AUX_USBC5";
1711bb76ff1Sjsg 	case POWER_DOMAIN_AUX_USBC6:
1721bb76ff1Sjsg 		return "AUX_USBC6";
1731bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT1:
1741bb76ff1Sjsg 		return "AUX_TBT1";
1751bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT2:
1761bb76ff1Sjsg 		return "AUX_TBT2";
1771bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT3:
1781bb76ff1Sjsg 		return "AUX_TBT3";
1791bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT4:
1801bb76ff1Sjsg 		return "AUX_TBT4";
1811bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT5:
1821bb76ff1Sjsg 		return "AUX_TBT5";
1831bb76ff1Sjsg 	case POWER_DOMAIN_AUX_TBT6:
1841bb76ff1Sjsg 		return "AUX_TBT6";
185c349dbc7Sjsg 	case POWER_DOMAIN_GMBUS:
186c349dbc7Sjsg 		return "GMBUS";
187c349dbc7Sjsg 	case POWER_DOMAIN_INIT:
188c349dbc7Sjsg 		return "INIT";
189c349dbc7Sjsg 	case POWER_DOMAIN_MODESET:
190c349dbc7Sjsg 		return "MODESET";
191c349dbc7Sjsg 	case POWER_DOMAIN_GT_IRQ:
192c349dbc7Sjsg 		return "GT_IRQ";
1931bb76ff1Sjsg 	case POWER_DOMAIN_DC_OFF:
1941bb76ff1Sjsg 		return "DC_OFF";
195ad8b1aafSjsg 	case POWER_DOMAIN_TC_COLD_OFF:
196ad8b1aafSjsg 		return "TC_COLD_OFF";
197c349dbc7Sjsg 	default:
198c349dbc7Sjsg 		MISSING_CASE(domain);
199c349dbc7Sjsg 		return "?";
200c349dbc7Sjsg 	}
201c349dbc7Sjsg }
202c349dbc7Sjsg 
203c349dbc7Sjsg /**
204c349dbc7Sjsg  * __intel_display_power_is_enabled - unlocked check for a power domain
205c349dbc7Sjsg  * @dev_priv: i915 device instance
206c349dbc7Sjsg  * @domain: power domain to check
207c349dbc7Sjsg  *
208c349dbc7Sjsg  * This is the unlocked version of intel_display_power_is_enabled() and should
209c349dbc7Sjsg  * only be used from error capture and recovery code where deadlocks are
210c349dbc7Sjsg  * possible.
211c349dbc7Sjsg  *
212c349dbc7Sjsg  * Returns:
213c349dbc7Sjsg  * True when the power domain is enabled, false otherwise.
214c349dbc7Sjsg  */
__intel_display_power_is_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)215c349dbc7Sjsg bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
216c349dbc7Sjsg 				      enum intel_display_power_domain domain)
217c349dbc7Sjsg {
218c349dbc7Sjsg 	struct i915_power_well *power_well;
219c349dbc7Sjsg 	bool is_enabled;
220c349dbc7Sjsg 
221c349dbc7Sjsg 	if (dev_priv->runtime_pm.suspended)
222c349dbc7Sjsg 		return false;
223c349dbc7Sjsg 
224c349dbc7Sjsg 	is_enabled = true;
225c349dbc7Sjsg 
2261bb76ff1Sjsg 	for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
2271bb76ff1Sjsg 		if (intel_power_well_is_always_on(power_well))
228c349dbc7Sjsg 			continue;
229c349dbc7Sjsg 
2301bb76ff1Sjsg 		if (!intel_power_well_is_enabled_cached(power_well)) {
231c349dbc7Sjsg 			is_enabled = false;
232c349dbc7Sjsg 			break;
233c349dbc7Sjsg 		}
234c349dbc7Sjsg 	}
235c349dbc7Sjsg 
236c349dbc7Sjsg 	return is_enabled;
237c349dbc7Sjsg }
238c349dbc7Sjsg 
239c349dbc7Sjsg /**
240c349dbc7Sjsg  * intel_display_power_is_enabled - check for a power domain
241c349dbc7Sjsg  * @dev_priv: i915 device instance
242c349dbc7Sjsg  * @domain: power domain to check
243c349dbc7Sjsg  *
244c349dbc7Sjsg  * This function can be used to check the hw power domain state. It is mostly
245c349dbc7Sjsg  * used in hardware state readout functions. Everywhere else code should rely
246c349dbc7Sjsg  * upon explicit power domain reference counting to ensure that the hardware
247c349dbc7Sjsg  * block is powered up before accessing it.
248c349dbc7Sjsg  *
249c349dbc7Sjsg  * Callers must hold the relevant modesetting locks to ensure that concurrent
250c349dbc7Sjsg  * threads can't disable the power well while the caller tries to read a few
251c349dbc7Sjsg  * registers.
252c349dbc7Sjsg  *
253c349dbc7Sjsg  * Returns:
254c349dbc7Sjsg  * True when the power domain is enabled, false otherwise.
255c349dbc7Sjsg  */
intel_display_power_is_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)256c349dbc7Sjsg bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
257c349dbc7Sjsg 				    enum intel_display_power_domain domain)
258c349dbc7Sjsg {
259c349dbc7Sjsg 	struct i915_power_domains *power_domains;
260c349dbc7Sjsg 	bool ret;
261c349dbc7Sjsg 
2621bb76ff1Sjsg 	power_domains = &dev_priv->display.power.domains;
263c349dbc7Sjsg 
264c349dbc7Sjsg 	mutex_lock(&power_domains->lock);
265c349dbc7Sjsg 	ret = __intel_display_power_is_enabled(dev_priv, domain);
266c349dbc7Sjsg 	mutex_unlock(&power_domains->lock);
267c349dbc7Sjsg 
268c349dbc7Sjsg 	return ret;
269c349dbc7Sjsg }
270c349dbc7Sjsg 
271c349dbc7Sjsg static u32
sanitize_target_dc_state(struct drm_i915_private * i915,u32 target_dc_state)272*f005ef32Sjsg sanitize_target_dc_state(struct drm_i915_private *i915,
273c349dbc7Sjsg 			 u32 target_dc_state)
274c349dbc7Sjsg {
275*f005ef32Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2761bb76ff1Sjsg 	static const u32 states[] = {
277c349dbc7Sjsg 		DC_STATE_EN_UPTO_DC6,
278c349dbc7Sjsg 		DC_STATE_EN_UPTO_DC5,
279c349dbc7Sjsg 		DC_STATE_EN_DC3CO,
280c349dbc7Sjsg 		DC_STATE_DISABLE,
281c349dbc7Sjsg 	};
282c349dbc7Sjsg 	int i;
283c349dbc7Sjsg 
284c349dbc7Sjsg 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
285c349dbc7Sjsg 		if (target_dc_state != states[i])
286c349dbc7Sjsg 			continue;
287c349dbc7Sjsg 
288*f005ef32Sjsg 		if (power_domains->allowed_dc_mask & target_dc_state)
289c349dbc7Sjsg 			break;
290c349dbc7Sjsg 
291c349dbc7Sjsg 		target_dc_state = states[i + 1];
292c349dbc7Sjsg 	}
293c349dbc7Sjsg 
294c349dbc7Sjsg 	return target_dc_state;
295c349dbc7Sjsg }
296c349dbc7Sjsg 
297c349dbc7Sjsg /**
298c349dbc7Sjsg  * intel_display_power_set_target_dc_state - Set target dc state.
299c349dbc7Sjsg  * @dev_priv: i915 device
300c349dbc7Sjsg  * @state: state which needs to be set as target_dc_state.
301c349dbc7Sjsg  *
302c349dbc7Sjsg  * This function set the "DC off" power well target_dc_state,
303c349dbc7Sjsg  * based upon this target_dc_stste, "DC off" power well will
304c349dbc7Sjsg  * enable desired DC state.
305c349dbc7Sjsg  */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
					     u32 state)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;
	bool dc_off_enabled;

	mutex_lock(&power_domains->lock);

	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
	if (drm_WARN_ON(&dev_priv->drm, !power_well))
		goto unlock;

	/* Clamp the request to a state the platform actually supports. */
	state = sanitize_target_dc_state(dev_priv, state);
	if (state == power_domains->target_dc_state)
		goto unlock;

	dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
	/*
	 * If the DC off power well is currently disabled, cycle it around
	 * the update so the new target DC state takes effect.
	 */
	if (!dc_off_enabled)
		intel_power_well_enable(dev_priv, power_well);

	power_domains->target_dc_state = state;

	if (!dc_off_enabled)
		intel_power_well_disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}
340c349dbc7Sjsg 
341c349dbc7Sjsg #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
342c349dbc7Sjsg 
__async_put_domains_mask(struct i915_power_domains * power_domains,struct intel_power_domain_mask * mask)3431bb76ff1Sjsg static void __async_put_domains_mask(struct i915_power_domains *power_domains,
3441bb76ff1Sjsg 				     struct intel_power_domain_mask *mask)
345c349dbc7Sjsg {
3461bb76ff1Sjsg 	bitmap_or(mask->bits,
3471bb76ff1Sjsg 		  power_domains->async_put_domains[0].bits,
3481bb76ff1Sjsg 		  power_domains->async_put_domains[1].bits,
3491bb76ff1Sjsg 		  POWER_DOMAIN_NUM);
350c349dbc7Sjsg }
351c349dbc7Sjsg 
352c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
353c349dbc7Sjsg 
354c349dbc7Sjsg static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains * power_domains)355c349dbc7Sjsg assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
356c349dbc7Sjsg {
357ad8b1aafSjsg 	struct drm_i915_private *i915 = container_of(power_domains,
358ad8b1aafSjsg 						     struct drm_i915_private,
3591bb76ff1Sjsg 						     display.power.domains);
3601bb76ff1Sjsg 
3611bb76ff1Sjsg 	return !drm_WARN_ON(&i915->drm,
3621bb76ff1Sjsg 			    bitmap_intersects(power_domains->async_put_domains[0].bits,
3631bb76ff1Sjsg 					      power_domains->async_put_domains[1].bits,
3641bb76ff1Sjsg 					      POWER_DOMAIN_NUM));
365c349dbc7Sjsg }
366c349dbc7Sjsg 
367c349dbc7Sjsg static bool
__async_put_domains_state_ok(struct i915_power_domains * power_domains)368c349dbc7Sjsg __async_put_domains_state_ok(struct i915_power_domains *power_domains)
369c349dbc7Sjsg {
370ad8b1aafSjsg 	struct drm_i915_private *i915 = container_of(power_domains,
371ad8b1aafSjsg 						     struct drm_i915_private,
3721bb76ff1Sjsg 						     display.power.domains);
3731bb76ff1Sjsg 	struct intel_power_domain_mask async_put_mask;
374c349dbc7Sjsg 	enum intel_display_power_domain domain;
375c349dbc7Sjsg 	bool err = false;
376c349dbc7Sjsg 
377c349dbc7Sjsg 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
3781bb76ff1Sjsg 	__async_put_domains_mask(power_domains, &async_put_mask);
3791bb76ff1Sjsg 	err |= drm_WARN_ON(&i915->drm,
3801bb76ff1Sjsg 			   !!power_domains->async_put_wakeref !=
3811bb76ff1Sjsg 			   !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
382c349dbc7Sjsg 
3831bb76ff1Sjsg 	for_each_power_domain(domain, &async_put_mask)
384ad8b1aafSjsg 		err |= drm_WARN_ON(&i915->drm,
385ad8b1aafSjsg 				   power_domains->domain_use_count[domain] != 1);
386c349dbc7Sjsg 
387c349dbc7Sjsg 	return !err;
388c349dbc7Sjsg }
389c349dbc7Sjsg 
print_power_domains(struct i915_power_domains * power_domains,const char * prefix,struct intel_power_domain_mask * mask)390c349dbc7Sjsg static void print_power_domains(struct i915_power_domains *power_domains,
3911bb76ff1Sjsg 				const char *prefix, struct intel_power_domain_mask *mask)
392c349dbc7Sjsg {
393ad8b1aafSjsg 	struct drm_i915_private *i915 = container_of(power_domains,
394ad8b1aafSjsg 						     struct drm_i915_private,
3951bb76ff1Sjsg 						     display.power.domains);
396c349dbc7Sjsg 	enum intel_display_power_domain domain;
397c349dbc7Sjsg 
3981bb76ff1Sjsg 	drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
399c349dbc7Sjsg 	for_each_power_domain(domain, mask)
400ad8b1aafSjsg 		drm_dbg(&i915->drm, "%s use_count %d\n",
401c349dbc7Sjsg 			intel_display_power_domain_str(domain),
402c349dbc7Sjsg 			power_domains->domain_use_count[domain]);
403c349dbc7Sjsg }
404c349dbc7Sjsg 
405c349dbc7Sjsg static void
print_async_put_domains_state(struct i915_power_domains * power_domains)406c349dbc7Sjsg print_async_put_domains_state(struct i915_power_domains *power_domains)
407c349dbc7Sjsg {
408ad8b1aafSjsg 	struct drm_i915_private *i915 = container_of(power_domains,
409ad8b1aafSjsg 						     struct drm_i915_private,
4101bb76ff1Sjsg 						     display.power.domains);
411ad8b1aafSjsg 
412ad8b1aafSjsg 	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
413c349dbc7Sjsg 		power_domains->async_put_wakeref);
414c349dbc7Sjsg 
415c349dbc7Sjsg 	print_power_domains(power_domains, "async_put_domains[0]",
4161bb76ff1Sjsg 			    &power_domains->async_put_domains[0]);
417c349dbc7Sjsg 	print_power_domains(power_domains, "async_put_domains[1]",
4181bb76ff1Sjsg 			    &power_domains->async_put_domains[1]);
419c349dbc7Sjsg }
420c349dbc7Sjsg 
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	/* Dump the tracking state only when a consistency check fails. */
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}
427c349dbc7Sjsg 
428c349dbc7Sjsg #else
429c349dbc7Sjsg 
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
434c349dbc7Sjsg 
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
439c349dbc7Sjsg 
440c349dbc7Sjsg #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
441c349dbc7Sjsg 
/* Like __async_put_domains_mask() but checks the disjointness invariant. */
static void async_put_domains_mask(struct i915_power_domains *power_domains,
				   struct intel_power_domain_mask *mask)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	__async_put_domains_mask(power_domains, mask);
}
450c349dbc7Sjsg 
451c349dbc7Sjsg static void
async_put_domains_clear_domain(struct i915_power_domains * power_domains,enum intel_display_power_domain domain)452c349dbc7Sjsg async_put_domains_clear_domain(struct i915_power_domains *power_domains,
453c349dbc7Sjsg 			       enum intel_display_power_domain domain)
454c349dbc7Sjsg {
455c349dbc7Sjsg 	assert_async_put_domain_masks_disjoint(power_domains);
456c349dbc7Sjsg 
4571bb76ff1Sjsg 	clear_bit(domain, power_domains->async_put_domains[0].bits);
4581bb76ff1Sjsg 	clear_bit(domain, power_domains->async_put_domains[1].bits);
459c349dbc7Sjsg }
460c349dbc7Sjsg 
461*f005ef32Sjsg static void
cancel_async_put_work(struct i915_power_domains * power_domains,bool sync)462*f005ef32Sjsg cancel_async_put_work(struct i915_power_domains *power_domains, bool sync)
463*f005ef32Sjsg {
464*f005ef32Sjsg 	if (sync)
465*f005ef32Sjsg 		cancel_delayed_work_sync(&power_domains->async_put_work);
466*f005ef32Sjsg 	else
467*f005ef32Sjsg 		cancel_delayed_work(&power_domains->async_put_work);
468*f005ef32Sjsg 
469*f005ef32Sjsg 	power_domains->async_put_next_delay = 0;
470*f005ef32Sjsg }
471*f005ef32Sjsg 
472c349dbc7Sjsg static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)473c349dbc7Sjsg intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
474c349dbc7Sjsg 				       enum intel_display_power_domain domain)
475c349dbc7Sjsg {
4761bb76ff1Sjsg 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
4771bb76ff1Sjsg 	struct intel_power_domain_mask async_put_mask;
478c349dbc7Sjsg 	bool ret = false;
479c349dbc7Sjsg 
4801bb76ff1Sjsg 	async_put_domains_mask(power_domains, &async_put_mask);
4811bb76ff1Sjsg 	if (!test_bit(domain, async_put_mask.bits))
482c349dbc7Sjsg 		goto out_verify;
483c349dbc7Sjsg 
484c349dbc7Sjsg 	async_put_domains_clear_domain(power_domains, domain);
485c349dbc7Sjsg 
486c349dbc7Sjsg 	ret = true;
487c349dbc7Sjsg 
4881bb76ff1Sjsg 	async_put_domains_mask(power_domains, &async_put_mask);
4891bb76ff1Sjsg 	if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
490c349dbc7Sjsg 		goto out_verify;
491c349dbc7Sjsg 
492*f005ef32Sjsg 	cancel_async_put_work(power_domains, false);
493c349dbc7Sjsg 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
494c349dbc7Sjsg 				 fetch_and_zero(&power_domains->async_put_wakeref));
495c349dbc7Sjsg out_verify:
496c349dbc7Sjsg 	verify_async_put_domains_state(power_domains);
497c349dbc7Sjsg 
498c349dbc7Sjsg 	return ret;
499c349dbc7Sjsg }
500c349dbc7Sjsg 
501c349dbc7Sjsg static void
__intel_display_power_get_domain(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)502c349dbc7Sjsg __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
503c349dbc7Sjsg 				 enum intel_display_power_domain domain)
504c349dbc7Sjsg {
5051bb76ff1Sjsg 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
506c349dbc7Sjsg 	struct i915_power_well *power_well;
507c349dbc7Sjsg 
508c349dbc7Sjsg 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
509c349dbc7Sjsg 		return;
510c349dbc7Sjsg 
5111bb76ff1Sjsg 	for_each_power_domain_well(dev_priv, power_well, domain)
512c349dbc7Sjsg 		intel_power_well_get(dev_priv, power_well);
513c349dbc7Sjsg 
514c349dbc7Sjsg 	power_domains->domain_use_count[domain]++;
515c349dbc7Sjsg }
516c349dbc7Sjsg 
517c349dbc7Sjsg /**
518c349dbc7Sjsg  * intel_display_power_get - grab a power domain reference
519c349dbc7Sjsg  * @dev_priv: i915 device instance
520c349dbc7Sjsg  * @domain: power domain to reference
521c349dbc7Sjsg  *
522c349dbc7Sjsg  * This function grabs a power domain reference for @domain and ensures that the
523c349dbc7Sjsg  * power domain and all its parents are powered up. Therefore users should only
524c349dbc7Sjsg  * grab a reference to the innermost power domain they need.
525c349dbc7Sjsg  *
526c349dbc7Sjsg  * Any power domain reference obtained by this function must have a symmetric
527c349dbc7Sjsg  * call to intel_display_power_put() to release the reference again.
528c349dbc7Sjsg  */
intel_display_power_get(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)529c349dbc7Sjsg intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
530c349dbc7Sjsg 					enum intel_display_power_domain domain)
531c349dbc7Sjsg {
5321bb76ff1Sjsg 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
533c349dbc7Sjsg 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
534c349dbc7Sjsg 
535c349dbc7Sjsg 	mutex_lock(&power_domains->lock);
536c349dbc7Sjsg 	__intel_display_power_get_domain(dev_priv, domain);
537c349dbc7Sjsg 	mutex_unlock(&power_domains->lock);
538c349dbc7Sjsg 
539c349dbc7Sjsg 	return wakeref;
540c349dbc7Sjsg }
541c349dbc7Sjsg 
542c349dbc7Sjsg /**
543c349dbc7Sjsg  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
544c349dbc7Sjsg  * @dev_priv: i915 device instance
545c349dbc7Sjsg  * @domain: power domain to reference
546c349dbc7Sjsg  *
547c349dbc7Sjsg  * This function grabs a power domain reference for @domain and ensures that the
548c349dbc7Sjsg  * power domain and all its parents are powered up. Therefore users should only
549c349dbc7Sjsg  * grab a reference to the innermost power domain they need.
550c349dbc7Sjsg  *
551c349dbc7Sjsg  * Any power domain reference obtained by this function must have a symmetric
552c349dbc7Sjsg  * call to intel_display_power_put() to release the reference again.
553c349dbc7Sjsg  */
554c349dbc7Sjsg intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)555c349dbc7Sjsg intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
556c349dbc7Sjsg 				   enum intel_display_power_domain domain)
557c349dbc7Sjsg {
5581bb76ff1Sjsg 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
559c349dbc7Sjsg 	intel_wakeref_t wakeref;
560c349dbc7Sjsg 	bool is_enabled;
561c349dbc7Sjsg 
562c349dbc7Sjsg 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
563c349dbc7Sjsg 	if (!wakeref)
564c349dbc7Sjsg 		return false;
565c349dbc7Sjsg 
566c349dbc7Sjsg 	mutex_lock(&power_domains->lock);
567c349dbc7Sjsg 
568c349dbc7Sjsg 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
569c349dbc7Sjsg 		__intel_display_power_get_domain(dev_priv, domain);
570c349dbc7Sjsg 		is_enabled = true;
571c349dbc7Sjsg 	} else {
572c349dbc7Sjsg 		is_enabled = false;
573c349dbc7Sjsg 	}
574c349dbc7Sjsg 
575c349dbc7Sjsg 	mutex_unlock(&power_domains->lock);
576c349dbc7Sjsg 
577c349dbc7Sjsg 	if (!is_enabled) {
578c349dbc7Sjsg 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
579c349dbc7Sjsg 		wakeref = 0;
580c349dbc7Sjsg 	}
581c349dbc7Sjsg 
582c349dbc7Sjsg 	return wakeref;
583c349dbc7Sjsg }
584c349dbc7Sjsg 
585c349dbc7Sjsg static void
__intel_display_power_put_domain(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)586c349dbc7Sjsg __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
587c349dbc7Sjsg 				 enum intel_display_power_domain domain)
588c349dbc7Sjsg {
589c349dbc7Sjsg 	struct i915_power_domains *power_domains;
590c349dbc7Sjsg 	struct i915_power_well *power_well;
591c349dbc7Sjsg 	const char *name = intel_display_power_domain_str(domain);
5921bb76ff1Sjsg 	struct intel_power_domain_mask async_put_mask;
593c349dbc7Sjsg 
5941bb76ff1Sjsg 	power_domains = &dev_priv->display.power.domains;
595c349dbc7Sjsg 
596c349dbc7Sjsg 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
597c349dbc7Sjsg 		 "Use count on domain %s is already zero\n",
598c349dbc7Sjsg 		 name);
5991bb76ff1Sjsg 	async_put_domains_mask(power_domains, &async_put_mask);
600c349dbc7Sjsg 	drm_WARN(&dev_priv->drm,
6011bb76ff1Sjsg 		 test_bit(domain, async_put_mask.bits),
602c349dbc7Sjsg 		 "Async disabling of domain %s is pending\n",
603c349dbc7Sjsg 		 name);
604c349dbc7Sjsg 
605c349dbc7Sjsg 	power_domains->domain_use_count[domain]--;
606c349dbc7Sjsg 
6071bb76ff1Sjsg 	for_each_power_domain_well_reverse(dev_priv, power_well, domain)
608c349dbc7Sjsg 		intel_power_well_put(dev_priv, power_well);
609c349dbc7Sjsg }
610c349dbc7Sjsg 
__intel_display_power_put(struct drm_i915_private * dev_priv,enum intel_display_power_domain domain)611c349dbc7Sjsg static void __intel_display_power_put(struct drm_i915_private *dev_priv,
612c349dbc7Sjsg 				      enum intel_display_power_domain domain)
613c349dbc7Sjsg {
6141bb76ff1Sjsg 	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
615c349dbc7Sjsg 
616c349dbc7Sjsg 	mutex_lock(&power_domains->lock);
617c349dbc7Sjsg 	__intel_display_power_put_domain(dev_priv, domain);
618c349dbc7Sjsg 	mutex_unlock(&power_domains->lock);
619c349dbc7Sjsg }
620c349dbc7Sjsg 
621c349dbc7Sjsg static void
queue_async_put_domains_work(struct i915_power_domains * power_domains,intel_wakeref_t wakeref,int delay_ms)622c349dbc7Sjsg queue_async_put_domains_work(struct i915_power_domains *power_domains,
623*f005ef32Sjsg 			     intel_wakeref_t wakeref,
624*f005ef32Sjsg 			     int delay_ms)
625c349dbc7Sjsg {
626ad8b1aafSjsg 	struct drm_i915_private *i915 = container_of(power_domains,
627ad8b1aafSjsg 						     struct drm_i915_private,
6281bb76ff1Sjsg 						     display.power.domains);
629ad8b1aafSjsg 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
630c349dbc7Sjsg 	power_domains->async_put_wakeref = wakeref;
631ad8b1aafSjsg 	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
632c349dbc7Sjsg 						    &power_domains->async_put_work,
633*f005ef32Sjsg 						    msecs_to_jiffies(delay_ms)));
634c349dbc7Sjsg }
635c349dbc7Sjsg 
/* Drop the deferred (async-put) reference of each power domain in @mask. */
static void
release_async_put_domains(struct i915_power_domains *power_domains,
			  struct intel_power_domain_mask *mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     display.power.domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}
663c349dbc7Sjsg 
/*
 * Delayed-work handler that completes pending intel_display_power_put_async()
 * requests: releases the refs collected in async_put_domains[0], and requeues
 * itself if more domains were async-put (into async_put_domains[1]) meanwhile.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.power.domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	/* Raw ref to hand over to a requeued instance of the work, if needed. */
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  &power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
		/* Promote set [1] to set [0] for the next pass of the worker. */
		bitmap_copy(power_domains->async_put_domains[0].bits,
			    power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		bitmap_zero(power_domains->async_put_domains[1].bits,
			    POWER_DOMAIN_NUM);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref),
					     power_domains->async_put_next_delay);
		power_domains->async_put_next_delay = 0;
	} else {
		/*
		 * Cancel the work that got queued after this one got dequeued,
		 * since here we released the corresponding async-put reference.
		 */
		cancel_async_put_work(power_domains, false);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* new_work_wakeref is zeroed above if ownership moved to the requeue. */
	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
717c349dbc7Sjsg 
/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 * @delay_ms: delay of powering down the power domain
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 * The power down is delayed by @delay_ms if this is >= 0, or by a default
 * 100 ms otherwise.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref,
				     int delay_ms)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	/* Raw ref to keep the device awake until the worker runs. */
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	/* A negative @delay_ms selects the default 100 ms power-down delay. */
	delay_ms = delay_ms >= 0 ? delay_ms : 100;

	mutex_lock(&power_domains->lock);

	/* Not the last reference: drop it synchronously, nothing powers down. */
	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		set_bit(domain, power_domains->async_put_domains[1].bits);
		/* The requeued work will use the longest delay requested so far. */
		power_domains->async_put_next_delay = max(power_domains->async_put_next_delay,
							  delay_ms);
	} else {
		set_bit(domain, power_domains->async_put_domains[0].bits);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref),
					     delay_ms);
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* work_wakeref was zeroed above if its ownership moved to the worker. */
	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
774c349dbc7Sjsg 
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_power_domain_mask async_put_mask;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	/*
	 * If the wakeref is already gone, the worker has run (or was never
	 * queued) and there are no pending refs left to release here.
	 */
	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	/* Release all pending async-put references synchronously. */
	async_put_domains_mask(power_domains, &async_put_mask);
	release_async_put_domains(power_domains, &async_put_mask);
	cancel_async_put_work(power_domains, false);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
811c349dbc7Sjsg 
/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	intel_display_power_flush_work(i915);
	/* sync=true: wait for a concurrently running work handler to finish. */
	cancel_async_put_work(power_domains, true);

	verify_async_put_domains_state(power_domains);

	/* All async-put state must be fully drained at this point. */
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
831c349dbc7Sjsg 
832c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	/* Drop the domain ref first, then the tracked runtime PM wakeref. */
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
8505ca02815Sjsg #else
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function is only for the power domain code's internal use to suppress wakeref
 * tracking when the corresponding debug kconfig option is disabled, should not
 * be used otherwise.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	/* Untracked variant: no wakeref handle to pass back to runtime PM. */
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
870c349dbc7Sjsg #endif
871c349dbc7Sjsg 
/*
 * Grab a reference on @domain and record it in @power_domain_set, which
 * tracks a group of domains so they can be released together later via
 * intel_display_power_put_mask_in_set().
 */
void
intel_display_power_get_in_set(struct drm_i915_private *i915,
			       struct intel_display_power_domain_set *power_domain_set,
			       enum intel_display_power_domain domain)
{
	intel_wakeref_t __maybe_unused wf;

	/* Each domain may be acquired at most once per set. */
	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Per-domain wakerefs are only tracked with runtime PM debugging on. */
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);
}
8875ca02815Sjsg 
/*
 * Like intel_display_power_get_in_set(), but only grabs the reference if the
 * domain is already enabled.  Returns true if the reference was acquired and
 * recorded in @power_domain_set, false otherwise.
 */
bool
intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
					  struct intel_display_power_domain_set *power_domain_set,
					  enum intel_display_power_domain domain)
{
	intel_wakeref_t wf;

	/* Each domain may be acquired at most once per set. */
	drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));

	wf = intel_display_power_get_if_enabled(i915, domain);
	if (!wf)
		return false;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Per-domain wakerefs are only tracked with runtime PM debugging on. */
	power_domain_set->wakerefs[domain] = wf;
#endif
	set_bit(domain, power_domain_set->mask.bits);

	return true;
}
9085ca02815Sjsg 
/*
 * Release the references for all domains in @mask that were previously
 * acquired into @power_domain_set, clearing them from the set's mask.
 */
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
				    struct intel_display_power_domain_set *power_domain_set,
				    struct intel_power_domain_mask *mask)
{
	enum intel_display_power_domain domain;

#ifdef notyet
	/* Every domain being released must have been acquired into the set. */
	drm_WARN_ON(&i915->drm,
		    !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
#endif

	for_each_power_domain(domain, mask) {
		intel_wakeref_t __maybe_unused wf = -1;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
		intel_display_power_put(i915, domain, wf);
		clear_bit(domain, power_domain_set->mask.bits);
	}
}
9315ca02815Sjsg 
/*
 * Normalize the disable_power_well module parameter: a negative value means
 * "auto" and resolves to enabled (1); any non-negative value is clamped to a
 * 0/1 boolean.  @dev_priv is currently unused.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0)
		return 1;

	return disable_power_well != 0;
}
941c349dbc7Sjsg 
/*
 * Compute the mask of DC states (DC3CO/DC5/DC6/DC9) allowed on this
 * platform, clamped by the enable_dc module parameter (-1 selects the
 * platform maximum).
 */
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/* Per-platform maximum DC state level; order of checks matters. */
	if (IS_DG2(dev_priv))
		max_dc = 1;
	else if (IS_DG1(dev_priv))
		max_dc = 3;
	else if (DISPLAY_VER(dev_priv) >= 12)
		max_dc = 4;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		max_dc = 1;
	else if (DISPLAY_VER(dev_priv) >= 9)
		max_dc = 2;
	else
		max_dc = 0;

	/*
	 * DC9 has a separate HW flow from the rest of the DC states,
	 * not depending on the DMC firmware. It's needed by system
	 * suspend/resume, so allow it unconditionally.
	 */
	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
		DISPLAY_VER(dev_priv) >= 11 ?
	       DC_STATE_EN_DC9 : 0;

	/* With power wells always on, no DC state beyond DC9 is usable. */
	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	/* Translate the DC level into the corresponding enable bits. */
	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
1011c349dbc7Sjsg 
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 *
 * Returns 0 on success, or the error from intel_display_power_map_init().
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	/* Sanitize the module parameters before deriving state from them. */
	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	power_domains->allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	power_domains->target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	rw_init(&power_domains->lock, "ipdl");

	/* Worker that completes intel_display_power_put_async() requests. */
	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	return intel_display_power_map_init(power_domains);
}
1039c349dbc7Sjsg 
1040c349dbc7Sjsg /**
1041c349dbc7Sjsg  * intel_power_domains_cleanup - clean up power domains resources
1042c349dbc7Sjsg  * @dev_priv: i915 device instance
1043c349dbc7Sjsg  *
1044c349dbc7Sjsg  * Release any resources acquired by intel_power_domains_init()
1045c349dbc7Sjsg  */
intel_power_domains_cleanup(struct drm_i915_private * dev_priv)1046c349dbc7Sjsg void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
1047c349dbc7Sjsg {
10481bb76ff1Sjsg 	intel_display_power_map_cleanup(&dev_priv->display.power.domains);
1049c349dbc7Sjsg }
1050c349dbc7Sjsg 
/* Synchronize each power well's software state with the hardware state. */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well)
		intel_power_well_sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}
1061c349dbc7Sjsg 
/* Request power up/down of one DBuf slice and verify it reached that state. */
static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;

	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
		     enable ? DBUF_POWER_REQUEST : 0);
	intel_de_posting_read(dev_priv, reg);
	/* Give the hardware time to act on the request before checking. */
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, str_enable_disable(enable));
}
1078c349dbc7Sjsg 
/*
 * Power up exactly the DBuf slices in @req_slices (and power down the rest),
 * then record the new set in dev_priv->display.dbuf.enabled_slices.
 */
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u8 slice_mask = DISPLAY_INFO(dev_priv)->dbuf.slice_mask;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
		 req_slices, slice_mask);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for_each_dbuf_slice(dev_priv, slice)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->display.dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}
1109c349dbc7Sjsg 
/* Bring up the DBuf: power the already-enabled slices plus at least slice 1. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	u8 slices_mask;

	dev_priv->display.dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	slices_mask = BIT(DBUF_S1) | dev_priv->display.dbuf.enabled_slices;

	/* Display 14+ must report the DBuf demand to PM before powering up. */
	if (DISPLAY_VER(dev_priv) >= 14)
		intel_pmdemand_program_dbuf(dev_priv, slices_mask);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, slices_mask);
}
1128c349dbc7Sjsg 
/* Power down all DBuf slices, then clear the PM demand on display 14+. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);

	if (DISPLAY_VER(dev_priv) >= 14)
		intel_pmdemand_program_dbuf(dev_priv, 0);
}
1136c349dbc7Sjsg 
/*
 * Program the tracker-state-service field of every DBuf slice control
 * register.  Skipped on Alderlake-P.
 */
static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
{
	enum dbuf_slice slice;

	if (IS_ALDERLAKE_P(dev_priv))
		return;

	for_each_dbuf_slice(dev_priv, slice)
		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
			     DBUF_TRACKER_STATE_SERVICE_MASK,
			     DBUF_TRACKER_STATE_SERVICE(8));
}
11495ca02815Sjsg 
/* Program the MBus ABOX credit registers; not needed on ADL-P/display 14+. */
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = DISPLAY_INFO(dev_priv)->abox_mask;
	u32 mask, val, i;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (DISPLAY_VER(dev_priv) == 12)
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}
1178c349dbc7Sjsg 
/* Sanity-check the BIOS-programmed LCPLL state; report errors, don't fix. */
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong.  Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
1198c349dbc7Sjsg 
/*
 * Verify that everything depending on LCPLL is already shut down: CRTCs,
 * power wells, PLLs, panel power, backlight PWMs, GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		I915_STATE_WARN(dev_priv, crtc->active,
				"CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(dev_priv, intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(dev_priv,
				intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(dev_priv,
			(intel_de_read(dev_priv, UTIL_PIN_CTL) & (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
			"Utility pin enabled in PWM mode\n");
	I915_STATE_WARN(dev_priv,
			intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(dev_priv, intel_irqs_enabled(dev_priv),
			"IRQs enabled\n");
}
1248c349dbc7Sjsg 
hsw_read_dcomp(struct drm_i915_private * dev_priv)1249c349dbc7Sjsg static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
1250c349dbc7Sjsg {
1251c349dbc7Sjsg 	if (IS_HASWELL(dev_priv))
1252c349dbc7Sjsg 		return intel_de_read(dev_priv, D_COMP_HSW);
1253c349dbc7Sjsg 	else
1254c349dbc7Sjsg 		return intel_de_read(dev_priv, D_COMP_BDW);
1255c349dbc7Sjsg }
1256c349dbc7Sjsg 
/*
 * Write @val to D_COMP.  On Haswell the register is written through the
 * pcode mailbox; on Broadwell it is a plain MMIO write with a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (!IS_HASWELL(dev_priv)) {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
		return;
	}

	if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to write to D_COMP\n");
}
1268c349dbc7Sjsg 
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Optionally move CDCLK onto FCLK before stopping the PLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	/* Disable the PLL and wait for it to report unlocked. */
	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	/* Disable D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}
1318c349dbc7Sjsg 
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Already locked, enabled, on LCPLL and power-down disallowed: done. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	/* Re-enable (and force) the D_COMP comparator before the PLL. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Switch the CDCLK source back from FCLK to the LCPLL. */
		intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	/* CDCLK may have changed while we were in PC8: re-read and log it. */
	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
1371c349dbc7Sjsg 
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	/* LP variants of the LPT PCH: clear the partition level disable bit. */
	if (HAS_PCH_LPT_LP(dev_priv))
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
			     PCH_LP_PARTITION_LEVEL_DISABLE, 0);

	lpt_disable_clkout_dp(dev_priv);
	/* Switch CDCLK to FCLK and allow LCPLL power down. */
	hsw_disable_lcpll(dev_priv, true, true);
}
1406c349dbc7Sjsg 
/*
 * Leave package C8+: bring the LCPLL back up, reinit the PCH reference
 * clock and re-program everything that did not survive the deep PC state.
 */
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	/* Many display registers don't survive PC8+ */
	intel_clock_gating_init(dev_priv);
}
1417c349dbc7Sjsg 
/*
 * Enable or disable the PCH reset handshake.
 *
 * IVB programs the handshake bits in GEN7_MSG_CTL; everything else uses
 * HSW_NDE_RSTWRN_OPT.  On display version 14+ the PICA handshake enable
 * bit is toggled along with the PCH one.
 */
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	if (DISPLAY_VER(dev_priv) >= 14)
		reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;

	/* Set all handshake bits when enabling, clear them all otherwise. */
	intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0);
}
1437c349dbc7Sjsg 
/*
 * SKL display core power-up sequence: disable DC states, enable the PCH
 * reset handshake, bring up PG1 + Misc I/O, CDCLK and DBUF, and reload
 * the DMC firmware program on resume.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* Display-less SKUs: nothing further to power up. */
	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}
1470c349dbc7Sjsg 
/*
 * SKL display core power-down sequence: the reverse of
 * skl_display_core_init(), except that Misc I/O is deliberately kept
 * enabled (see comment below).
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
1504c349dbc7Sjsg 
/*
 * BXT/GLK display core power-up sequence.  Same shape as the SKL one,
 * but there is no Misc I/O well and the PCH handshake must stay disabled
 * (there is no PCH at all on these platforms).
 */
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);
}
1538c349dbc7Sjsg 
/*
 * BXT/GLK display core power-down sequence: the reverse of
 * bxt_display_core_init().
 */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	/* TODO: disable DMC program */

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
1570c349dbc7Sjsg 
/*
 * BW_BUDDY page mask lookup entry: the page mask to program for a given
 * DRAM type / channel count combination (see tgl_bw_buddy_init()).
 */
struct buddy_page_mask {
	u32 page_mask;
	u8 type;		/* enum intel_dram_type value */
	u8 num_channels;
};

/* Default BW_BUDDY page masks (TGL+).  Terminated by a zero page_mask. */
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

/* Wa_1409767108 page masks (ADL-S and early RKL steppings). */
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};
1600c349dbc7Sjsg 
/*
 * Program the arbiter BW_BUDDY registers according to the detected DRAM
 * type and channel count.  If no matching table entry exists, the buddy
 * logic is disabled on every ABOX instead.
 */
static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = DISPLAY_INFO(dev_priv)->abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dgpu's beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    (IS_ROCKETLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)))
		/* Wa_1409767108 */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	/* Find the entry matching our DRAM config; stops on the {} sentinel. */
	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	/*
	 * NOTE(review): for_each_set_bit()'s third argument is a count of
	 * bits, but sizeof(abox_mask) is a byte count (8), limiting the scan
	 * to bits 0-7.  Presumably harmless for current abox masks — confirm.
	 */
	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
1644c349dbc7Sjsg 
/*
 * ICL+ display core power-up sequence; the numbered steps follow the
 * BSpec display initialization sequence for these platforms.
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* MTL (display version 14): release the PHY PG1 latches. */
	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume)
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
			     DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
			     DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
1714c349dbc7Sjsg 
/*
 * ICL+ display core power-down sequence: the reverse of
 * icl_display_core_init().
 */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);
	intel_dmc_disable_program(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/* MTL (display version 14): re-assert the PHY PG1 latches. */
	if (DISPLAY_VER(dev_priv) == 14)
		intel_de_rmw(dev_priv, DC_STATE_EN, 0,
			     HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
1751c349dbc7Sjsg 
/*
 * Reconstruct the initial shadow value of DISPLAY_PHY_CONTROL for CHV.
 * The register itself must never be read (see workaround note below), so
 * the expected value is derived from power well state and lane status.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->display.power.chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		/* PHY0 serves ports B and C (channels 0 and 1). */
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		/* PHY1 serves port D (single channel). */
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->display.power.chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->display.power.chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
1838c349dbc7Sjsg 
/*
 * VLV common lane workaround: toggle the display PHY side reset by
 * power-gating and ungating the common lane well, unless the display
 * looks already active (both wells on and CMNRST asserted).
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (intel_power_well_is_enabled(dev_priv, cmn) &&
	    intel_power_well_is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	intel_power_well_enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	intel_power_well_disable(dev_priv, cmn);
}
1866c349dbc7Sjsg 
vlv_punit_is_power_gated(struct drm_i915_private * dev_priv,u32 reg0)1867c349dbc7Sjsg static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
1868c349dbc7Sjsg {
1869c349dbc7Sjsg 	bool ret;
1870c349dbc7Sjsg 
1871c349dbc7Sjsg 	vlv_punit_get(dev_priv);
1872c349dbc7Sjsg 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
1873c349dbc7Sjsg 	vlv_punit_put(dev_priv);
1874c349dbc7Sjsg 
1875c349dbc7Sjsg 	return ret;
1876c349dbc7Sjsg }
1877c349dbc7Sjsg 
/* Warn if the VED (video encode/decode) subsystem is not power gated. */
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}
1884c349dbc7Sjsg 
/*
 * Warn if the ISP is not power gated, unless an ISP PCI function is
 * present (in which case another driver may legitimately own it).
 * Compiled out on OpenBSD: pci_dev_present() is not available here.
 */
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
#ifdef notyet
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
#endif
}
1899c349dbc7Sjsg 
1900c349dbc7Sjsg static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
1901c349dbc7Sjsg 
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;

	power_domains->initializing = true;

	/* Dispatch to the platform-specific display core init sequence. */
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
										      POWER_DOMAIN_INIT);
	}
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
1968c349dbc7Sjsg 
1969c349dbc7Sjsg /**
1970c349dbc7Sjsg  * intel_power_domains_driver_remove - deinitialize hw power domain state
1971c349dbc7Sjsg  * @i915: i915 device instance
1972c349dbc7Sjsg  *
1973c349dbc7Sjsg  * De-initializes the display power domain HW state. It also ensures that the
1974c349dbc7Sjsg  * device stays powered up so that the driver can be reloaded.
1975c349dbc7Sjsg  *
1976c349dbc7Sjsg  * It must be called with power domains already disabled (after a call to
1977c349dbc7Sjsg  * intel_power_domains_disable()) and must be paired with
1978c349dbc7Sjsg  * intel_power_domains_init_hw().
1979c349dbc7Sjsg  */
intel_power_domains_driver_remove(struct drm_i915_private * i915)1980c349dbc7Sjsg void intel_power_domains_driver_remove(struct drm_i915_private *i915)
1981c349dbc7Sjsg {
1982c349dbc7Sjsg 	intel_wakeref_t wakeref __maybe_unused =
19831bb76ff1Sjsg 		fetch_and_zero(&i915->display.power.domains.init_wakeref);
1984c349dbc7Sjsg 
1985c349dbc7Sjsg 	/* Remove the refcount we took to keep power well support disabled. */
1986ad8b1aafSjsg 	if (!i915->params.disable_power_well)
19875ca02815Sjsg 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
19881bb76ff1Sjsg 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
1989c349dbc7Sjsg 
1990c349dbc7Sjsg 	intel_display_power_flush_work_sync(i915);
1991c349dbc7Sjsg 
1992c349dbc7Sjsg 	intel_power_domains_verify_state(i915);
1993c349dbc7Sjsg 
1994c349dbc7Sjsg 	/* Keep the power well enabled, but cancel its rpm wakeref. */
1995c349dbc7Sjsg 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1996c349dbc7Sjsg }
1997c349dbc7Sjsg 
1998c349dbc7Sjsg /**
19991bb76ff1Sjsg  * intel_power_domains_sanitize_state - sanitize power domains state
20001bb76ff1Sjsg  * @i915: i915 device instance
20011bb76ff1Sjsg  *
20021bb76ff1Sjsg  * Sanitize the power domains state during driver loading and system resume.
20031bb76ff1Sjsg  * The function will disable all display power wells that BIOS has enabled
20041bb76ff1Sjsg  * without a user for it (any user for a power well has taken a reference
20051bb76ff1Sjsg  * on it by the time this function is called, after the state of all the
20061bb76ff1Sjsg  * pipe, encoder, etc. HW resources have been sanitized).
20071bb76ff1Sjsg  */
intel_power_domains_sanitize_state(struct drm_i915_private * i915)20081bb76ff1Sjsg void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
20091bb76ff1Sjsg {
20101bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
20111bb76ff1Sjsg 	struct i915_power_well *power_well;
20121bb76ff1Sjsg 
20131bb76ff1Sjsg 	mutex_lock(&power_domains->lock);
20141bb76ff1Sjsg 
20151bb76ff1Sjsg 	for_each_power_well_reverse(i915, power_well) {
20161bb76ff1Sjsg 		if (power_well->desc->always_on || power_well->count ||
20171bb76ff1Sjsg 		    !intel_power_well_is_enabled(i915, power_well))
20181bb76ff1Sjsg 			continue;
20191bb76ff1Sjsg 
20201bb76ff1Sjsg 		drm_dbg_kms(&i915->drm,
20211bb76ff1Sjsg 			    "BIOS left unused %s power well enabled, disabling it\n",
20221bb76ff1Sjsg 			    intel_power_well_name(power_well));
20231bb76ff1Sjsg 		intel_power_well_disable(i915, power_well);
20241bb76ff1Sjsg 	}
20251bb76ff1Sjsg 
20261bb76ff1Sjsg 	mutex_unlock(&power_domains->lock);
20271bb76ff1Sjsg }
20281bb76ff1Sjsg 
20291bb76ff1Sjsg /**
2030c349dbc7Sjsg  * intel_power_domains_enable - enable toggling of display power wells
2031c349dbc7Sjsg  * @i915: i915 device instance
2032c349dbc7Sjsg  *
2033c349dbc7Sjsg  * Enable the ondemand enabling/disabling of the display power wells. Note that
2034c349dbc7Sjsg  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
2035c349dbc7Sjsg  * only at specific points of the display modeset sequence, thus they are not
2036c349dbc7Sjsg  * affected by the intel_power_domains_enable()/disable() calls. The purpose
2037c349dbc7Sjsg  * of these function is to keep the rest of power wells enabled until the end
2038c349dbc7Sjsg  * of display HW readout (which will acquire the power references reflecting
2039c349dbc7Sjsg  * the current HW state).
2040c349dbc7Sjsg  */
intel_power_domains_enable(struct drm_i915_private * i915)2041c349dbc7Sjsg void intel_power_domains_enable(struct drm_i915_private *i915)
2042c349dbc7Sjsg {
2043c349dbc7Sjsg 	intel_wakeref_t wakeref __maybe_unused =
20441bb76ff1Sjsg 		fetch_and_zero(&i915->display.power.domains.init_wakeref);
2045c349dbc7Sjsg 
2046c349dbc7Sjsg 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2047c349dbc7Sjsg 	intel_power_domains_verify_state(i915);
2048c349dbc7Sjsg }
2049c349dbc7Sjsg 
2050c349dbc7Sjsg /**
2051c349dbc7Sjsg  * intel_power_domains_disable - disable toggling of display power wells
2052c349dbc7Sjsg  * @i915: i915 device instance
2053c349dbc7Sjsg  *
2054c349dbc7Sjsg  * Disable the ondemand enabling/disabling of the display power wells. See
2055c349dbc7Sjsg  * intel_power_domains_enable() for which power wells this call controls.
2056c349dbc7Sjsg  */
intel_power_domains_disable(struct drm_i915_private * i915)2057c349dbc7Sjsg void intel_power_domains_disable(struct drm_i915_private *i915)
2058c349dbc7Sjsg {
20591bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2060c349dbc7Sjsg 
20615ca02815Sjsg 	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
20625ca02815Sjsg 	power_domains->init_wakeref =
2063c349dbc7Sjsg 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
2064c349dbc7Sjsg 
2065c349dbc7Sjsg 	intel_power_domains_verify_state(i915);
2066c349dbc7Sjsg }
2067c349dbc7Sjsg 
2068c349dbc7Sjsg /**
2069c349dbc7Sjsg  * intel_power_domains_suspend - suspend power domain state
2070c349dbc7Sjsg  * @i915: i915 device instance
2071*f005ef32Sjsg  * @s2idle: specifies whether we go to idle, or deeper sleep
2072c349dbc7Sjsg  *
2073c349dbc7Sjsg  * This function prepares the hardware power domain state before entering
2074c349dbc7Sjsg  * system suspend.
2075c349dbc7Sjsg  *
2076c349dbc7Sjsg  * It must be called with power domains already disabled (after a call to
2077c349dbc7Sjsg  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
2078c349dbc7Sjsg  */
intel_power_domains_suspend(struct drm_i915_private * i915,bool s2idle)2079*f005ef32Sjsg void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
2080c349dbc7Sjsg {
20811bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2082c349dbc7Sjsg 	intel_wakeref_t wakeref __maybe_unused =
20835ca02815Sjsg 		fetch_and_zero(&power_domains->init_wakeref);
2084c349dbc7Sjsg 
2085c349dbc7Sjsg 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
2086c349dbc7Sjsg 
2087c349dbc7Sjsg 	/*
2088c349dbc7Sjsg 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
2089c349dbc7Sjsg 	 * support don't manually deinit the power domains. This also means the
20905ca02815Sjsg 	 * DMC firmware will stay active, it will power down any HW
2091c349dbc7Sjsg 	 * resources as required and also enable deeper system power states
2092c349dbc7Sjsg 	 * that would be blocked if the firmware was inactive.
2093c349dbc7Sjsg 	 */
2094*f005ef32Sjsg 	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && s2idle &&
20955ca02815Sjsg 	    intel_dmc_has_payload(i915)) {
2096c349dbc7Sjsg 		intel_display_power_flush_work(i915);
2097c349dbc7Sjsg 		intel_power_domains_verify_state(i915);
2098c349dbc7Sjsg 		return;
2099c349dbc7Sjsg 	}
2100c349dbc7Sjsg 
2101c349dbc7Sjsg 	/*
2102c349dbc7Sjsg 	 * Even if power well support was disabled we still want to disable
2103c349dbc7Sjsg 	 * power wells if power domains must be deinitialized for suspend.
2104c349dbc7Sjsg 	 */
2105ad8b1aafSjsg 	if (!i915->params.disable_power_well)
21065ca02815Sjsg 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
21071bb76ff1Sjsg 					fetch_and_zero(&i915->display.power.domains.disable_wakeref));
2108c349dbc7Sjsg 
2109c349dbc7Sjsg 	intel_display_power_flush_work(i915);
2110c349dbc7Sjsg 	intel_power_domains_verify_state(i915);
2111c349dbc7Sjsg 
21125ca02815Sjsg 	if (DISPLAY_VER(i915) >= 11)
2113c349dbc7Sjsg 		icl_display_core_uninit(i915);
21145ca02815Sjsg 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
2115c349dbc7Sjsg 		bxt_display_core_uninit(i915);
21165ca02815Sjsg 	else if (DISPLAY_VER(i915) == 9)
21175ca02815Sjsg 		skl_display_core_uninit(i915);
2118c349dbc7Sjsg 
2119c349dbc7Sjsg 	power_domains->display_core_suspended = true;
2120c349dbc7Sjsg }
2121c349dbc7Sjsg 
2122c349dbc7Sjsg /**
2123c349dbc7Sjsg  * intel_power_domains_resume - resume power domain state
2124c349dbc7Sjsg  * @i915: i915 device instance
2125c349dbc7Sjsg  *
2126c349dbc7Sjsg  * This function resume the hardware power domain state during system resume.
2127c349dbc7Sjsg  *
2128c349dbc7Sjsg  * It will return with power domain support disabled (to be enabled later by
2129c349dbc7Sjsg  * intel_power_domains_enable()) and must be paired with
2130c349dbc7Sjsg  * intel_power_domains_suspend().
2131c349dbc7Sjsg  */
intel_power_domains_resume(struct drm_i915_private * i915)2132c349dbc7Sjsg void intel_power_domains_resume(struct drm_i915_private *i915)
2133c349dbc7Sjsg {
21341bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2135c349dbc7Sjsg 
2136c349dbc7Sjsg 	if (power_domains->display_core_suspended) {
2137c349dbc7Sjsg 		intel_power_domains_init_hw(i915, true);
2138c349dbc7Sjsg 		power_domains->display_core_suspended = false;
2139c349dbc7Sjsg 	} else {
21405ca02815Sjsg 		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
21415ca02815Sjsg 		power_domains->init_wakeref =
2142c349dbc7Sjsg 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
2143c349dbc7Sjsg 	}
2144c349dbc7Sjsg 
2145c349dbc7Sjsg 	intel_power_domains_verify_state(i915);
2146c349dbc7Sjsg }
2147c349dbc7Sjsg 
2148c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2149c349dbc7Sjsg 
intel_power_domains_dump_info(struct drm_i915_private * i915)2150c349dbc7Sjsg static void intel_power_domains_dump_info(struct drm_i915_private *i915)
2151c349dbc7Sjsg {
21521bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2153c349dbc7Sjsg 	struct i915_power_well *power_well;
2154c349dbc7Sjsg 
2155c349dbc7Sjsg 	for_each_power_well(i915, power_well) {
2156c349dbc7Sjsg 		enum intel_display_power_domain domain;
2157c349dbc7Sjsg 
2158c349dbc7Sjsg 		drm_dbg(&i915->drm, "%-25s %d\n",
21591bb76ff1Sjsg 			intel_power_well_name(power_well), intel_power_well_refcount(power_well));
2160c349dbc7Sjsg 
21611bb76ff1Sjsg 		for_each_power_domain(domain, intel_power_well_domains(power_well))
2162c349dbc7Sjsg 			drm_dbg(&i915->drm, "  %-23s %d\n",
2163c349dbc7Sjsg 				intel_display_power_domain_str(domain),
2164c349dbc7Sjsg 				power_domains->domain_use_count[domain]);
2165c349dbc7Sjsg 	}
2166c349dbc7Sjsg }
2167c349dbc7Sjsg 
2168c349dbc7Sjsg /**
2169c349dbc7Sjsg  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
2170c349dbc7Sjsg  * @i915: i915 device instance
2171c349dbc7Sjsg  *
2172c349dbc7Sjsg  * Verify if the reference count of each power well matches its HW enabled
2173c349dbc7Sjsg  * state and the total refcount of the domains it belongs to. This must be
2174c349dbc7Sjsg  * called after modeset HW state sanitization, which is responsible for
2175c349dbc7Sjsg  * acquiring reference counts for any power wells in use and disabling the
2176c349dbc7Sjsg  * ones left on by BIOS but not required by any active output.
2177c349dbc7Sjsg  */
intel_power_domains_verify_state(struct drm_i915_private * i915)2178c349dbc7Sjsg static void intel_power_domains_verify_state(struct drm_i915_private *i915)
2179c349dbc7Sjsg {
21801bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2181c349dbc7Sjsg 	struct i915_power_well *power_well;
2182c349dbc7Sjsg 	bool dump_domain_info;
2183c349dbc7Sjsg 
2184c349dbc7Sjsg 	mutex_lock(&power_domains->lock);
2185c349dbc7Sjsg 
2186c349dbc7Sjsg 	verify_async_put_domains_state(power_domains);
2187c349dbc7Sjsg 
2188c349dbc7Sjsg 	dump_domain_info = false;
2189c349dbc7Sjsg 	for_each_power_well(i915, power_well) {
2190c349dbc7Sjsg 		enum intel_display_power_domain domain;
2191c349dbc7Sjsg 		int domains_count;
2192c349dbc7Sjsg 		bool enabled;
2193c349dbc7Sjsg 
21941bb76ff1Sjsg 		enabled = intel_power_well_is_enabled(i915, power_well);
21951bb76ff1Sjsg 		if ((intel_power_well_refcount(power_well) ||
21961bb76ff1Sjsg 		     intel_power_well_is_always_on(power_well)) !=
2197c349dbc7Sjsg 		    enabled)
2198c349dbc7Sjsg 			drm_err(&i915->drm,
2199c349dbc7Sjsg 				"power well %s state mismatch (refcount %d/enabled %d)",
22001bb76ff1Sjsg 				intel_power_well_name(power_well),
22011bb76ff1Sjsg 				intel_power_well_refcount(power_well), enabled);
2202c349dbc7Sjsg 
2203c349dbc7Sjsg 		domains_count = 0;
22041bb76ff1Sjsg 		for_each_power_domain(domain, intel_power_well_domains(power_well))
2205c349dbc7Sjsg 			domains_count += power_domains->domain_use_count[domain];
2206c349dbc7Sjsg 
22071bb76ff1Sjsg 		if (intel_power_well_refcount(power_well) != domains_count) {
2208c349dbc7Sjsg 			drm_err(&i915->drm,
2209c349dbc7Sjsg 				"power well %s refcount/domain refcount mismatch "
2210c349dbc7Sjsg 				"(refcount %d/domains refcount %d)\n",
22111bb76ff1Sjsg 				intel_power_well_name(power_well),
22121bb76ff1Sjsg 				intel_power_well_refcount(power_well),
2213c349dbc7Sjsg 				domains_count);
2214c349dbc7Sjsg 			dump_domain_info = true;
2215c349dbc7Sjsg 		}
2216c349dbc7Sjsg 	}
2217c349dbc7Sjsg 
2218c349dbc7Sjsg 	if (dump_domain_info) {
2219c349dbc7Sjsg 		static bool dumped;
2220c349dbc7Sjsg 
2221c349dbc7Sjsg 		if (!dumped) {
2222c349dbc7Sjsg 			intel_power_domains_dump_info(i915);
2223c349dbc7Sjsg 			dumped = true;
2224c349dbc7Sjsg 		}
2225c349dbc7Sjsg 	}
2226c349dbc7Sjsg 
2227c349dbc7Sjsg 	mutex_unlock(&power_domains->lock);
2228c349dbc7Sjsg }
2229c349dbc7Sjsg 
2230c349dbc7Sjsg #else
2231c349dbc7Sjsg 
/* CONFIG_DRM_I915_DEBUG_RUNTIME_PM disabled: verification is a no-op. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
2235c349dbc7Sjsg 
2236c349dbc7Sjsg #endif
2237c349dbc7Sjsg 
intel_display_power_suspend_late(struct drm_i915_private * i915)2238c349dbc7Sjsg void intel_display_power_suspend_late(struct drm_i915_private *i915)
2239c349dbc7Sjsg {
22405ca02815Sjsg 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
22415ca02815Sjsg 	    IS_BROXTON(i915)) {
2242c349dbc7Sjsg 		bxt_enable_dc9(i915);
22435ca02815Sjsg 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2244c349dbc7Sjsg 		hsw_enable_pc8(i915);
2245c349dbc7Sjsg 	}
2246c349dbc7Sjsg 
22475ca02815Sjsg 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
22485ca02815Sjsg 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
22495ca02815Sjsg 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
22505ca02815Sjsg }
22515ca02815Sjsg 
intel_display_power_resume_early(struct drm_i915_private * i915)2252c349dbc7Sjsg void intel_display_power_resume_early(struct drm_i915_private *i915)
2253c349dbc7Sjsg {
22545ca02815Sjsg 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
22555ca02815Sjsg 	    IS_BROXTON(i915)) {
2256c349dbc7Sjsg 		gen9_sanitize_dc_state(i915);
2257c349dbc7Sjsg 		bxt_disable_dc9(i915);
2258c349dbc7Sjsg 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2259c349dbc7Sjsg 		hsw_disable_pc8(i915);
2260c349dbc7Sjsg 	}
22615ca02815Sjsg 
22625ca02815Sjsg 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
22635ca02815Sjsg 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
22645ca02815Sjsg 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
2265c349dbc7Sjsg }
2266c349dbc7Sjsg 
/* Uninit the display core and enter the platform's suspend power state. */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
2279c349dbc7Sjsg 
intel_display_power_resume(struct drm_i915_private * i915)2280c349dbc7Sjsg void intel_display_power_resume(struct drm_i915_private *i915)
2281c349dbc7Sjsg {
2282*f005ef32Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
2283*f005ef32Sjsg 
22845ca02815Sjsg 	if (DISPLAY_VER(i915) >= 11) {
2285c349dbc7Sjsg 		bxt_disable_dc9(i915);
2286c349dbc7Sjsg 		icl_display_core_init(i915, true);
22875ca02815Sjsg 		if (intel_dmc_has_payload(i915)) {
2288*f005ef32Sjsg 			if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
2289c349dbc7Sjsg 				skl_enable_dc6(i915);
2290*f005ef32Sjsg 			else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
2291c349dbc7Sjsg 				gen9_enable_dc5(i915);
2292c349dbc7Sjsg 		}
22935ca02815Sjsg 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
2294c349dbc7Sjsg 		bxt_disable_dc9(i915);
2295c349dbc7Sjsg 		bxt_display_core_init(i915, true);
22965ca02815Sjsg 		if (intel_dmc_has_payload(i915) &&
2297*f005ef32Sjsg 		    (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
2298c349dbc7Sjsg 			gen9_enable_dc5(i915);
2299c349dbc7Sjsg 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2300c349dbc7Sjsg 		hsw_disable_pc8(i915);
2301c349dbc7Sjsg 	}
2302c349dbc7Sjsg }
23031bb76ff1Sjsg 
intel_display_power_debug(struct drm_i915_private * i915,struct seq_file * m)23041bb76ff1Sjsg void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
23051bb76ff1Sjsg {
23061bb76ff1Sjsg 	struct i915_power_domains *power_domains = &i915->display.power.domains;
23071bb76ff1Sjsg 	int i;
23081bb76ff1Sjsg 
23091bb76ff1Sjsg 	mutex_lock(&power_domains->lock);
23101bb76ff1Sjsg 
23111bb76ff1Sjsg 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
23121bb76ff1Sjsg 	for (i = 0; i < power_domains->power_well_count; i++) {
23131bb76ff1Sjsg 		struct i915_power_well *power_well;
23141bb76ff1Sjsg 		enum intel_display_power_domain power_domain;
23151bb76ff1Sjsg 
23161bb76ff1Sjsg 		power_well = &power_domains->power_wells[i];
23171bb76ff1Sjsg 		seq_printf(m, "%-25s %d\n", intel_power_well_name(power_well),
23181bb76ff1Sjsg 			   intel_power_well_refcount(power_well));
23191bb76ff1Sjsg 
23201bb76ff1Sjsg 		for_each_power_domain(power_domain, intel_power_well_domains(power_well))
23211bb76ff1Sjsg 			seq_printf(m, "  %-23s %d\n",
23221bb76ff1Sjsg 				   intel_display_power_domain_str(power_domain),
23231bb76ff1Sjsg 				   power_domains->domain_use_count[power_domain]);
23241bb76ff1Sjsg 	}
23251bb76ff1Sjsg 
23261bb76ff1Sjsg 	mutex_unlock(&power_domains->lock);
23271bb76ff1Sjsg }
23281bb76ff1Sjsg 
/*
 * Maps a contiguous range of DDI ports / AUX channels to the power domain
 * of the range's first member; the accessors below index off these with
 * (port - port_start) / (aux_ch - aux_ch_start), so the per-port domains
 * are assumed consecutive in enum order.
 */
struct intel_ddi_port_domains {
	/* first/last DDI port covered by this entry (inclusive) */
	enum port port_start;
	enum port port_end;
	/* first/last AUX channel covered by this entry (inclusive) */
	enum aux_ch aux_ch_start;
	enum aux_ch aux_ch_end;

	/* domains of the first port/AUX ch; POWER_DOMAIN_INVALID if absent */
	enum intel_display_power_domain ddi_lanes;
	enum intel_display_power_domain ddi_io;
	enum intel_display_power_domain aux_io;
	enum intel_display_power_domain aux_legacy_usbc;
	enum intel_display_power_domain aux_tbt;
};
23411bb76ff1Sjsg 
/* Pre-display-version-11 platforms: ports A-F, no TBT AUX domains. */
static const struct intel_ddi_port_domains
i9xx_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};
23571bb76ff1Sjsg 
/* Display version 11: combo ports A-B, Type-C capable ports C-F. */
static const struct intel_ddi_port_domains
d11_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_B,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_B,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_C,
		.port_end = PORT_F,
		.aux_ch_start = AUX_CH_C,
		.aux_ch_end = AUX_CH_F,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
		.aux_io = POWER_DOMAIN_AUX_IO_C,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_C,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};
23841bb76ff1Sjsg 
/* Display version 12: combo ports A-C, Type-C ports TC1-TC6. */
static const struct intel_ddi_port_domains
d12_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC6,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC6,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	},
};
24111bb76ff1Sjsg 
/* Display version 13+: combo A-C, Type-C TC1-TC4, XE_LPD extra ports D-E. */
static const struct intel_ddi_port_domains
d13_port_domains[] = {
	{
		.port_start = PORT_A,
		.port_end = PORT_C,
		.aux_ch_start = AUX_CH_A,
		.aux_ch_end = AUX_CH_C,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
		.aux_io = POWER_DOMAIN_AUX_IO_A,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_A,
		.aux_tbt = POWER_DOMAIN_INVALID,
	}, {
		.port_start = PORT_TC1,
		.port_end = PORT_TC4,
		.aux_ch_start = AUX_CH_USBC1,
		.aux_ch_end = AUX_CH_USBC4,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
		.aux_io = POWER_DOMAIN_INVALID,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
		.aux_tbt = POWER_DOMAIN_AUX_TBT1,
	}, {
		.port_start = PORT_D_XELPD,
		.port_end = PORT_E_XELPD,
		.aux_ch_start = AUX_CH_D_XELPD,
		.aux_ch_end = AUX_CH_E_XELPD,

		.ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
		.ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
		.aux_io = POWER_DOMAIN_AUX_IO_D,
		.aux_legacy_usbc = POWER_DOMAIN_AUX_D,
		.aux_tbt = POWER_DOMAIN_INVALID,
	},
};
24491bb76ff1Sjsg 
24501bb76ff1Sjsg static void
intel_port_domains_for_platform(struct drm_i915_private * i915,const struct intel_ddi_port_domains ** domains,int * domains_size)24511bb76ff1Sjsg intel_port_domains_for_platform(struct drm_i915_private *i915,
24521bb76ff1Sjsg 				const struct intel_ddi_port_domains **domains,
24531bb76ff1Sjsg 				int *domains_size)
24541bb76ff1Sjsg {
24551bb76ff1Sjsg 	if (DISPLAY_VER(i915) >= 13) {
24561bb76ff1Sjsg 		*domains = d13_port_domains;
24571bb76ff1Sjsg 		*domains_size = ARRAY_SIZE(d13_port_domains);
24581bb76ff1Sjsg 	} else if (DISPLAY_VER(i915) >= 12) {
24591bb76ff1Sjsg 		*domains = d12_port_domains;
24601bb76ff1Sjsg 		*domains_size = ARRAY_SIZE(d12_port_domains);
24611bb76ff1Sjsg 	} else if (DISPLAY_VER(i915) >= 11) {
24621bb76ff1Sjsg 		*domains = d11_port_domains;
24631bb76ff1Sjsg 		*domains_size = ARRAY_SIZE(d11_port_domains);
24641bb76ff1Sjsg 	} else {
24651bb76ff1Sjsg 		*domains = i9xx_port_domains;
24661bb76ff1Sjsg 		*domains_size = ARRAY_SIZE(i9xx_port_domains);
24671bb76ff1Sjsg 	}
24681bb76ff1Sjsg }
24691bb76ff1Sjsg 
24701bb76ff1Sjsg static const struct intel_ddi_port_domains *
intel_port_domains_for_port(struct drm_i915_private * i915,enum port port)24711bb76ff1Sjsg intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
24721bb76ff1Sjsg {
24731bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains;
24741bb76ff1Sjsg 	int domains_size;
24751bb76ff1Sjsg 	int i;
24761bb76ff1Sjsg 
24771bb76ff1Sjsg 	intel_port_domains_for_platform(i915, &domains, &domains_size);
24781bb76ff1Sjsg 	for (i = 0; i < domains_size; i++)
24791bb76ff1Sjsg 		if (port >= domains[i].port_start && port <= domains[i].port_end)
24801bb76ff1Sjsg 			return &domains[i];
24811bb76ff1Sjsg 
24821bb76ff1Sjsg 	return NULL;
24831bb76ff1Sjsg }
24841bb76ff1Sjsg 
24851bb76ff1Sjsg enum intel_display_power_domain
intel_display_power_ddi_io_domain(struct drm_i915_private * i915,enum port port)24861bb76ff1Sjsg intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
24871bb76ff1Sjsg {
24881bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
24891bb76ff1Sjsg 
24901bb76ff1Sjsg 	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_io == POWER_DOMAIN_INVALID))
24911bb76ff1Sjsg 		return POWER_DOMAIN_PORT_DDI_IO_A;
24921bb76ff1Sjsg 
24931bb76ff1Sjsg 	return domains->ddi_io + (int)(port - domains->port_start);
24941bb76ff1Sjsg }
24951bb76ff1Sjsg 
24961bb76ff1Sjsg enum intel_display_power_domain
intel_display_power_ddi_lanes_domain(struct drm_i915_private * i915,enum port port)24971bb76ff1Sjsg intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
24981bb76ff1Sjsg {
24991bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
25001bb76ff1Sjsg 
25011bb76ff1Sjsg 	if (drm_WARN_ON(&i915->drm, !domains || domains->ddi_lanes == POWER_DOMAIN_INVALID))
25021bb76ff1Sjsg 		return POWER_DOMAIN_PORT_DDI_LANES_A;
25031bb76ff1Sjsg 
25041bb76ff1Sjsg 	return domains->ddi_lanes + (int)(port - domains->port_start);
25051bb76ff1Sjsg }
25061bb76ff1Sjsg 
25071bb76ff1Sjsg static const struct intel_ddi_port_domains *
intel_port_domains_for_aux_ch(struct drm_i915_private * i915,enum aux_ch aux_ch)25081bb76ff1Sjsg intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
25091bb76ff1Sjsg {
25101bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains;
25111bb76ff1Sjsg 	int domains_size;
25121bb76ff1Sjsg 	int i;
25131bb76ff1Sjsg 
25141bb76ff1Sjsg 	intel_port_domains_for_platform(i915, &domains, &domains_size);
25151bb76ff1Sjsg 	for (i = 0; i < domains_size; i++)
25161bb76ff1Sjsg 		if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
25171bb76ff1Sjsg 			return &domains[i];
25181bb76ff1Sjsg 
25191bb76ff1Sjsg 	return NULL;
25201bb76ff1Sjsg }
25211bb76ff1Sjsg 
25221bb76ff1Sjsg enum intel_display_power_domain
intel_display_power_aux_io_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)2523*f005ef32Sjsg intel_display_power_aux_io_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
2524*f005ef32Sjsg {
2525*f005ef32Sjsg 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
2526*f005ef32Sjsg 
2527*f005ef32Sjsg 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_io == POWER_DOMAIN_INVALID))
2528*f005ef32Sjsg 		return POWER_DOMAIN_AUX_IO_A;
2529*f005ef32Sjsg 
2530*f005ef32Sjsg 	return domains->aux_io + (int)(aux_ch - domains->aux_ch_start);
2531*f005ef32Sjsg }
2532*f005ef32Sjsg 
2533*f005ef32Sjsg enum intel_display_power_domain
intel_display_power_legacy_aux_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)25341bb76ff1Sjsg intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
25351bb76ff1Sjsg {
25361bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
25371bb76ff1Sjsg 
25381bb76ff1Sjsg 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID))
25391bb76ff1Sjsg 		return POWER_DOMAIN_AUX_A;
25401bb76ff1Sjsg 
25411bb76ff1Sjsg 	return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
25421bb76ff1Sjsg }
25431bb76ff1Sjsg 
25441bb76ff1Sjsg enum intel_display_power_domain
intel_display_power_tbt_aux_domain(struct drm_i915_private * i915,enum aux_ch aux_ch)25451bb76ff1Sjsg intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
25461bb76ff1Sjsg {
25471bb76ff1Sjsg 	const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
25481bb76ff1Sjsg 
25491bb76ff1Sjsg 	if (drm_WARN_ON(&i915->drm, !domains || domains->aux_tbt == POWER_DOMAIN_INVALID))
25501bb76ff1Sjsg 		return POWER_DOMAIN_AUX_TBT1;
25511bb76ff1Sjsg 
25521bb76ff1Sjsg 	return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
25531bb76ff1Sjsg }
2554