xref: /openbsd-src/sys/dev/pci/drm/i915/display/intel_display_power_well.c (revision cd67bc62953d5c95bdba8c17ea18f42457e99208)
11bb76ff1Sjsg // SPDX-License-Identifier: MIT
21bb76ff1Sjsg /*
31bb76ff1Sjsg  * Copyright © 2022 Intel Corporation
41bb76ff1Sjsg  */
51bb76ff1Sjsg 
61bb76ff1Sjsg #include "i915_drv.h"
71bb76ff1Sjsg #include "i915_irq.h"
8f005ef32Sjsg #include "i915_reg.h"
91bb76ff1Sjsg #include "intel_backlight_regs.h"
101bb76ff1Sjsg #include "intel_combo_phy.h"
111bb76ff1Sjsg #include "intel_combo_phy_regs.h"
121bb76ff1Sjsg #include "intel_crt.h"
131bb76ff1Sjsg #include "intel_de.h"
14f005ef32Sjsg #include "intel_display_irq.h"
151bb76ff1Sjsg #include "intel_display_power_well.h"
161bb76ff1Sjsg #include "intel_display_types.h"
171bb76ff1Sjsg #include "intel_dkl_phy.h"
18f005ef32Sjsg #include "intel_dkl_phy_regs.h"
191bb76ff1Sjsg #include "intel_dmc.h"
20f005ef32Sjsg #include "intel_dp_aux_regs.h"
211bb76ff1Sjsg #include "intel_dpio_phy.h"
221bb76ff1Sjsg #include "intel_dpll.h"
231bb76ff1Sjsg #include "intel_hotplug.h"
241bb76ff1Sjsg #include "intel_pcode.h"
251bb76ff1Sjsg #include "intel_pps.h"
261bb76ff1Sjsg #include "intel_tc.h"
271bb76ff1Sjsg #include "intel_vga.h"
281bb76ff1Sjsg #include "skl_watermark.h"
291bb76ff1Sjsg #include "vlv_sideband.h"
301bb76ff1Sjsg #include "vlv_sideband_reg.h"
311bb76ff1Sjsg 
/*
 * Control/status register set for a HSW+ style power well. Each register
 * carries per-well request and state bits for one of the four possible
 * requesters (BIOS, driver, KVMR, debug); see hsw_power_well_requesters().
 */
struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};
381bb76ff1Sjsg 
/* Per power well type vfuncs plus the register set they operate on. */
struct i915_power_well_ops {
	/* Registers used by the hsw_*() style ops, where applicable. */
	const struct i915_power_well_regs *regs;
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *i915,
			   struct i915_power_well *power_well);
};
661bb76ff1Sjsg 
671bb76ff1Sjsg static const struct i915_power_well_instance *
i915_power_well_instance(const struct i915_power_well * power_well)681bb76ff1Sjsg i915_power_well_instance(const struct i915_power_well *power_well)
691bb76ff1Sjsg {
701bb76ff1Sjsg 	return &power_well->desc->instances->list[power_well->instance_idx];
711bb76ff1Sjsg }
721bb76ff1Sjsg 
731bb76ff1Sjsg struct i915_power_well *
lookup_power_well(struct drm_i915_private * i915,enum i915_power_well_id power_well_id)741bb76ff1Sjsg lookup_power_well(struct drm_i915_private *i915,
751bb76ff1Sjsg 		  enum i915_power_well_id power_well_id)
761bb76ff1Sjsg {
771bb76ff1Sjsg 	struct i915_power_well *power_well;
781bb76ff1Sjsg 
791bb76ff1Sjsg 	for_each_power_well(i915, power_well)
801bb76ff1Sjsg 		if (i915_power_well_instance(power_well)->id == power_well_id)
811bb76ff1Sjsg 			return power_well;
821bb76ff1Sjsg 
831bb76ff1Sjsg 	/*
841bb76ff1Sjsg 	 * It's not feasible to add error checking code to the callers since
851bb76ff1Sjsg 	 * this condition really shouldn't happen and it doesn't even make sense
861bb76ff1Sjsg 	 * to abort things like display initialization sequences. Just return
871bb76ff1Sjsg 	 * the first power well and hope the WARN gets reported so we can fix
881bb76ff1Sjsg 	 * our driver.
891bb76ff1Sjsg 	 */
901bb76ff1Sjsg 	drm_WARN(&i915->drm, 1,
911bb76ff1Sjsg 		 "Power well %d not defined for this platform\n",
921bb76ff1Sjsg 		 power_well_id);
931bb76ff1Sjsg 	return &i915->display.power.domains.power_wells[0];
941bb76ff1Sjsg }
951bb76ff1Sjsg 
intel_power_well_enable(struct drm_i915_private * i915,struct i915_power_well * power_well)961bb76ff1Sjsg void intel_power_well_enable(struct drm_i915_private *i915,
971bb76ff1Sjsg 			     struct i915_power_well *power_well)
981bb76ff1Sjsg {
991bb76ff1Sjsg 	drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
1001bb76ff1Sjsg 	power_well->desc->ops->enable(i915, power_well);
1011bb76ff1Sjsg 	power_well->hw_enabled = true;
1021bb76ff1Sjsg }
1031bb76ff1Sjsg 
intel_power_well_disable(struct drm_i915_private * i915,struct i915_power_well * power_well)1041bb76ff1Sjsg void intel_power_well_disable(struct drm_i915_private *i915,
1051bb76ff1Sjsg 			      struct i915_power_well *power_well)
1061bb76ff1Sjsg {
1071bb76ff1Sjsg 	drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
1081bb76ff1Sjsg 	power_well->hw_enabled = false;
1091bb76ff1Sjsg 	power_well->desc->ops->disable(i915, power_well);
1101bb76ff1Sjsg }
1111bb76ff1Sjsg 
intel_power_well_sync_hw(struct drm_i915_private * i915,struct i915_power_well * power_well)1121bb76ff1Sjsg void intel_power_well_sync_hw(struct drm_i915_private *i915,
1131bb76ff1Sjsg 			      struct i915_power_well *power_well)
1141bb76ff1Sjsg {
1151bb76ff1Sjsg 	power_well->desc->ops->sync_hw(i915, power_well);
1161bb76ff1Sjsg 	power_well->hw_enabled =
1171bb76ff1Sjsg 		power_well->desc->ops->is_enabled(i915, power_well);
1181bb76ff1Sjsg }
1191bb76ff1Sjsg 
intel_power_well_get(struct drm_i915_private * i915,struct i915_power_well * power_well)1201bb76ff1Sjsg void intel_power_well_get(struct drm_i915_private *i915,
1211bb76ff1Sjsg 			  struct i915_power_well *power_well)
1221bb76ff1Sjsg {
1231bb76ff1Sjsg 	if (!power_well->count++)
1241bb76ff1Sjsg 		intel_power_well_enable(i915, power_well);
1251bb76ff1Sjsg }
1261bb76ff1Sjsg 
intel_power_well_put(struct drm_i915_private * i915,struct i915_power_well * power_well)1271bb76ff1Sjsg void intel_power_well_put(struct drm_i915_private *i915,
1281bb76ff1Sjsg 			  struct i915_power_well *power_well)
1291bb76ff1Sjsg {
1301bb76ff1Sjsg 	drm_WARN(&i915->drm, !power_well->count,
1311bb76ff1Sjsg 		 "Use count on power well %s is already zero",
1321bb76ff1Sjsg 		 i915_power_well_instance(power_well)->name);
1331bb76ff1Sjsg 
1341bb76ff1Sjsg 	if (!--power_well->count)
1351bb76ff1Sjsg 		intel_power_well_disable(i915, power_well);
1361bb76ff1Sjsg }
1371bb76ff1Sjsg 
intel_power_well_is_enabled(struct drm_i915_private * i915,struct i915_power_well * power_well)1381bb76ff1Sjsg bool intel_power_well_is_enabled(struct drm_i915_private *i915,
1391bb76ff1Sjsg 				 struct i915_power_well *power_well)
1401bb76ff1Sjsg {
1411bb76ff1Sjsg 	return power_well->desc->ops->is_enabled(i915, power_well);
1421bb76ff1Sjsg }
1431bb76ff1Sjsg 
/* Return the hw state last cached by the enable/disable/sync_hw paths. */
bool intel_power_well_is_enabled_cached(struct i915_power_well *power_well)
{
	return power_well->hw_enabled;
}
1481bb76ff1Sjsg 
intel_display_power_well_is_enabled(struct drm_i915_private * dev_priv,enum i915_power_well_id power_well_id)1491bb76ff1Sjsg bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
1501bb76ff1Sjsg 					 enum i915_power_well_id power_well_id)
1511bb76ff1Sjsg {
1521bb76ff1Sjsg 	struct i915_power_well *power_well;
1531bb76ff1Sjsg 
1541bb76ff1Sjsg 	power_well = lookup_power_well(dev_priv, power_well_id);
1551bb76ff1Sjsg 
1561bb76ff1Sjsg 	return intel_power_well_is_enabled(dev_priv, power_well);
1571bb76ff1Sjsg }
1581bb76ff1Sjsg 
/* %true if the descriptor marks this well as always-on. */
bool intel_power_well_is_always_on(struct i915_power_well *power_well)
{
	return power_well->desc->always_on;
}
1631bb76ff1Sjsg 
/* Human readable name of the well's platform instance, for logging. */
const char *intel_power_well_name(struct i915_power_well *power_well)
{
	return i915_power_well_instance(power_well)->name;
}
1681bb76ff1Sjsg 
/* Mask of display power domains served by this well. */
struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
	return &power_well->domains;
}
1731bb76ff1Sjsg 
/* Current use count, as managed by intel_power_well_get()/put(). */
int intel_power_well_refcount(struct i915_power_well *power_well)
{
	return power_well->count;
}
1781bb76ff1Sjsg 
1791bb76ff1Sjsg /*
1801bb76ff1Sjsg  * Starting with Haswell, we have a "Power Down Well" that can be turned off
1811bb76ff1Sjsg  * when not needed anymore. We have 4 registers that can request the power well
1821bb76ff1Sjsg  * to be enabled, and it will only be disabled if none of the registers is
1831bb76ff1Sjsg  * requesting it to be enabled.
1841bb76ff1Sjsg  */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	/* Re-initialize VGA I/O space decoding for wells that carry VGA. */
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	/* Re-enable interrupts for the pipes located on this well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
1941bb76ff1Sjsg 
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	/* Quiesce interrupts on the affected pipes before powering down. */
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
2011bb76ff1Sjsg 
2021bb76ff1Sjsg #define ICL_AUX_PW_TO_CH(pw_idx)	\
2031bb76ff1Sjsg 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
2041bb76ff1Sjsg 
2051bb76ff1Sjsg #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
2061bb76ff1Sjsg 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
2071bb76ff1Sjsg 
icl_aux_pw_to_ch(const struct i915_power_well * power_well)2081bb76ff1Sjsg static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
2091bb76ff1Sjsg {
2101bb76ff1Sjsg 	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
2111bb76ff1Sjsg 
2121bb76ff1Sjsg 	return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
2131bb76ff1Sjsg 					     ICL_AUX_PW_TO_CH(pw_idx);
2141bb76ff1Sjsg }
2151bb76ff1Sjsg 
2161bb76ff1Sjsg static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private * dev_priv,enum aux_ch aux_ch)2171bb76ff1Sjsg aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
2181bb76ff1Sjsg 		       enum aux_ch aux_ch)
2191bb76ff1Sjsg {
2201bb76ff1Sjsg 	struct intel_digital_port *dig_port = NULL;
2211bb76ff1Sjsg 	struct intel_encoder *encoder;
2221bb76ff1Sjsg 
2231bb76ff1Sjsg 	for_each_intel_encoder(&dev_priv->drm, encoder) {
2241bb76ff1Sjsg 		/* We'll check the MST primary port */
2251bb76ff1Sjsg 		if (encoder->type == INTEL_OUTPUT_DP_MST)
2261bb76ff1Sjsg 			continue;
2271bb76ff1Sjsg 
2281bb76ff1Sjsg 		dig_port = enc_to_dig_port(encoder);
2291bb76ff1Sjsg 		if (!dig_port)
2301bb76ff1Sjsg 			continue;
2311bb76ff1Sjsg 
2321bb76ff1Sjsg 		if (dig_port->aux_ch != aux_ch) {
2331bb76ff1Sjsg 			dig_port = NULL;
2341bb76ff1Sjsg 			continue;
2351bb76ff1Sjsg 		}
2361bb76ff1Sjsg 
2371bb76ff1Sjsg 		break;
2381bb76ff1Sjsg 	}
2391bb76ff1Sjsg 
2401bb76ff1Sjsg 	return dig_port;
2411bb76ff1Sjsg }
2421bb76ff1Sjsg 
icl_aux_pw_to_phy(struct drm_i915_private * i915,const struct i915_power_well * power_well)2431bb76ff1Sjsg static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
2441bb76ff1Sjsg 				  const struct i915_power_well *power_well)
2451bb76ff1Sjsg {
2461bb76ff1Sjsg 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
2471bb76ff1Sjsg 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
2481bb76ff1Sjsg 
249*cd67bc62Sjsg 	/*
250*cd67bc62Sjsg 	 * FIXME should we care about the (VBT defined) dig_port->aux_ch
251*cd67bc62Sjsg 	 * relationship or should this be purely defined by the hardware layout?
252*cd67bc62Sjsg 	 * Currently if the port doesn't appear in the VBT, or if it's declared
253*cd67bc62Sjsg 	 * as HDMI-only and routed to a combo PHY, the encoder either won't be
254*cd67bc62Sjsg 	 * present at all or it will not have an aux_ch assigned.
255*cd67bc62Sjsg 	 */
256*cd67bc62Sjsg 	return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
2571bb76ff1Sjsg }
2581bb76ff1Sjsg 
/*
 * Wait for the well's state bit to ack the pending enable request, or -
 * on DG2 wells flagged with fixed_enable_delay - just wait a fixed delay.
 * @timeout_expected suppresses the WARN when the caller knows the ack may
 * legitimately not arrive (e.g. TBT tunnel down).
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well,
					   bool timeout_expected)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	/* GNU "?:" - use the per-well timeout override, defaulting to 1. */
	int timeout = power_well->desc->enable_timeout ? : 1;

	/*
	 * For some power wells we're not supposed to watch the status bit for
	 * an ack, but rather just wait a fixed amount of time and then
	 * proceed.  This is only used on DG2.
	 */
	if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
		usleep_range(600, 1200);
		return;
	}

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    intel_power_well_name(power_well));

		drm_WARN_ON(&dev_priv->drm, !timeout_expected);

	}
}
2871bb76ff1Sjsg 
hsw_power_well_requesters(struct drm_i915_private * dev_priv,const struct i915_power_well_regs * regs,int pw_idx)2881bb76ff1Sjsg static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
2891bb76ff1Sjsg 				     const struct i915_power_well_regs *regs,
2901bb76ff1Sjsg 				     int pw_idx)
2911bb76ff1Sjsg {
2921bb76ff1Sjsg 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
2931bb76ff1Sjsg 	u32 ret;
2941bb76ff1Sjsg 
2951bb76ff1Sjsg 	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
2961bb76ff1Sjsg 	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
2971bb76ff1Sjsg 	if (regs->kvmr.reg)
2981bb76ff1Sjsg 		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
2991bb76ff1Sjsg 	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
3001bb76ff1Sjsg 
3011bb76ff1Sjsg 	return ret;
3021bb76ff1Sjsg }
3031bb76ff1Sjsg 
/* Wait for the well to report disabled, unless another requester holds it. */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* NOTE: the condition assigns both 'disabled' and 'reqs' as side effects. */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    intel_power_well_name(power_well),
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
3321bb76ff1Sjsg 
/* Wait for the fuse distribution status of power gate @pg, WARN on timeout. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
3411bb76ff1Sjsg 
/*
 * Generic HSW+ power well enable sequence: optional fuse waits around
 * setting the driver's request bit, then the post-enable fixups (VGA,
 * pipe interrupts).
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);

		/* Wa_16013190616:adlp */
		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);

		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Set the driver's request bit to power the well up. */
	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	if (power_well->desc->has_fuses) {
		enum skl_power_gate pg;

		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->irq_pipe_mask,
				   power_well->desc->has_vga);
}
3851bb76ff1Sjsg 
/*
 * Generic HSW+ power well disable sequence: quiesce dependent interrupts,
 * clear the driver's request bit and wait for the well to power down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;

	hsw_power_well_pre_disable(dev_priv,
				   power_well->desc->irq_pipe_mask);

	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
3981bb76ff1Sjsg 
intel_port_is_edp(struct drm_i915_private * i915,enum port port)399f005ef32Sjsg static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port)
400f005ef32Sjsg {
401f005ef32Sjsg 	struct intel_encoder *encoder;
402f005ef32Sjsg 
403f005ef32Sjsg 	for_each_intel_encoder(&i915->drm, encoder) {
404f005ef32Sjsg 		if (encoder->type == INTEL_OUTPUT_EDP &&
405f005ef32Sjsg 		    encoder->port == port)
406f005ef32Sjsg 			return true;
407f005ef32Sjsg 	}
408f005ef32Sjsg 
409f005ef32Sjsg 	return false;
410f005ef32Sjsg }
411f005ef32Sjsg 
/* ICL-only combo PHY AUX well enable, including Display WA #1178. */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));

	/* FIXME this is a mess */
	if (phy != PHY_NONE)
		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
			     0, ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_port_is_edp(dev_priv, (enum port)phy))
		intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx),
			     0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS);
}
4371bb76ff1Sjsg 
/* ICL-only combo PHY AUX well disable, mirroring the enable sequence. */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	/* FIXME this is a mess */
	if (phy != PHY_NONE)
		intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
			     ICL_LANE_ENABLE_AUX, 0);

	intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
4571bb76ff1Sjsg 
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/*
 * Debug build sanity check that a TC port reference is held when its AUX
 * power well is toggled; compiled out otherwise.
 */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	/* On ICL the AUX PW itself provides the reference via TC-cold. */
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

/* No-op stub when runtime PM debugging is disabled. */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif
4821bb76ff1Sjsg 
4831bb76ff1Sjsg #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
4841bb76ff1Sjsg 
icl_tc_cold_exit(struct drm_i915_private * i915)4851bb76ff1Sjsg static void icl_tc_cold_exit(struct drm_i915_private *i915)
4861bb76ff1Sjsg {
4871bb76ff1Sjsg 	int ret, tries = 0;
4881bb76ff1Sjsg 
4891bb76ff1Sjsg 	while (1) {
4901bb76ff1Sjsg 		ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
4911bb76ff1Sjsg 					      250, 1);
4921bb76ff1Sjsg 		if (ret != -EAGAIN || ++tries == 3)
4931bb76ff1Sjsg 			break;
4941bb76ff1Sjsg 		drm_msleep(1);
4951bb76ff1Sjsg 	}
4961bb76ff1Sjsg 
4971bb76ff1Sjsg 	/* Spec states that TC cold exit can take up to 1ms to complete */
4981bb76ff1Sjsg 	if (!ret)
4991bb76ff1Sjsg 		drm_msleep(1);
5001bb76ff1Sjsg 
5011bb76ff1Sjsg 	/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
5021bb76ff1Sjsg 	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
5031bb76ff1Sjsg 		    "succeeded");
5041bb76ff1Sjsg }
5051bb76ff1Sjsg 
/*
 * Enable a TypeC AUX power well: select TBT vs. native AUX IO, request
 * the well, handle the TC-cold exit cases where a timeout is expected,
 * and on TGL+ wait for the PHY microcontroller health indication.
 */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	bool is_tbt = power_well->desc->is_tc_tbt;
	bool timeout_expected;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	/* Route the AUX channel through TBT IO for TBT wells. */
	intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch),
		     DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0);

	intel_de_rmw(dev_priv, regs->driver,
		     0,
		     HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));

	/*
	 * An AUX timeout is expected if the TBT DP tunnel is down,
	 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
	 * exit sequence.
	 */
	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
	if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
		icl_tc_cold_exit(dev_priv);

	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);

		if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port)) &
			     DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting TC uC health\n");
	}
}
5471bb76ff1Sjsg 
5481bb76ff1Sjsg static void
icl_aux_power_well_enable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)5491bb76ff1Sjsg icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
5501bb76ff1Sjsg 			  struct i915_power_well *power_well)
5511bb76ff1Sjsg {
5521bb76ff1Sjsg 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
5531bb76ff1Sjsg 
5541bb76ff1Sjsg 	if (intel_phy_is_tc(dev_priv, phy))
5551bb76ff1Sjsg 		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
5561bb76ff1Sjsg 	else if (IS_ICELAKE(dev_priv))
5571bb76ff1Sjsg 		return icl_combo_phy_aux_power_well_enable(dev_priv,
5581bb76ff1Sjsg 							   power_well);
5591bb76ff1Sjsg 	else
5601bb76ff1Sjsg 		return hsw_power_well_enable(dev_priv, power_well);
5611bb76ff1Sjsg }
5621bb76ff1Sjsg 
5631bb76ff1Sjsg static void
icl_aux_power_well_disable(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)5641bb76ff1Sjsg icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
5651bb76ff1Sjsg 			   struct i915_power_well *power_well)
5661bb76ff1Sjsg {
5671bb76ff1Sjsg 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
5681bb76ff1Sjsg 
5691bb76ff1Sjsg 	if (intel_phy_is_tc(dev_priv, phy))
5701bb76ff1Sjsg 		return hsw_power_well_disable(dev_priv, power_well);
5711bb76ff1Sjsg 	else if (IS_ICELAKE(dev_priv))
5721bb76ff1Sjsg 		return icl_combo_phy_aux_power_well_disable(dev_priv,
5731bb76ff1Sjsg 							    power_well);
5741bb76ff1Sjsg 	else
5751bb76ff1Sjsg 		return hsw_power_well_disable(dev_priv, power_well);
5761bb76ff1Sjsg }
5771bb76ff1Sjsg 
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	/* Require both the request and the state bit to be set. */
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
		   HSW_PWR_WELL_CTL_STATE(pw_idx);
	u32 val;

	val = intel_de_read(dev_priv, regs->driver);

	/*
	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
	 * and the MISC_IO PW will be not restored, so check instead for the
	 * BIOS's own request bits, which are forced-on for these power wells
	 * when exiting DC5/6.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
		val |= intel_de_read(dev_priv, regs->bios);

	return (val & mask) == mask;
}
6071bb76ff1Sjsg 
/* WARN about every known precondition violated when about to enter DC9. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
6321bb76ff1Sjsg 
/*
 * Sanity check the preconditions for exiting DC9: display interrupts and
 * DC5 must both be disabled.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
6501bb76ff1Sjsg 
/*
 * Write @state to DC_STATE_EN, rewriting as needed until the value sticks.
 * The DMC firmware has been observed to revert the register to a stale
 * value, so reread/rewrite until the value stays stable, giving up after
 * 100 rewrites.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	intel_de_write(dev_priv, DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = intel_de_read(dev_priv, DC_STATE_EN);

		if (v != state) {
			intel_de_write(dev_priv, DC_STATE_EN, state);
			rewrites++;
			/* Restart the stability count after every rewrite. */
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Value stayed stable across several rereads: done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		drm_err(&dev_priv->drm,
			"Writing dc state to 0x%x failed, now 0x%x\n",
			state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		drm_dbg_kms(&dev_priv->drm,
			    "Rewrote dc state to 0x%x %d times\n",
			    state, rewrites);
}
6891bb76ff1Sjsg 
gen9_dc_mask(struct drm_i915_private * dev_priv)6901bb76ff1Sjsg static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
6911bb76ff1Sjsg {
6921bb76ff1Sjsg 	u32 mask;
6931bb76ff1Sjsg 
6941bb76ff1Sjsg 	mask = DC_STATE_EN_UPTO_DC5;
6951bb76ff1Sjsg 
6961bb76ff1Sjsg 	if (DISPLAY_VER(dev_priv) >= 12)
6971bb76ff1Sjsg 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
6981bb76ff1Sjsg 					  | DC_STATE_EN_DC9;
6991bb76ff1Sjsg 	else if (DISPLAY_VER(dev_priv) == 11)
7001bb76ff1Sjsg 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
7011bb76ff1Sjsg 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
7021bb76ff1Sjsg 		mask |= DC_STATE_EN_DC9;
7031bb76ff1Sjsg 	else
7041bb76ff1Sjsg 		mask |= DC_STATE_EN_UPTO_DC6;
7051bb76ff1Sjsg 
7061bb76ff1Sjsg 	return mask;
7071bb76ff1Sjsg }
7081bb76ff1Sjsg 
/*
 * Resync the driver's cached DC state with whatever is currently
 * programmed in DC_STATE_EN, e.g. during init/resume when the HW state
 * may not match what the driver last wrote.
 */
void gen9_sanitize_dc_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	u32 val;

	if (!HAS_DISPLAY(i915))
		return;

	val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915);

	drm_dbg_kms(&i915->drm,
		    "Resetting DC state tracking from %02x to %02x\n",
		    power_domains->dc_state, val);
	power_domains->dc_state = val;
}
7241bb76ff1Sjsg 
7251bb76ff1Sjsg /**
7261bb76ff1Sjsg  * gen9_set_dc_state - set target display C power state
7271bb76ff1Sjsg  * @dev_priv: i915 device instance
7281bb76ff1Sjsg  * @state: target DC power state
7291bb76ff1Sjsg  * - DC_STATE_DISABLE
7301bb76ff1Sjsg  * - DC_STATE_EN_UPTO_DC5
7311bb76ff1Sjsg  * - DC_STATE_EN_UPTO_DC6
7321bb76ff1Sjsg  * - DC_STATE_EN_DC9
7331bb76ff1Sjsg  *
7341bb76ff1Sjsg  * Signal to DMC firmware/HW the target DC power state passed in @state.
7351bb76ff1Sjsg  * DMC/HW can turn off individual display clocks and power rails when entering
7361bb76ff1Sjsg  * a deeper DC power state (higher in number) and turns these back when exiting
7371bb76ff1Sjsg  * that state to a shallower power state (lower in number). The HW will decide
7381bb76ff1Sjsg  * when to actually enter a given state on an on-demand basis, for instance
7391bb76ff1Sjsg  * depending on the active state of display pipes. The state of display
7401bb76ff1Sjsg  * registers backed by affected power rails are saved/restored as needed.
7411bb76ff1Sjsg  *
7421bb76ff1Sjsg  * Based on the above enabling a deeper DC power state is asynchronous wrt.
7431bb76ff1Sjsg  * enabling it. Disabling a deeper power state is synchronous: for instance
7441bb76ff1Sjsg  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
7451bb76ff1Sjsg  * back on and register state is restored. This is guaranteed by the MMIO write
7461bb76ff1Sjsg  * to DC_STATE_EN blocking until the state is restored.
7471bb76ff1Sjsg  */
void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 val;
	u32 mask;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Clamp the request to the DC states allowed on this platform/config. */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     state & ~power_domains->allowed_dc_mask))
		state &= power_domains->allowed_dc_mask;

	val = intel_de_read(dev_priv, DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
		    val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != power_domains->dc_state)
		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
			power_domains->dc_state, val & mask);

	/* Update only the DC state bits, preserving the rest of the register. */
	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	/* Cache what we programmed so the next call can detect DMC interference. */
	power_domains->dc_state = val & mask;
}
7781bb76ff1Sjsg 
/* Allow the DMC to enter the TGL+ DC3CO display power state. */
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
7841bb76ff1Sjsg 
/*
 * Disable DC3CO: clear its status bit, disable all DC states and wait for
 * the HW's DC3CO exit time.
 */
static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
	intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0);
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	/*
	 * Delay of 200us DC3CO Exit time B.Spec 49196
	 */
	usleep_range(200, 210);
}
7951bb76ff1Sjsg 
/*
 * Sanity check the preconditions for entering DC5: all power wells at or
 * above the platform's DC5 limit must be off, DC5 must not already be
 * programmed, a runtime PM wakelock must be held and the DMC firmware
 * must be loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (DISPLAY_VER(dev_priv) == 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_dmc_loaded(dev_priv);
}
8181bb76ff1Sjsg 
/* Allow the DMC to enter the DC5 display power state. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
8321bb76ff1Sjsg 
/*
 * Sanity check the preconditions for entering DC6: the utility pin must
 * not be enabled in PWM mode, DC6 must not already be programmed and the
 * DMC firmware must be loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, UTIL_PIN_CTL) &
		       (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) ==
		      (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM),
		      "Utility pin enabled in PWM mode\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_dmc_loaded(dev_priv);
}
8471bb76ff1Sjsg 
/* Allow the DMC to enter DC states up to and including DC6. */
void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
8611bb76ff1Sjsg 
/* Enter the DC9 display power state. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_pps_reset_all(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
8761bb76ff1Sjsg 
/* Exit the DC9 display power state and reapply the PPS register unlock WA. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
8871bb76ff1Sjsg 
/*
 * Transfer ownership of the power well's request bit from the BIOS to the
 * driver: if the BIOS has requested the well, set the driver's request bit
 * first (keeping the well enabled), then clear the BIOS's request bit.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
	int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}
9051bb76ff1Sjsg 
/* Enable the well by initializing the DDI PHY it backs. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
9111bb76ff1Sjsg 
/* Disable the well by uninitializing the DDI PHY it backs. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
9171bb76ff1Sjsg 
/* The well's enabled state is that of the DDI PHY it backs. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
}
9231bb76ff1Sjsg 
bxt_verify_ddi_phy_power_wells(struct drm_i915_private * dev_priv)9241bb76ff1Sjsg static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
9251bb76ff1Sjsg {
9261bb76ff1Sjsg 	struct i915_power_well *power_well;
9271bb76ff1Sjsg 
9281bb76ff1Sjsg 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
9291bb76ff1Sjsg 	if (intel_power_well_refcount(power_well) > 0)
9301bb76ff1Sjsg 		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
9311bb76ff1Sjsg 
9321bb76ff1Sjsg 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
9331bb76ff1Sjsg 	if (intel_power_well_refcount(power_well) > 0)
9341bb76ff1Sjsg 		bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
9351bb76ff1Sjsg 
9361bb76ff1Sjsg 	if (IS_GEMINILAKE(dev_priv)) {
9371bb76ff1Sjsg 		power_well = lookup_power_well(dev_priv,
9381bb76ff1Sjsg 					       GLK_DISP_PW_DPIO_CMN_C);
9391bb76ff1Sjsg 		if (intel_power_well_refcount(power_well) > 0)
9401bb76ff1Sjsg 			bxt_ddi_phy_verify_state(dev_priv,
9411bb76ff1Sjsg 						 i915_power_well_instance(power_well)->bxt.phy);
9421bb76ff1Sjsg 	}
9431bb76ff1Sjsg }
9441bb76ff1Sjsg 
gen9_dc_off_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)9451bb76ff1Sjsg static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
9461bb76ff1Sjsg 					   struct i915_power_well *power_well)
9471bb76ff1Sjsg {
9481bb76ff1Sjsg 	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
9491bb76ff1Sjsg 		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
9501bb76ff1Sjsg }
9511bb76ff1Sjsg 
gen9_assert_dbuf_enabled(struct drm_i915_private * dev_priv)9521bb76ff1Sjsg static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
9531bb76ff1Sjsg {
9541bb76ff1Sjsg 	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
9551bb76ff1Sjsg 	u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
9561bb76ff1Sjsg 
9571bb76ff1Sjsg 	drm_WARN(&dev_priv->drm,
9581bb76ff1Sjsg 		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
9591bb76ff1Sjsg 		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
9601bb76ff1Sjsg 		 hw_enabled_dbuf_slices,
9611bb76ff1Sjsg 		 enabled_dbuf_slices);
9621bb76ff1Sjsg }
9631bb76ff1Sjsg 
/*
 * Disable all dynamic DC states and sanity check that the HW state the
 * DMC was supposed to preserve across DC transitions (CDCLK, DBuf, combo
 * PHYs) matches the driver's expectations.
 */
void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	struct intel_cdclk_config cdclk_config = {};

	/* DC3CO has its own dedicated disable sequence. */
	if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}
9981bb76ff1Sjsg 
/* Enabling the "DC off" well means disabling all dynamic DC states. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}
10041bb76ff1Sjsg 
/*
 * Disabling the "DC off" well lets the DMC enter the currently targeted
 * dynamic DC state, provided the DMC firmware payload is loaded. A target
 * state of DC_STATE_DISABLE intentionally falls through the switch doing
 * nothing.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;

	if (!intel_dmc_has_payload(dev_priv))
		return;

	switch (power_domains->target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}
10251bb76ff1Sjsg 
/* No-op: this power well type has no HW state to sync. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
10301bb76ff1Sjsg 
/* No-op: an always-on power well cannot be enabled or disabled. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
10351bb76ff1Sjsg 
/* Always-on power wells are, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
10411bb76ff1Sjsg 
/*
 * On i830 both pipes act as the display power well: enable any pipe that
 * isn't already running.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
10501bb76ff1Sjsg 
/* Disable both i830 pipes, in reverse order of enabling. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
10571bb76ff1Sjsg 
/* The well counts as enabled only when both pipes are enabled. */
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE &&
		intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE;
}
10641bb76ff1Sjsg 
/* Force the pipes' HW state to match the well's current SW refcount. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (intel_power_well_refcount(power_well) > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
10731bb76ff1Sjsg 
/*
 * Request a VLV/CHV power well state change via the Punit and poll the
 * Punit status register until it reflects the requested state, with a
 * 100 ms timeout.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

/* True once the Punit reports the well in the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Update only this well's bits in the Punit control register. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
11101bb76ff1Sjsg 
/* Enable a VLV power well via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
11161bb76ff1Sjsg 
/* Disable a VLV power well via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
11221bb76ff1Sjsg 
/*
 * Read back a VLV power well's state from the Punit, warning if the
 * status is neither fully on nor fully gated, or if the control register
 * disagrees with the status (someone else poking at the power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
11581bb76ff1Sjsg 
/*
 * Program the VLV/CHV display clock gating, memory arbiter and raw clock
 * frequency registers to their required initial values.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv),
		     ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	/* Program the raw clock frequency in kHz for the PPS/backlight logic. */
	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
11821bb76ff1Sjsg 
/*
 * Bring up the SW/HW state that depends on the VLV display power well:
 * reference clocks, clock gating, display IRQs and - outside of driver
 * init - hotplug, CRT and VGA state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->display.power.domains.initializing)
		return;

	intel_hpd_init(dev_priv);
	intel_hpd_poll_disable(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
12321bb76ff1Sjsg 
/*
 * Tear down the SW state that depends on the VLV display power well:
 * disable display IRQs, reset the power sequencer state and re-enable HPD
 * polling (unless we're suspending).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_pps_reset_all(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
#ifdef __linux__
	if (!dev_priv->drm.dev->power.is_suspended)
#else
	/* OpenBSD: "cold" is set while the system is suspending/booting. */
	if (!cold)
#endif
		intel_hpd_poll_enable(dev_priv);
}
12521bb76ff1Sjsg 
/* Power on the VLV display well via the Punit, then init dependent state. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
12601bb76ff1Sjsg 
/*
 * Counterpart of vlv_display_power_well_enable(): quiesce the display
 * state first (IRQs, PPS, HPD polling), then power the well down.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
12681bb76ff1Sjsg 
/*
 * Power up the VLV DPIO common lane well and de-assert the common lane
 * reset. The delay before enabling the well satisfies the documented
 * minimum time after the ref/cri clock was enabled.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST);
}
12901bb76ff1Sjsg 
/*
 * Assert the DPIO common lane reset and power the well down. All pipe
 * PLLs must already be off; the asserts catch sequencing bugs where a
 * PLL is still running when the common lanes lose power.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0);

	vlv_set_power_well(dev_priv, power_well, false);
}
13041bb76ff1Sjsg 
/* True iff all bits in @bits are set in @val. Local helper, #undef'd below. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Cross-check the CHV DISPLAY_PHY_STATUS register against the expected
 * state derived from our DISPLAY_PHY_CONTROL shadow (chv_phy_control)
 * and the state of the two DPIO common lane power wells. Complains via
 * drm_err() on mismatch; purely a consistency assert, no HW is changed.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->display.power.chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Each spline LDO tracks its own pair of lanes (0x3 / 0xc) */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->display.power.chv_phy_control);
}

#undef BITS_SET
14121bb76ff1Sjsg 
/*
 * Power up one of the two CHV DPIO common lane wells (BC -> PHY0 via
 * pipe A sideband, D -> PHY1 via pipe C sideband), wait for powergood,
 * enable dynamic power down in the PHY, then de-assert the common lane
 * reset via DISPLAY_PHY_CONTROL. Finishes with a status consistency check.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	/* pipe here selects the sideband port used for DPIO access */
	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	/* De-assert the common lane reset and publish the new shadow value */
	dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
14781bb76ff1Sjsg 
/*
 * Power down a CHV DPIO common lane well: verify the PLLs fed by the PHY
 * are off, assert the common lane reset via DISPLAY_PHY_CONTROL, then
 * drop the well. After a full reset the PHY state asserts become valid.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 id != VLV_DISP_PW_DPIO_CMN_BC &&
			 id != CHV_DISP_PW_DPIO_CMN_D);

	if (id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->display.power.chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->display.power.chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
15131bb76ff1Sjsg 
/*
 * Verify that the per-lane power down status read back from the CHV PHY
 * (CMN_DW0/DW6) matches what the given override/mask combination should
 * produce. Warns on mismatch; does not modify any state.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->display.power.chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* The status bits live at different shifts for the two channels */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
15771bb76ff1Sjsg 
/*
 * Enable or disable the power-down override for a whole PHY channel in
 * DISPLAY_PHY_CONTROL, under the power domains lock.
 *
 * Returns the previous override state so the caller can restore it later.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* No change requested; avoid the register write entirely */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
16101bb76ff1Sjsg 
/*
 * Update the per-lane power-down override mask (and the override enable
 * bit) for the PHY channel driving @encoder, then write the new
 * DISPLAY_PHY_CONTROL value. Held under the power domains lock; ends
 * with consistency asserts against the HW state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* Replace the old 4-bit lane mask with the new one */
	dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->display.power.chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
16421bb76ff1Sjsg 
/*
 * Read the CHV pipe power well state from the punit (DSPSSPM). Warns if
 * the status is neither fully on nor fully gated, or if the control bits
 * disagree with the status (someone else poking at the power controls).
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
16721bb76ff1Sjsg 
/*
 * Request the CHV pipe power well on or off via the punit and poll for
 * the status to reflect the request. Skips the write when the well is
 * already in the requested state; logs an error on poll timeout.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

	/* true once the punit reports the requested status */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
17071bb76ff1Sjsg 
/* Re-sync DISPLAY_PHY_CONTROL from our SW shadow (chv_phy_control). */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->display.power.chv_phy_control);
}
17141bb76ff1Sjsg 
/* Power on the CHV pipe well, then (re)initialize dependent display state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
17221bb76ff1Sjsg 
/* Quiesce display state first, then power down the CHV pipe well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
17301bb76ff1Sjsg 
/*
 * Ask pcode to block (@block == true) or unblock TC-cold entry. Retries
 * up to 3 times with a 1ms delay between attempts; a block request that
 * pcode acknowledges with EXIT_FAILED is treated as -EIO and retried.
 * The final outcome is only logged - callers cannot observe failure.
 */
static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should timeout the request after 200us
		 * but the function below will timeout after 500us
		 */
		ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
		if (ret == 0) {
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		drm_msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}
17721bb76ff1Sjsg 
/* "Enabling" this well means blocking TC-cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}
17791bb76ff1Sjsg 
/* "Disabling" this well means unblocking TC-cold entry via pcode. */
static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}
17861bb76ff1Sjsg 
17871bb76ff1Sjsg static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private * i915,struct i915_power_well * power_well)17881bb76ff1Sjsg tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
17891bb76ff1Sjsg 				   struct i915_power_well *power_well)
17901bb76ff1Sjsg {
17911bb76ff1Sjsg 	if (intel_power_well_refcount(power_well) > 0)
17921bb76ff1Sjsg 		tgl_tc_cold_off_power_well_enable(i915, power_well);
17931bb76ff1Sjsg 	else
17941bb76ff1Sjsg 		tgl_tc_cold_off_power_well_disable(i915, power_well);
17951bb76ff1Sjsg }
17961bb76ff1Sjsg 
/*
 * Report the TC-cold-off well as enabled iff it has a SW refcount.
 */
static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * Not a correct implementation, but there is no way to just read
	 * the state back from PCODE, so return the refcount to avoid
	 * state mismatch errors.
	 */
	return intel_power_well_refcount(power_well);
}
18071bb76ff1Sjsg 
/*
 * Power up a XELPDP AUX channel by setting the power request bit and
 * waiting the fixed settle time - the status bit cannot be polled for
 * power-up completion on this platform.
 */
static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;

	intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);

	/*
	 * The power status flag cannot be used to determine whether aux
	 * power wells have finished powering up.  Instead we're
	 * expected to just wait a fixed 600us after raising the request
	 * bit.
	 */
	usleep_range(600, 1200);
}
18251bb76ff1Sjsg 
/*
 * Power down a XELPDP AUX channel: clear the power request bit and wait
 * briefly for the hardware to settle.
 */
static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;

	intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     0);
	usleep_range(10, 30);
}
18361bb76ff1Sjsg 
xelpdp_aux_power_well_enabled(struct drm_i915_private * dev_priv,struct i915_power_well * power_well)18371bb76ff1Sjsg static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
18381bb76ff1Sjsg 					  struct i915_power_well *power_well)
18391bb76ff1Sjsg {
18401bb76ff1Sjsg 	enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
18411bb76ff1Sjsg 
18421bb76ff1Sjsg 	return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) &
18431bb76ff1Sjsg 		XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
18441bb76ff1Sjsg }
18451bb76ff1Sjsg 
18461bb76ff1Sjsg const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
18471bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
18481bb76ff1Sjsg 	.enable = i9xx_always_on_power_well_noop,
18491bb76ff1Sjsg 	.disable = i9xx_always_on_power_well_noop,
18501bb76ff1Sjsg 	.is_enabled = i9xx_always_on_power_well_enabled,
18511bb76ff1Sjsg };
18521bb76ff1Sjsg 
18531bb76ff1Sjsg const struct i915_power_well_ops chv_pipe_power_well_ops = {
18541bb76ff1Sjsg 	.sync_hw = chv_pipe_power_well_sync_hw,
18551bb76ff1Sjsg 	.enable = chv_pipe_power_well_enable,
18561bb76ff1Sjsg 	.disable = chv_pipe_power_well_disable,
18571bb76ff1Sjsg 	.is_enabled = chv_pipe_power_well_enabled,
18581bb76ff1Sjsg };
18591bb76ff1Sjsg 
18601bb76ff1Sjsg const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
18611bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
18621bb76ff1Sjsg 	.enable = chv_dpio_cmn_power_well_enable,
18631bb76ff1Sjsg 	.disable = chv_dpio_cmn_power_well_disable,
18641bb76ff1Sjsg 	.is_enabled = vlv_power_well_enabled,
18651bb76ff1Sjsg };
18661bb76ff1Sjsg 
18671bb76ff1Sjsg const struct i915_power_well_ops i830_pipes_power_well_ops = {
18681bb76ff1Sjsg 	.sync_hw = i830_pipes_power_well_sync_hw,
18691bb76ff1Sjsg 	.enable = i830_pipes_power_well_enable,
18701bb76ff1Sjsg 	.disable = i830_pipes_power_well_disable,
18711bb76ff1Sjsg 	.is_enabled = i830_pipes_power_well_enabled,
18721bb76ff1Sjsg };
18731bb76ff1Sjsg 
18741bb76ff1Sjsg static const struct i915_power_well_regs hsw_power_well_regs = {
18751bb76ff1Sjsg 	.bios	= HSW_PWR_WELL_CTL1,
18761bb76ff1Sjsg 	.driver	= HSW_PWR_WELL_CTL2,
18771bb76ff1Sjsg 	.kvmr	= HSW_PWR_WELL_CTL3,
18781bb76ff1Sjsg 	.debug	= HSW_PWR_WELL_CTL4,
18791bb76ff1Sjsg };
18801bb76ff1Sjsg 
/*
 * Generic HSW-style power well callbacks, driven through the
 * HSW_PWR_WELL_CTL* register set above.
 */
18811bb76ff1Sjsg const struct i915_power_well_ops hsw_power_well_ops = {
18821bb76ff1Sjsg 	.regs = &hsw_power_well_regs,
18831bb76ff1Sjsg 	.sync_hw = hsw_power_well_sync_hw,
18841bb76ff1Sjsg 	.enable = hsw_power_well_enable,
18851bb76ff1Sjsg 	.disable = hsw_power_well_disable,
18861bb76ff1Sjsg 	.is_enabled = hsw_power_well_enabled,
18871bb76ff1Sjsg };
18881bb76ff1Sjsg 
/* Gen9+ DC-off power well callbacks; no HW sync action is needed (noop). */
18891bb76ff1Sjsg const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
18901bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
18911bb76ff1Sjsg 	.enable = gen9_dc_off_power_well_enable,
18921bb76ff1Sjsg 	.disable = gen9_dc_off_power_well_disable,
18931bb76ff1Sjsg 	.is_enabled = gen9_dc_off_power_well_enabled,
18941bb76ff1Sjsg };
18951bb76ff1Sjsg 
/* Broxton DPIO common-lane power well callbacks. */
18961bb76ff1Sjsg const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
18971bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
18981bb76ff1Sjsg 	.enable = bxt_dpio_cmn_power_well_enable,
18991bb76ff1Sjsg 	.disable = bxt_dpio_cmn_power_well_disable,
19001bb76ff1Sjsg 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
19011bb76ff1Sjsg };
19021bb76ff1Sjsg 
/*
 * Valleyview display power well: dedicated enable/disable, with the
 * status query shared across all VLV wells.
 */
19031bb76ff1Sjsg const struct i915_power_well_ops vlv_display_power_well_ops = {
19041bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
19051bb76ff1Sjsg 	.enable = vlv_display_power_well_enable,
19061bb76ff1Sjsg 	.disable = vlv_display_power_well_disable,
19071bb76ff1Sjsg 	.is_enabled = vlv_power_well_enabled,
19081bb76ff1Sjsg };
19091bb76ff1Sjsg 
/* Valleyview DPIO common-lane power well callbacks. */
19101bb76ff1Sjsg const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
19111bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
19121bb76ff1Sjsg 	.enable = vlv_dpio_cmn_power_well_enable,
19131bb76ff1Sjsg 	.disable = vlv_dpio_cmn_power_well_disable,
19141bb76ff1Sjsg 	.is_enabled = vlv_power_well_enabled,
19151bb76ff1Sjsg };
19161bb76ff1Sjsg 
/* Generic Valleyview power well callbacks (used for the DPIO TX wells). */
19171bb76ff1Sjsg const struct i915_power_well_ops vlv_dpio_power_well_ops = {
19181bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
19191bb76ff1Sjsg 	.enable = vlv_power_well_enable,
19201bb76ff1Sjsg 	.disable = vlv_power_well_disable,
19211bb76ff1Sjsg 	.is_enabled = vlv_power_well_enabled,
19221bb76ff1Sjsg };
19231bb76ff1Sjsg 
/*
 * ICL AUX power well control registers. Note .kvmr is not populated here
 * and so stays zero-initialized, unlike the HSW register set.
 */
19241bb76ff1Sjsg static const struct i915_power_well_regs icl_aux_power_well_regs = {
19251bb76ff1Sjsg 	.bios	= ICL_PWR_WELL_CTL_AUX1,
19261bb76ff1Sjsg 	.driver	= ICL_PWR_WELL_CTL_AUX2,
19271bb76ff1Sjsg 	.debug	= ICL_PWR_WELL_CTL_AUX4,
19281bb76ff1Sjsg };
19291bb76ff1Sjsg 
/*
 * ICL AUX power well callbacks: ICL-specific enable/disable over the
 * AUX control register set, with HSW-style sync and status handling.
 */
19301bb76ff1Sjsg const struct i915_power_well_ops icl_aux_power_well_ops = {
19311bb76ff1Sjsg 	.regs = &icl_aux_power_well_regs,
19321bb76ff1Sjsg 	.sync_hw = hsw_power_well_sync_hw,
19331bb76ff1Sjsg 	.enable = icl_aux_power_well_enable,
19341bb76ff1Sjsg 	.disable = icl_aux_power_well_disable,
19351bb76ff1Sjsg 	.is_enabled = hsw_power_well_enabled,
19361bb76ff1Sjsg };
19371bb76ff1Sjsg 
/*
 * ICL DDI power well control registers. As with the AUX set, .kvmr is
 * left zero-initialized.
 */
19381bb76ff1Sjsg static const struct i915_power_well_regs icl_ddi_power_well_regs = {
19391bb76ff1Sjsg 	.bios	= ICL_PWR_WELL_CTL_DDI1,
19401bb76ff1Sjsg 	.driver	= ICL_PWR_WELL_CTL_DDI2,
19411bb76ff1Sjsg 	.debug	= ICL_PWR_WELL_CTL_DDI4,
19421bb76ff1Sjsg };
19431bb76ff1Sjsg 
/*
 * ICL DDI power well callbacks: reuses the generic HSW handlers, only
 * pointed at the ICL DDI control register set.
 */
19441bb76ff1Sjsg const struct i915_power_well_ops icl_ddi_power_well_ops = {
19451bb76ff1Sjsg 	.regs = &icl_ddi_power_well_regs,
19461bb76ff1Sjsg 	.sync_hw = hsw_power_well_sync_hw,
19471bb76ff1Sjsg 	.enable = hsw_power_well_enable,
19481bb76ff1Sjsg 	.disable = hsw_power_well_disable,
19491bb76ff1Sjsg 	.is_enabled = hsw_power_well_enabled,
19501bb76ff1Sjsg };
19511bb76ff1Sjsg 
/*
 * TGL TC-cold-off "power well" callbacks; all hooks are dedicated and no
 * MMIO register set is attached (.regs left NULL).
 */
19521bb76ff1Sjsg const struct i915_power_well_ops tgl_tc_cold_off_ops = {
19531bb76ff1Sjsg 	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
19541bb76ff1Sjsg 	.enable = tgl_tc_cold_off_power_well_enable,
19551bb76ff1Sjsg 	.disable = tgl_tc_cold_off_power_well_disable,
19561bb76ff1Sjsg 	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
19571bb76ff1Sjsg };
19581bb76ff1Sjsg 
/*
 * XELPDP AUX power well callbacks: driven through the per-AUX-channel
 * XELPDP_DP_AUX_CH_CTL register rather than a i915_power_well_regs set,
 * so .regs stays NULL.
 */
19591bb76ff1Sjsg const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
19601bb76ff1Sjsg 	.sync_hw = i9xx_power_well_sync_hw_noop,
19611bb76ff1Sjsg 	.enable = xelpdp_aux_power_well_enable,
19621bb76ff1Sjsg 	.disable = xelpdp_aux_power_well_disable,
19631bb76ff1Sjsg 	.is_enabled = xelpdp_aux_power_well_enabled,
19641bb76ff1Sjsg };
1965