1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "display/intel_crt.h"
7 
8 #include "i915_drv.h"
9 #include "i915_irq.h"
10 #include "intel_cdclk.h"
11 #include "intel_combo_phy.h"
12 #include "intel_display_power.h"
13 #include "intel_de.h"
14 #include "intel_display_types.h"
15 #include "intel_dmc.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_pps.h"
20 #include "intel_sideband.h"
21 #include "intel_snps_phy.h"
22 #include "intel_tc.h"
23 #include "intel_vga.h"
24 
25 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
26 					 enum i915_power_well_id power_well_id);
27 
28 const char *
29 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 {
31 	switch (domain) {
32 	case POWER_DOMAIN_DISPLAY_CORE:
33 		return "DISPLAY_CORE";
34 	case POWER_DOMAIN_PIPE_A:
35 		return "PIPE_A";
36 	case POWER_DOMAIN_PIPE_B:
37 		return "PIPE_B";
38 	case POWER_DOMAIN_PIPE_C:
39 		return "PIPE_C";
40 	case POWER_DOMAIN_PIPE_D:
41 		return "PIPE_D";
42 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
43 		return "PIPE_A_PANEL_FITTER";
44 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
45 		return "PIPE_B_PANEL_FITTER";
46 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
47 		return "PIPE_C_PANEL_FITTER";
48 	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
49 		return "PIPE_D_PANEL_FITTER";
50 	case POWER_DOMAIN_TRANSCODER_A:
51 		return "TRANSCODER_A";
52 	case POWER_DOMAIN_TRANSCODER_B:
53 		return "TRANSCODER_B";
54 	case POWER_DOMAIN_TRANSCODER_C:
55 		return "TRANSCODER_C";
56 	case POWER_DOMAIN_TRANSCODER_D:
57 		return "TRANSCODER_D";
58 	case POWER_DOMAIN_TRANSCODER_EDP:
59 		return "TRANSCODER_EDP";
60 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
61 		return "TRANSCODER_VDSC_PW2";
62 	case POWER_DOMAIN_TRANSCODER_DSI_A:
63 		return "TRANSCODER_DSI_A";
64 	case POWER_DOMAIN_TRANSCODER_DSI_C:
65 		return "TRANSCODER_DSI_C";
66 	case POWER_DOMAIN_PORT_DDI_A_LANES:
67 		return "PORT_DDI_A_LANES";
68 	case POWER_DOMAIN_PORT_DDI_B_LANES:
69 		return "PORT_DDI_B_LANES";
70 	case POWER_DOMAIN_PORT_DDI_C_LANES:
71 		return "PORT_DDI_C_LANES";
72 	case POWER_DOMAIN_PORT_DDI_D_LANES:
73 		return "PORT_DDI_D_LANES";
74 	case POWER_DOMAIN_PORT_DDI_E_LANES:
75 		return "PORT_DDI_E_LANES";
76 	case POWER_DOMAIN_PORT_DDI_F_LANES:
77 		return "PORT_DDI_F_LANES";
78 	case POWER_DOMAIN_PORT_DDI_G_LANES:
79 		return "PORT_DDI_G_LANES";
80 	case POWER_DOMAIN_PORT_DDI_H_LANES:
81 		return "PORT_DDI_H_LANES";
82 	case POWER_DOMAIN_PORT_DDI_I_LANES:
83 		return "PORT_DDI_I_LANES";
84 	case POWER_DOMAIN_PORT_DDI_A_IO:
85 		return "PORT_DDI_A_IO";
86 	case POWER_DOMAIN_PORT_DDI_B_IO:
87 		return "PORT_DDI_B_IO";
88 	case POWER_DOMAIN_PORT_DDI_C_IO:
89 		return "PORT_DDI_C_IO";
90 	case POWER_DOMAIN_PORT_DDI_D_IO:
91 		return "PORT_DDI_D_IO";
92 	case POWER_DOMAIN_PORT_DDI_E_IO:
93 		return "PORT_DDI_E_IO";
94 	case POWER_DOMAIN_PORT_DDI_F_IO:
95 		return "PORT_DDI_F_IO";
96 	case POWER_DOMAIN_PORT_DDI_G_IO:
97 		return "PORT_DDI_G_IO";
98 	case POWER_DOMAIN_PORT_DDI_H_IO:
99 		return "PORT_DDI_H_IO";
100 	case POWER_DOMAIN_PORT_DDI_I_IO:
101 		return "PORT_DDI_I_IO";
102 	case POWER_DOMAIN_PORT_DSI:
103 		return "PORT_DSI";
104 	case POWER_DOMAIN_PORT_CRT:
105 		return "PORT_CRT";
106 	case POWER_DOMAIN_PORT_OTHER:
107 		return "PORT_OTHER";
108 	case POWER_DOMAIN_VGA:
109 		return "VGA";
110 	case POWER_DOMAIN_AUDIO_MMIO:
111 		return "AUDIO_MMIO";
112 	case POWER_DOMAIN_AUDIO_PLAYBACK:
113 		return "AUDIO_PLAYBACK";
114 	case POWER_DOMAIN_AUX_A:
115 		return "AUX_A";
116 	case POWER_DOMAIN_AUX_B:
117 		return "AUX_B";
118 	case POWER_DOMAIN_AUX_C:
119 		return "AUX_C";
120 	case POWER_DOMAIN_AUX_D:
121 		return "AUX_D";
122 	case POWER_DOMAIN_AUX_E:
123 		return "AUX_E";
124 	case POWER_DOMAIN_AUX_F:
125 		return "AUX_F";
126 	case POWER_DOMAIN_AUX_G:
127 		return "AUX_G";
128 	case POWER_DOMAIN_AUX_H:
129 		return "AUX_H";
130 	case POWER_DOMAIN_AUX_I:
131 		return "AUX_I";
132 	case POWER_DOMAIN_AUX_IO_A:
133 		return "AUX_IO_A";
134 	case POWER_DOMAIN_AUX_C_TBT:
135 		return "AUX_C_TBT";
136 	case POWER_DOMAIN_AUX_D_TBT:
137 		return "AUX_D_TBT";
138 	case POWER_DOMAIN_AUX_E_TBT:
139 		return "AUX_E_TBT";
140 	case POWER_DOMAIN_AUX_F_TBT:
141 		return "AUX_F_TBT";
142 	case POWER_DOMAIN_AUX_G_TBT:
143 		return "AUX_G_TBT";
144 	case POWER_DOMAIN_AUX_H_TBT:
145 		return "AUX_H_TBT";
146 	case POWER_DOMAIN_AUX_I_TBT:
147 		return "AUX_I_TBT";
148 	case POWER_DOMAIN_GMBUS:
149 		return "GMBUS";
150 	case POWER_DOMAIN_INIT:
151 		return "INIT";
152 	case POWER_DOMAIN_MODESET:
153 		return "MODESET";
154 	case POWER_DOMAIN_GT_IRQ:
155 		return "GT_IRQ";
156 	case POWER_DOMAIN_DPLL_DC_OFF:
157 		return "DPLL_DC_OFF";
158 	case POWER_DOMAIN_TC_COLD_OFF:
159 		return "TC_COLD_OFF";
160 	default:
161 		MISSING_CASE(domain);
162 		return "?";
163 	}
164 }
165 
166 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
167 				    struct i915_power_well *power_well)
168 {
169 	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
170 	power_well->desc->ops->enable(dev_priv, power_well);
171 	power_well->hw_enabled = true;
172 }
173 
174 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
175 				     struct i915_power_well *power_well)
176 {
177 	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
178 	power_well->hw_enabled = false;
179 	power_well->desc->ops->disable(dev_priv, power_well);
180 }
181 
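/*
 * Reference counted helpers: the power well hardware is enabled on the
 * 0->1 use-count transition and disabled again on the 1->0 transition.
 */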
182 static void intel_power_well_get(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	if (!power_well->count++)
186 		intel_power_well_enable(dev_priv, power_well);
187 }
188 
189 static void intel_power_well_put(struct drm_i915_private *dev_priv,
190 				 struct i915_power_well *power_well)
191 {
192 	drm_WARN(&dev_priv->drm, !power_well->count,
193 		 "Use count on power well %s is already zero",
194 		 power_well->desc->name);
195 
196 	if (!--power_well->count)
197 		intel_power_well_disable(dev_priv, power_well);
198 }
199 
200 /**
201  * __intel_display_power_is_enabled - unlocked check for a power domain
202  * @dev_priv: i915 device instance
203  * @domain: power domain to check
204  *
205  * This is the unlocked version of intel_display_power_is_enabled() and should
206  * only be used from error capture and recovery code where deadlocks are
207  * possible.
208  *
209  * Returns:
210  * True when the power domain is enabled, false otherwise.
211  */
212 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
213 				      enum intel_display_power_domain domain)
214 {
215 	struct i915_power_well *power_well;
216 	bool is_enabled;
217 
218 	if (dev_priv->runtime_pm.suspended)
219 		return false;
220 
221 	is_enabled = true;
222 
223 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
224 		if (power_well->desc->always_on)
225 			continue;
226 
227 		if (!power_well->hw_enabled) {
228 			is_enabled = false;
229 			break;
230 		}
231 	}
232 
233 	return is_enabled;
234 }
235 
236 /**
237  * intel_display_power_is_enabled - check for a power domain
238  * @dev_priv: i915 device instance
239  * @domain: power domain to check
240  *
241  * This function can be used to check the hw power domain state. It is mostly
242  * used in hardware state readout functions. Everywhere else code should rely
243  * upon explicit power domain reference counting to ensure that the hardware
244  * block is powered up before accessing it.
245  *
246  * Callers must hold the relevant modesetting locks to ensure that concurrent
247  * threads can't disable the power well while the caller tries to read a few
248  * registers.
249  *
250  * Returns:
251  * True when the power domain is enabled, false otherwise.
252  */
253 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
254 				    enum intel_display_power_domain domain)
255 {
256 	struct i915_power_domains *power_domains;
257 	bool ret;
258 
259 	power_domains = &dev_priv->power_domains;
260 
261 	mutex_lock(&power_domains->lock);
262 	ret = __intel_display_power_is_enabled(dev_priv, domain);
263 	mutex_unlock(&power_domains->lock);
264 
265 	return ret;
266 }
267 
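/*
 * A typical readout-path use of the above (hypothetical sketch):
 *
 *	if (intel_display_power_is_enabled(i915, POWER_DOMAIN_PIPE_A))
 *		... read out pipe A hw state under the relevant locks ...
 */
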
268 /*
269  * Starting with Haswell, we have a "Power Down Well" that can be turned off
270  * when not needed anymore. We have 4 registers that can request the power well
271  * to be enabled, and it will only be disabled if none of the registers is
272  * requesting it to be enabled.
273  */
274 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
275 				       u8 irq_pipe_mask, bool has_vga)
276 {
277 	if (has_vga)
278 		intel_vga_reset_io_mem(dev_priv);
279 
280 	if (irq_pipe_mask)
281 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
282 }
283 
284 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
285 				       u8 irq_pipe_mask)
286 {
287 	if (irq_pipe_mask)
288 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
289 }
290 
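/*
 * Map an AUX power well's CTL index to its AUX channel: combo PHY AUX
 * wells count up from ICL_PW_CTL_IDX_AUX_A / AUX_CH_A, while TBT AUX
 * wells count up from ICL_PW_CTL_IDX_AUX_TBT1 and land on AUX_CH_C and
 * higher.
 */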
291 #define ICL_AUX_PW_TO_CH(pw_idx)	\
292 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
293 
294 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
295 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
296 
297 static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
298 {
299 	int pw_idx = power_well->desc->hsw.idx;
300 
301 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
302 						 ICL_AUX_PW_TO_CH(pw_idx);
303 }
304 
305 static struct intel_digital_port *
306 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
307 		       enum aux_ch aux_ch)
308 {
309 	struct intel_digital_port *dig_port = NULL;
310 	struct intel_encoder *encoder;
311 
312 	for_each_intel_encoder(&dev_priv->drm, encoder) {
313 		/* We'll check the MST primary port */
314 		if (encoder->type == INTEL_OUTPUT_DP_MST)
315 			continue;
316 
317 		dig_port = enc_to_dig_port(encoder);
318 		if (!dig_port)
319 			continue;
320 
321 		if (dig_port->aux_ch != aux_ch) {
322 			dig_port = NULL;
323 			continue;
324 		}
325 
326 		break;
327 	}
328 
329 	return dig_port;
330 }
331 
332 static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
333 				  const struct i915_power_well *power_well)
334 {
335 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
336 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
337 
338 	return intel_port_to_phy(i915, dig_port->base.port);
339 }
340 
341 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
342 					   struct i915_power_well *power_well,
343 					   bool timeout_expected)
344 {
345 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
346 	int pw_idx = power_well->desc->hsw.idx;
347 	int enable_delay = power_well->desc->hsw.fixed_enable_delay;
348 
349 	/*
350 	 * For some power wells we're not supposed to watch the status bit for
351 	 * an ack, but rather just wait a fixed amount of time and then
352 	 * proceed.  This is only used on DG2.
353 	 */
354 	if (IS_DG2(dev_priv) && enable_delay) {
355 		usleep_range(enable_delay, 2 * enable_delay);
356 		return;
357 	}
358 
359 	/* Timeout for PW1: 10 us, AUX: not specified, other PWs: 20 us. */
360 	if (intel_de_wait_for_set(dev_priv, regs->driver,
361 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
362 		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
363 			    power_well->desc->name);
364 
365 		drm_WARN_ON(&dev_priv->drm, !timeout_expected);
366 
367 	}
368 }
369 
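/*
 * Collect the request bits for a power well from all four requester
 * register sets into one bitmask: bit 0 = BIOS, bit 1 = driver,
 * bit 2 = KVMR (where present), bit 3 = DEBUG.
 */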
370 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
371 				     const struct i915_power_well_regs *regs,
372 				     int pw_idx)
373 {
374 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
375 	u32 ret;
376 
377 	ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
378 	ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
379 	if (regs->kvmr.reg)
380 		ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
381 	ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
382 
383 	return ret;
384 }
385 
386 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
387 					    struct i915_power_well *power_well)
388 {
389 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
390 	int pw_idx = power_well->desc->hsw.idx;
391 	bool disabled;
392 	u32 reqs;
393 
394 	/*
395 	 * Bspec doesn't require waiting for PWs to get disabled, but we still
396 	 * do this out of paranoia. The known cases where a PW will be forced on:
397 	 * - a KVMR request on any power well via the KVMR request register
398 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
399 	 *   DEBUG request registers
400 	 * Skip the wait in case any of the request bits are set and print a
401 	 * diagnostic message.
402 	 */
403 	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
404 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
405 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
406 	if (disabled)
407 		return;
408 
409 	drm_dbg_kms(&dev_priv->drm,
410 		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
411 		    power_well->desc->name,
412 		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
413 }
414 
415 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
416 					   enum skl_power_gate pg)
417 {
418 	/* Timeout 5us for PG#0, for other PGs 1us */
419 	drm_WARN_ON(&dev_priv->drm,
420 		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
421 					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
422 }
423 
424 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
425 				  struct i915_power_well *power_well)
426 {
427 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
428 	int pw_idx = power_well->desc->hsw.idx;
429 	u32 val;
430 
431 	if (power_well->desc->hsw.has_fuses) {
432 		enum skl_power_gate pg;
433 
434 		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
435 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
436 
437 		/* Wa_16013190616:adlp */
438 		if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
439 			intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
440 
441 		/*
442 	 * For PW1 we have to wait both for the PW0/PG0 fuse state
443 	 * before enabling the power well and for PW1/PG1's own fuse
444 	 * state after enabling it. For all other power wells with
445 	 * fuses we only have to wait for that PW/PG's fuse state
446 	 * after enabling it.
447 		 */
448 		if (pg == SKL_PG1)
449 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
450 	}
451 
452 	val = intel_de_read(dev_priv, regs->driver);
453 	intel_de_write(dev_priv, regs->driver,
454 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
455 
456 	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
457 
458 	if (power_well->desc->hsw.has_fuses) {
459 		enum skl_power_gate pg;
460 
461 		pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
462 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
463 		gen9_wait_for_power_well_fuses(dev_priv, pg);
464 	}
465 
466 	hsw_power_well_post_enable(dev_priv,
467 				   power_well->desc->hsw.irq_pipe_mask,
468 				   power_well->desc->hsw.has_vga);
469 }
470 
471 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
472 				   struct i915_power_well *power_well)
473 {
474 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
475 	int pw_idx = power_well->desc->hsw.idx;
476 	u32 val;
477 
478 	hsw_power_well_pre_disable(dev_priv,
479 				   power_well->desc->hsw.irq_pipe_mask);
480 
481 	val = intel_de_read(dev_priv, regs->driver);
482 	intel_de_write(dev_priv, regs->driver,
483 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
484 	hsw_wait_for_power_well_disable(dev_priv, power_well);
485 }
486 
487 static void
488 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
489 				    struct i915_power_well *power_well)
490 {
491 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
492 	int pw_idx = power_well->desc->hsw.idx;
493 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
494 	u32 val;
495 
496 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
497 
498 	val = intel_de_read(dev_priv, regs->driver);
499 	intel_de_write(dev_priv, regs->driver,
500 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
501 
502 	if (DISPLAY_VER(dev_priv) < 12) {
503 		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
504 		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
505 			       val | ICL_LANE_ENABLE_AUX);
506 	}
507 
508 	hsw_wait_for_power_well_enable(dev_priv, power_well, false);
509 
510 	/* Display WA #1178: icl */
511 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
512 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
513 		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
514 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
515 		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
516 	}
517 }
518 
519 static void
520 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
521 				     struct i915_power_well *power_well)
522 {
523 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
524 	int pw_idx = power_well->desc->hsw.idx;
525 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
526 	u32 val;
527 
528 	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
529 
530 	val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
531 	intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
532 		       val & ~ICL_LANE_ENABLE_AUX);
533 
534 	val = intel_de_read(dev_priv, regs->driver);
535 	intel_de_write(dev_priv, regs->driver,
536 		       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
537 
538 	hsw_wait_for_power_well_disable(dev_priv, power_well);
539 }
540 
541 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
542 
543 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
544 
545 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
546 				      struct i915_power_well *power_well)
547 {
548 	int refs = hweight64(power_well->desc->domains &
549 			     async_put_domains_mask(&dev_priv->power_domains));
550 
551 	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
552 
553 	return refs;
554 }
555 
556 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
557 					struct i915_power_well *power_well,
558 					struct intel_digital_port *dig_port)
559 {
560 	/* Bypass the check if all references are released asynchronously */
561 	if (power_well_async_ref_count(dev_priv, power_well) ==
562 	    power_well->count)
563 		return;
564 
565 	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
566 		return;
567 
568 	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
569 		return;
570 
571 	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
572 }
573 
574 #else
575 
576 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
577 					struct i915_power_well *power_well,
578 					struct intel_digital_port *dig_port)
579 {
580 }
581 
582 #endif
583 
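/* Map a TC AUX power well's CTL index to its zero-based TC port number. */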
584 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
585 
586 static void icl_tc_cold_exit(struct drm_i915_private *i915)
587 {
588 	int ret, tries = 0;
589 
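	/* Retry while pcode reports busy (-EAGAIN), up to three attempts. */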
590 	while (1) {
591 		ret = sandybridge_pcode_write_timeout(i915,
592 						      ICL_PCODE_EXIT_TCCOLD,
593 						      0, 250, 1);
594 		if (ret != -EAGAIN || ++tries == 3)
595 			break;
596 		drm_msleep(1);
597 	}
598 
599 	/* Spec states that TC cold exit can take up to 1ms to complete */
600 	if (!ret)
601 		drm_msleep(1);
602 
603 	/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
604 	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
605 		    "succeeded");
606 }
607 
608 static void
609 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
610 				 struct i915_power_well *power_well)
611 {
612 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
613 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
614 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
615 	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
616 	bool timeout_expected;
617 	u32 val;
618 
619 	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
620 
621 	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
622 	val &= ~DP_AUX_CH_CTL_TBT_IO;
623 	if (is_tbt)
624 		val |= DP_AUX_CH_CTL_TBT_IO;
625 	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
626 
627 	val = intel_de_read(dev_priv, regs->driver);
628 	intel_de_write(dev_priv, regs->driver,
629 		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
630 
631 	/*
632 	 * An AUX timeout is expected if the TBT DP tunnel is down,
633 	 * or if we need to enable AUX on a legacy TypeC port as part of the
634 	 * TC-cold exit sequence.
635 	 */
636 	timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
637 	if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
638 		icl_tc_cold_exit(dev_priv);
639 
640 	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
641 
642 	if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
643 		enum tc_port tc_port;
644 
645 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
646 		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
647 			       HIP_INDEX_VAL(tc_port, 0x2));
648 
649 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
650 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
651 			drm_warn(&dev_priv->drm,
652 				 "Timeout waiting for TC uC health\n");
653 	}
654 }
655 
656 static void
657 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
658 				  struct i915_power_well *power_well)
659 {
660 	enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
661 	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
662 
663 	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
664 
665 	hsw_power_well_disable(dev_priv, power_well);
666 }
667 
668 static void
669 icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
670 			  struct i915_power_well *power_well)
671 {
672 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
673 
674 	if (intel_phy_is_tc(dev_priv, phy))
675 		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
676 	else if (IS_ICELAKE(dev_priv))
677 		return icl_combo_phy_aux_power_well_enable(dev_priv,
678 							   power_well);
679 	else
680 		return hsw_power_well_enable(dev_priv, power_well);
681 }
682 
683 static void
684 icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
685 			   struct i915_power_well *power_well)
686 {
687 	enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
688 
689 	if (intel_phy_is_tc(dev_priv, phy))
690 		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
691 	else if (IS_ICELAKE(dev_priv))
692 		return icl_combo_phy_aux_power_well_disable(dev_priv,
693 							    power_well);
694 	else
695 		return hsw_power_well_disable(dev_priv, power_well);
696 }
697 
698 /*
699  * We should only use the power well if we explicitly asked the hardware to
700  * enable it, so check if it's enabled and also check if we've requested it to
701  * be enabled.
702  */
703 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
704 				   struct i915_power_well *power_well)
705 {
706 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
707 	enum i915_power_well_id id = power_well->desc->id;
708 	int pw_idx = power_well->desc->hsw.idx;
709 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
710 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
711 	u32 val;
712 
713 	val = intel_de_read(dev_priv, regs->driver);
714 
715 	/*
716 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
717 	 * and the MISC_IO PW will not be restored, so check instead for the
718 	 * BIOS's own request bits, which are forced-on for these power wells
719 	 * when exiting DC5/6.
720 	 */
721 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
722 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
723 		val |= intel_de_read(dev_priv, regs->bios);
724 
725 	return (val & mask) == mask;
726 }
727 
728 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
729 {
730 	drm_WARN_ONCE(&dev_priv->drm,
731 		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
732 		      "DC9 already programmed to be enabled.\n");
733 	drm_WARN_ONCE(&dev_priv->drm,
734 		      intel_de_read(dev_priv, DC_STATE_EN) &
735 		      DC_STATE_EN_UPTO_DC5,
736 		      "DC5 still not disabled to enable DC9.\n");
737 	drm_WARN_ONCE(&dev_priv->drm,
738 		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
739 		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
740 		      "Power well 2 on.\n");
741 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
742 		      "Interrupts not disabled yet.\n");
743 
744 	 /*
745 	  * TODO: check for the following to verify the conditions to enter DC9
746 	  * state are satisfied:
747 	  * 1] Check relevant display engine registers to verify if mode set
748 	  * disable sequence was followed.
749 	  * 2] Check if the display uninitialize sequence was completed.
750 	  */
751 }
752 
753 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
754 {
755 	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
756 		      "Interrupts not disabled yet.\n");
757 	drm_WARN_ONCE(&dev_priv->drm,
758 		      intel_de_read(dev_priv, DC_STATE_EN) &
759 		      DC_STATE_EN_UPTO_DC5,
760 		      "DC5 still not disabled.\n");
761 
762 	 /*
763 	  * TODO: check for the following to verify DC9 state was indeed
764 	  * entered before programming to disable it:
765 	  * 1] Check relevant display engine registers to verify if mode
766 	  *  set disable sequence was followed.
767 	  * 2] Check if the display uninitialize sequence was completed.
768 	  */
769 }
770 
771 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
772 				u32 state)
773 {
774 	int rewrites = 0;
775 	int rereads = 0;
776 	u32 v;
777 
778 	intel_de_write(dev_priv, DC_STATE_EN, state);
779 
780 	/* It has been observed that disabling the dc6 state sometimes
781 	 * doesn't stick and the DMC keeps returning the old value. Re-read
782 	 * enough times to make sure the write really stuck, and force a
783 	 * rewrite until we are confident the state is exactly what we want.
784 	 */
785 	do  {
786 		v = intel_de_read(dev_priv, DC_STATE_EN);
787 
788 		if (v != state) {
789 			intel_de_write(dev_priv, DC_STATE_EN, state);
790 			rewrites++;
791 			rereads = 0;
792 		} else if (rereads++ > 5) {
793 			break;
794 		}
795 
796 	} while (rewrites < 100);
797 
798 	if (v != state)
799 		drm_err(&dev_priv->drm,
800 			"Writing dc state to 0x%x failed, now 0x%x\n",
801 			state, v);
802 
803 	/* Most of the time we need one retry; avoid log spam */
804 	if (rewrites > 1)
805 		drm_dbg_kms(&dev_priv->drm,
806 			    "Rewrote dc state to 0x%x %d times\n",
807 			    state, rewrites);
808 }
809 
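/*
 * Mask of all DC state enable bits that exist on this platform, used to
 * isolate the DC bits of DC_STATE_EN from the rest of the register.
 */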
810 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
811 {
812 	u32 mask;
813 
814 	mask = DC_STATE_EN_UPTO_DC5;
815 
816 	if (DISPLAY_VER(dev_priv) >= 12)
817 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
818 					  | DC_STATE_EN_DC9;
819 	else if (DISPLAY_VER(dev_priv) == 11)
820 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
821 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
822 		mask |= DC_STATE_EN_DC9;
823 	else
824 		mask |= DC_STATE_EN_UPTO_DC6;
825 
826 	return mask;
827 }
828 
829 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
830 {
831 	u32 val;
832 
833 	if (!HAS_DISPLAY(dev_priv))
834 		return;
835 
836 	val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
837 
838 	drm_dbg_kms(&dev_priv->drm,
839 		    "Resetting DC state tracking from %02x to %02x\n",
840 		    dev_priv->dmc.dc_state, val);
841 	dev_priv->dmc.dc_state = val;
842 }
843 
844 /**
845  * gen9_set_dc_state - set target display C power state
846  * @dev_priv: i915 device instance
847  * @state: target DC power state
848  * - DC_STATE_DISABLE
849  * - DC_STATE_EN_UPTO_DC5
850  * - DC_STATE_EN_UPTO_DC6
851  * - DC_STATE_EN_DC9
852  *
853  * Signal to DMC firmware/HW the target DC power state passed in @state.
854  * DMC/HW can turn off individual display clocks and power rails when entering
855  * a deeper DC power state (higher in number) and turns these back on when
856  * exiting that state to a shallower power state (lower in number). The HW
857  * will decide when to actually enter a given state on an on-demand basis,
858  * for instance depending on the active state of display pipes. The state of
859  * display registers backed by affected power rails is saved/restored as needed.
860  *
861  * Given the above, entering a deeper DC power state is asynchronous wrt. the
862  * MMIO write enabling it. Disabling one is synchronous: for instance
863  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
864  * back on and register state is restored. This is guaranteed by the MMIO write
865  * to DC_STATE_EN blocking until the state is restored.
866  */
867 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
868 {
869 	u32 val;
870 	u32 mask;
871 
872 	if (!HAS_DISPLAY(dev_priv))
873 		return;
874 
875 	if (drm_WARN_ON_ONCE(&dev_priv->drm,
876 			     state & ~dev_priv->dmc.allowed_dc_mask))
877 		state &= dev_priv->dmc.allowed_dc_mask;
878 
879 	val = intel_de_read(dev_priv, DC_STATE_EN);
880 	mask = gen9_dc_mask(dev_priv);
881 	drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
882 		    val & mask, state);
883 
884 	/* Check if DMC is ignoring our DC state requests */
885 	if ((val & mask) != dev_priv->dmc.dc_state)
886 		drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
887 			dev_priv->dmc.dc_state, val & mask);
888 
889 	val &= ~mask;
890 	val |= state;
891 
892 	gen9_write_dc_state(dev_priv, val);
893 
894 	dev_priv->dmc.dc_state = val & mask;
895 }
896 
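/*
 * Walk the DC states from deepest to shallowest (DC6 -> DC5 -> DC3CO ->
 * disabled) and demote the requested state until one permitted by
 * dmc.allowed_dc_mask is found.
 */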
897 static u32
898 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
899 			 u32 target_dc_state)
900 {
901 	u32 states[] = {
902 		DC_STATE_EN_UPTO_DC6,
903 		DC_STATE_EN_UPTO_DC5,
904 		DC_STATE_EN_DC3CO,
905 		DC_STATE_DISABLE,
906 	};
907 	int i;
908 
909 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
910 		if (target_dc_state != states[i])
911 			continue;
912 
913 		if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
914 			break;
915 
916 		target_dc_state = states[i + 1];
917 	}
918 
919 	return target_dc_state;
920 }
921 
922 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
923 {
924 	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
925 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
926 }
927 
928 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
929 {
930 	u32 val;
931 
932 	drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
933 	val = intel_de_read(dev_priv, DC_STATE_EN);
934 	val &= ~DC_STATE_DC3CO_STATUS;
935 	intel_de_write(dev_priv, DC_STATE_EN, val);
936 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
937 	/*
938 	 * Delay of 200us for DC3CO exit time, Bspec 49196
939 	 */
940 	usleep_range(200, 210);
941 }
942 
943 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
944 {
945 	assert_can_enable_dc9(dev_priv);
946 
947 	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
948 	/*
949 	 * Power sequencer reset is not needed on
950 	 * platforms with South Display Engine on PCH,
951 	 * because PPS registers are always on.
952 	 */
953 	if (!HAS_PCH_SPLIT(dev_priv))
954 		intel_pps_reset_all(dev_priv);
955 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
956 }
957 
958 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
959 {
960 	assert_can_disable_dc9(dev_priv);
961 
962 	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
963 
964 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
965 
966 	intel_pps_unlock_regs_wa(dev_priv);
967 }
968 
969 static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
970 {
971 	drm_WARN_ONCE(&dev_priv->drm,
972 		      !intel_de_read(dev_priv,
973 				     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
974 				     "DMC program storage start is NULL\n");
975 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
976 		      "DMC SSP Base Not fine\n");
977 	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
978 		      "DMC HTP Not fine\n");
979 }
980 
981 static struct i915_power_well *
982 lookup_power_well(struct drm_i915_private *dev_priv,
983 		  enum i915_power_well_id power_well_id)
984 {
985 	struct i915_power_well *power_well;
986 
987 	for_each_power_well(dev_priv, power_well)
988 		if (power_well->desc->id == power_well_id)
989 			return power_well;
990 
991 	/*
992 	 * It's not feasible to add error checking code to the callers since
993 	 * this condition really shouldn't happen and it doesn't even make sense
994 	 * to abort things like display initialization sequences. Just return
995 	 * the first power well and hope the WARN gets reported so we can fix
996 	 * our driver.
997 	 */
998 	drm_WARN(&dev_priv->drm, 1,
999 		 "Power well %d not defined for this platform\n",
1000 		 power_well_id);
1001 	return &dev_priv->power_domains.power_wells[0];
1002 }
1003 
1004 /**
1005  * intel_display_power_set_target_dc_state - Set target dc state.
1006  * @dev_priv: i915 device
1007  * @state: state which needs to be set as target_dc_state.
1008  *
1009  * This function sets the "DC off" power well's target_dc_state;
1010  * based on this target_dc_state, the "DC off" power well will
1011  * enable the desired DC state.
1012  */
1013 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1014 					     u32 state)
1015 {
1016 	struct i915_power_well *power_well;
1017 	bool dc_off_enabled;
1018 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1019 
1020 	mutex_lock(&power_domains->lock);
1021 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1022 
1023 	if (drm_WARN_ON(&dev_priv->drm, !power_well))
1024 		goto unlock;
1025 
1026 	state = sanitize_target_dc_state(dev_priv, state);
1027 
1028 	if (state == dev_priv->dmc.target_dc_state)
1029 		goto unlock;
1030 
1031 	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1032 							   power_well);
1033 	/*
1034 	 * If the DC off power well is disabled, we need to enable and then
1035 	 * disable it again for the target DC state to take effect.
1036 	 */
1037 	if (!dc_off_enabled)
1038 		power_well->desc->ops->enable(dev_priv, power_well);
1039 
1040 	dev_priv->dmc.target_dc_state = state;
1041 
1042 	if (!dc_off_enabled)
1043 		power_well->desc->ops->disable(dev_priv, power_well);
1044 
1045 unlock:
1046 	mutex_unlock(&power_domains->lock);
1047 }
1048 
1049 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1050 {
1051 	enum i915_power_well_id high_pg;
1052 
1053 	/* Power wells at this level and above must be disabled for DC5 entry */
1054 	if (DISPLAY_VER(dev_priv) == 12)
1055 		high_pg = ICL_DISP_PW_3;
1056 	else
1057 		high_pg = SKL_DISP_PW_2;
1058 
1059 	drm_WARN_ONCE(&dev_priv->drm,
1060 		      intel_display_power_well_is_enabled(dev_priv, high_pg),
1061 		      "Power wells above platform's DC5 limit still enabled.\n");
1062 
1063 	drm_WARN_ONCE(&dev_priv->drm,
1064 		      (intel_de_read(dev_priv, DC_STATE_EN) &
1065 		       DC_STATE_EN_UPTO_DC5),
1066 		      "DC5 already programmed to be enabled.\n");
1067 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1068 
1069 	assert_dmc_loaded(dev_priv);
1070 }
1071 
1072 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1073 {
1074 	assert_can_enable_dc5(dev_priv);
1075 
1076 	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1077 
1078 	/* Wa Display #1183: skl,kbl,cfl */
1079 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1080 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1081 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1082 
1083 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1084 }
1085 
1086 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1087 {
1088 	drm_WARN_ONCE(&dev_priv->drm,
1089 		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1090 		      "Backlight is not disabled.\n");
1091 	drm_WARN_ONCE(&dev_priv->drm,
1092 		      (intel_de_read(dev_priv, DC_STATE_EN) &
1093 		       DC_STATE_EN_UPTO_DC6),
1094 		      "DC6 already programmed to be enabled.\n");
1095 
1096 	assert_dmc_loaded(dev_priv);
1097 }
1098 
1099 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1100 {
1101 	assert_can_enable_dc6(dev_priv);
1102 
1103 	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1104 
1105 	/* Wa Display #1183: skl,kbl,cfl */
1106 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
1107 		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1108 			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1109 
1110 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1111 }
1112 
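/*
 * If the BIOS left a request for this power well behind, mirror it into
 * the driver's request register and clear the BIOS one, so the driver
 * owns the well from here on.
 */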
1113 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1114 				   struct i915_power_well *power_well)
1115 {
1116 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1117 	int pw_idx = power_well->desc->hsw.idx;
1118 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1119 	u32 bios_req = intel_de_read(dev_priv, regs->bios);
1120 
1121 	/* Take over the request bit if set by BIOS. */
1122 	if (bios_req & mask) {
1123 		u32 drv_req = intel_de_read(dev_priv, regs->driver);
1124 
1125 		if (!(drv_req & mask))
1126 			intel_de_write(dev_priv, regs->driver, drv_req | mask);
1127 		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1128 	}
1129 }
1130 
1131 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1132 					   struct i915_power_well *power_well)
1133 {
1134 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1135 }
1136 
1137 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1138 					    struct i915_power_well *power_well)
1139 {
1140 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1141 }
1142 
1143 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1144 					    struct i915_power_well *power_well)
1145 {
1146 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1147 }
1148 
1149 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1150 {
1151 	struct i915_power_well *power_well;
1152 
1153 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1154 	if (power_well->count > 0)
1155 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1156 
1157 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1158 	if (power_well->count > 0)
1159 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1160 
1161 	if (IS_GEMINILAKE(dev_priv)) {
1162 		power_well = lookup_power_well(dev_priv,
1163 					       GLK_DISP_PW_DPIO_CMN_C);
1164 		if (power_well->count > 0)
1165 			bxt_ddi_phy_verify_state(dev_priv,
1166 						 power_well->desc->bxt.phy);
1167 	}
1168 }
1169 
1170 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1171 					   struct i915_power_well *power_well)
1172 {
1173 	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1174 		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1175 }
1176 
1177 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1178 {
1179 	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1180 	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
1181 
1182 	drm_WARN(&dev_priv->drm,
1183 		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1184 		 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
1185 		 hw_enabled_dbuf_slices,
1186 		 enabled_dbuf_slices);
1187 }
1188 
1189 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1190 {
1191 	struct intel_cdclk_config cdclk_config = {};
1192 
1193 	if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
1194 		tgl_disable_dc3co(dev_priv);
1195 		return;
1196 	}
1197 
1198 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1199 
1200 	if (!HAS_DISPLAY(dev_priv))
1201 		return;
1202 
1203 	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1204 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1205 	drm_WARN_ON(&dev_priv->drm,
1206 		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1207 					      &cdclk_config));
1208 
1209 	gen9_assert_dbuf_enabled(dev_priv);
1210 
1211 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
1212 		bxt_verify_ddi_phy_power_wells(dev_priv);
1213 
1214 	if (DISPLAY_VER(dev_priv) >= 11)
1215 		/*
1216 		 * DMC retains HW context only for port A, the other combo
1217 		 * PHY's HW context for port B is lost after DC transitions,
1218 		 * so we need to restore it manually.
1219 		 */
1220 		intel_combo_phy_init(dev_priv);
1221 }
1222 
1223 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1224 					  struct i915_power_well *power_well)
1225 {
1226 	gen9_disable_dc_states(dev_priv);
1227 }
1228 
1229 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1230 					   struct i915_power_well *power_well)
1231 {
1232 	if (!intel_dmc_has_payload(dev_priv))
1233 		return;
1234 
1235 	switch (dev_priv->dmc.target_dc_state) {
1236 	case DC_STATE_EN_DC3CO:
1237 		tgl_enable_dc3co(dev_priv);
1238 		break;
1239 	case DC_STATE_EN_UPTO_DC6:
1240 		skl_enable_dc6(dev_priv);
1241 		break;
1242 	case DC_STATE_EN_UPTO_DC5:
1243 		gen9_enable_dc5(dev_priv);
1244 		break;
1245 	}
1246 }
1247 
1248 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1249 					 struct i915_power_well *power_well)
1250 {
1251 }
1252 
1253 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1254 					   struct i915_power_well *power_well)
1255 {
1256 }
1257 
1258 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1259 					     struct i915_power_well *power_well)
1260 {
1261 	return true;
1262 }
1263 
1264 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1265 					 struct i915_power_well *power_well)
1266 {
1267 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1268 		i830_enable_pipe(dev_priv, PIPE_A);
1269 	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1270 		i830_enable_pipe(dev_priv, PIPE_B);
1271 }
1272 
1273 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1274 					  struct i915_power_well *power_well)
1275 {
1276 	i830_disable_pipe(dev_priv, PIPE_B);
1277 	i830_disable_pipe(dev_priv, PIPE_A);
1278 }
1279 
1280 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1281 					  struct i915_power_well *power_well)
1282 {
1283 	return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1284 		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1285 }
1286 
1287 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1288 					  struct i915_power_well *power_well)
1289 {
1290 	if (power_well->count > 0)
1291 		i830_pipes_power_well_enable(dev_priv, power_well);
1292 	else
1293 		i830_pipes_power_well_disable(dev_priv, power_well);
1294 }
1295 
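/*
 * Ask the Punit over the sideband to power the well on or gate it, then
 * poll PUNIT_REG_PWRGT_STATUS until the new state is acked (100 ms
 * timeout).
 */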
1296 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1297 			       struct i915_power_well *power_well, bool enable)
1298 {
1299 	int pw_idx = power_well->desc->vlv.idx;
1300 	u32 mask;
1301 	u32 state;
1302 	u32 ctrl;
1303 
1304 	mask = PUNIT_PWRGT_MASK(pw_idx);
1305 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1306 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1307 
1308 	vlv_punit_get(dev_priv);
1309 
1310 #define COND \
1311 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1312 
1313 	if (COND)
1314 		goto out;
1315 
1316 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1317 	ctrl &= ~mask;
1318 	ctrl |= state;
1319 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1320 
1321 	if (wait_for(COND, 100))
1322 		drm_err(&dev_priv->drm,
1323 			"timeout setting power well state %08x (%08x)\n",
1324 			state,
1325 			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1326 
1327 #undef COND
1328 
1329 out:
1330 	vlv_punit_put(dev_priv);
1331 }
1332 
1333 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1334 				  struct i915_power_well *power_well)
1335 {
1336 	vlv_set_power_well(dev_priv, power_well, true);
1337 }
1338 
1339 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1340 				   struct i915_power_well *power_well)
1341 {
1342 	vlv_set_power_well(dev_priv, power_well, false);
1343 }
1344 
1345 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1346 				   struct i915_power_well *power_well)
1347 {
1348 	int pw_idx = power_well->desc->vlv.idx;
1349 	bool enabled = false;
1350 	u32 mask;
1351 	u32 state;
1352 	u32 ctrl;
1353 
1354 	mask = PUNIT_PWRGT_MASK(pw_idx);
1355 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1356 
1357 	vlv_punit_get(dev_priv);
1358 
1359 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1360 	/*
1361 	 * We only ever set the power-on and power-gate states, anything
1362 	 * else is unexpected.
1363 	 */
1364 	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1365 		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1366 	if (state == ctrl)
1367 		enabled = true;
1368 
1369 	/*
1370 	 * A transient state at this point would mean some unexpected party
1371 	 * is poking at the power controls too.
1372 	 */
1373 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1374 	drm_WARN_ON(&dev_priv->drm, ctrl != state);
1375 
1376 	vlv_punit_put(dev_priv);
1377 
1378 	return enabled;
1379 }
1380 
1381 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1382 {
1383 	u32 val;
1384 
1385 	/*
1386 	 * On driver load, a pipe may be active and driving a DSI display.
1387 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1388 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1389 	 * clear it when we turn off the display.
1390 	 */
1391 	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1392 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1393 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1394 	intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1395 
1396 	/*
1397 	 * Disable trickle feed and enable pnd deadline calculation
1398 	 */
1399 	intel_de_write(dev_priv, MI_ARB_VLV,
1400 		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1401 	intel_de_write(dev_priv, CBR1_VLV, 0);
1402 
1403 	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1404 	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1405 		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1406 					 1000));
1407 }
1408 
1409 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1410 {
1411 	struct intel_encoder *encoder;
1412 	enum pipe pipe;
1413 
1414 	/*
1415 	 * Enable the CRI clock source so we can get at the
1416 	 * display and the reference clock for VGA
1417 	 * hotplug / manual detection. Supposedly DSI also
1418 	 * needs the ref clock up and running.
1419 	 *
1420 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1421 	 */
1422 	for_each_pipe(dev_priv, pipe) {
1423 		u32 val = intel_de_read(dev_priv, DPLL(pipe));
1424 
1425 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1426 		if (pipe != PIPE_A)
1427 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1428 
1429 		intel_de_write(dev_priv, DPLL(pipe), val);
1430 	}
1431 
1432 	vlv_init_display_clock_gating(dev_priv);
1433 
1434 	spin_lock_irq(&dev_priv->irq_lock);
1435 	valleyview_enable_display_irqs(dev_priv);
1436 	spin_unlock_irq(&dev_priv->irq_lock);
1437 
1438 	/*
1439 	 * During driver initialization/resume we can avoid restoring the
1440 	 * part of the HW/SW state that will be explicitly initialized anyway.
1441 	 */
1442 	if (dev_priv->power_domains.initializing)
1443 		return;
1444 
1445 	intel_hpd_init(dev_priv);
1446 	intel_hpd_poll_disable(dev_priv);
1447 
1448 	/* Re-enable the ADPA, if we have one */
1449 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1450 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1451 			intel_crt_reset(&encoder->base);
1452 	}
1453 
1454 	intel_vga_redisable_power_on(dev_priv);
1455 
1456 	intel_pps_unlock_regs_wa(dev_priv);
1457 }
1458 
1459 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1460 {
1461 	spin_lock_irq(&dev_priv->irq_lock);
1462 	valleyview_disable_display_irqs(dev_priv);
1463 	spin_unlock_irq(&dev_priv->irq_lock);
1464 
1465 	/* make sure we're done processing display irqs */
1466 	intel_synchronize_irq(dev_priv);
1467 
1468 	intel_pps_reset_all(dev_priv);
1469 
1470 	/* Prevent us from re-enabling polling by accident in late suspend */
1471 #ifdef __linux__
1472 	if (!dev_priv->drm.dev->power.is_suspended)
1473 #else
1474 	if (!cold)
1475 #endif
1476 		intel_hpd_poll_enable(dev_priv);
1477 }
1478 
1479 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1480 					  struct i915_power_well *power_well)
1481 {
1482 	vlv_set_power_well(dev_priv, power_well, true);
1483 
1484 	vlv_display_power_well_init(dev_priv);
1485 }
1486 
1487 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1488 					   struct i915_power_well *power_well)
1489 {
1490 	vlv_display_power_well_deinit(dev_priv);
1491 
1492 	vlv_set_power_well(dev_priv, power_well, false);
1493 }
1494 
1495 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1496 					   struct i915_power_well *power_well)
1497 {
1498 	/* since ref/cri clock was enabled */
1499 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1500 
1501 	vlv_set_power_well(dev_priv, power_well, true);
1502 
1503 	/*
1504 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1505 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1506 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1507 	 *   b.	The other bits such as sfr settings / modesel may all
1508 	 *	be set to 0.
1509 	 *
1510 	 * This should only be done on init and resume from S3 with
1511 	 * both PLLs disabled, or we risk losing DPIO and PLL
1512 	 * synchronization.
1513 	 */
1514 	intel_de_write(dev_priv, DPIO_CTL,
1515 		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1516 }
1517 
1518 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1519 					    struct i915_power_well *power_well)
1520 {
1521 	enum pipe pipe;
1522 
1523 	for_each_pipe(dev_priv, pipe)
1524 		assert_pll_disabled(dev_priv, pipe);
1525 
1526 	/* Assert common reset */
1527 	intel_de_write(dev_priv, DPIO_CTL,
1528 		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1529 
1530 	vlv_set_power_well(dev_priv, power_well, false);
1531 }
1532 
1533 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1534 
1535 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1536 
1537 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1538 {
1539 	struct i915_power_well *cmn_bc =
1540 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1541 	struct i915_power_well *cmn_d =
1542 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1543 	u32 phy_control = dev_priv->chv_phy_control;
1544 	u32 phy_status = 0;
1545 	u32 phy_status_mask = 0xffffffff;
1546 
1547 	/*
1548 	 * The BIOS can leave the PHY in some weird state
1549 	 * where it doesn't fully power down some parts.
1550 	 * Disable the asserts until the PHY has been fully
1551 	 * reset (ie. the power well has been disabled at
1552 	 * least once).
1553 	 */
1554 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1555 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1556 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1557 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1558 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1559 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1560 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1561 
1562 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1563 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1564 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1565 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1566 
1567 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1568 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1569 
1570 		/* this assumes override is only used to enable lanes */
1571 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1572 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1573 
1574 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1575 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1576 
1577 		/* CL1 is on whenever anything is on in either channel */
1578 		if (BITS_SET(phy_control,
1579 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1580 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1581 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1582 
1583 		/*
1584 		 * The DPLLB check accounts for the pipe B + port A usage
1585 		 * with CL2 powered up but all the lanes in the second channel
1586 		 * powered down.
1587 		 */
1588 		if (BITS_SET(phy_control,
1589 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1590 		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1591 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1592 
1593 		if (BITS_SET(phy_control,
1594 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1595 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1596 		if (BITS_SET(phy_control,
1597 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1598 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1599 
1600 		if (BITS_SET(phy_control,
1601 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1602 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1603 		if (BITS_SET(phy_control,
1604 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1605 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1606 	}
1607 
1608 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1609 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1610 
1611 		/* this assumes override is only used to enable lanes */
1612 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1613 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1614 
1615 		if (BITS_SET(phy_control,
1616 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1617 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1618 
1619 		if (BITS_SET(phy_control,
1620 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1621 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1622 		if (BITS_SET(phy_control,
1623 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1624 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1625 	}
1626 
1627 	phy_status &= phy_status_mask;
1628 
1629 	/*
1630 	 * The PHY may be busy with some initial calibration and whatnot,
1631 	 * so the power state can take a while to actually change.
1632 	 */
1633 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1634 				       phy_status_mask, phy_status, 10))
1635 		drm_err(&dev_priv->drm,
1636 			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1637 			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1638 			phy_status, dev_priv->chv_phy_control);
1639 }
1640 
1641 #undef BITS_SET
1642 
1643 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1644 					   struct i915_power_well *power_well)
1645 {
1646 	enum dpio_phy phy;
1647 	enum pipe pipe;
1648 	u32 tmp;
1649 
1650 	drm_WARN_ON_ONCE(&dev_priv->drm,
1651 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1652 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1653 
1654 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1655 		pipe = PIPE_A;
1656 		phy = DPIO_PHY0;
1657 	} else {
1658 		pipe = PIPE_C;
1659 		phy = DPIO_PHY1;
1660 	}
1661 
1662 	/* since ref/cri clock was enabled */
1663 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1664 	vlv_set_power_well(dev_priv, power_well, true);
1665 
1666 	/* Poll for phypwrgood signal */
1667 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1668 				  PHY_POWERGOOD(phy), 1))
1669 		drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
1670 			phy);
1671 
1672 	vlv_dpio_get(dev_priv);
1673 
1674 	/* Enable dynamic power down */
1675 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1676 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1677 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1678 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1679 
1680 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1681 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1682 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1683 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1684 	} else {
1685 		/*
1686 		 * Force the non-existent CL2 off. BXT does this
1687 		 * too, so maybe it saves some power even though
1688 		 * CL2 doesn't exist?
1689 		 */
1690 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1691 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1692 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1693 	}
1694 
1695 	vlv_dpio_put(dev_priv);
1696 
1697 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1698 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1699 		       dev_priv->chv_phy_control);
1700 
1701 	drm_dbg_kms(&dev_priv->drm,
1702 		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1703 		    phy, dev_priv->chv_phy_control);
1704 
1705 	assert_chv_phy_status(dev_priv);
1706 }
1707 
1708 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1709 					    struct i915_power_well *power_well)
1710 {
1711 	enum dpio_phy phy;
1712 
1713 	drm_WARN_ON_ONCE(&dev_priv->drm,
1714 			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1715 			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1716 
1717 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1718 		phy = DPIO_PHY0;
1719 		assert_pll_disabled(dev_priv, PIPE_A);
1720 		assert_pll_disabled(dev_priv, PIPE_B);
1721 	} else {
1722 		phy = DPIO_PHY1;
1723 		assert_pll_disabled(dev_priv, PIPE_C);
1724 	}
1725 
1726 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1727 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1728 		       dev_priv->chv_phy_control);
1729 
1730 	vlv_set_power_well(dev_priv, power_well, false);
1731 
1732 	drm_dbg_kms(&dev_priv->drm,
1733 		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1734 		    phy, dev_priv->chv_phy_control);
1735 
1736 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1737 	dev_priv->chv_phy_assert[phy] = true;
1738 
1739 	assert_chv_phy_status(dev_priv);
1740 }
1741 
1742 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1743 				     enum dpio_channel ch, bool override, unsigned int mask)
1744 {
1745 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1746 	u32 reg, val, expected, actual;
1747 
1748 	/*
1749 	 * The BIOS can leave the PHY in some weird state
1750 	 * where it doesn't fully power down some parts.
1751 	 * Disable the asserts until the PHY has been fully
1752 	 * reset (ie. the power well has been disabled at
1753 	 * least once).
1754 	 */
1755 	if (!dev_priv->chv_phy_assert[phy])
1756 		return;
1757 
1758 	if (ch == DPIO_CH0)
1759 		reg = _CHV_CMN_DW0_CH0;
1760 	else
1761 		reg = _CHV_CMN_DW6_CH1;
1762 
1763 	vlv_dpio_get(dev_priv);
1764 	val = vlv_dpio_read(dev_priv, pipe, reg);
1765 	vlv_dpio_put(dev_priv);
1766 
1767 	/*
1768 	 * This assumes !override is only used when the port is disabled.
1769 	 * All lanes should power down even without the override when
1770 	 * the port is disabled.
1771 	 */
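	/*
	 * Summarizing the checks below as a small truth table (modulo the
	 * CH1 special case handled inside the first branch):
	 *
	 *   override  mask     expected
	 *   --------  -------  ---------------------------------
	 *   false     any      ALLDL_POWERDOWN | ANYDL_POWERDOWN
	 *   true      0xf      ALLDL_POWERDOWN | ANYDL_POWERDOWN
	 *   true      0x1-0xe  ANYDL_POWERDOWN
	 *   true      0x0      none
	 */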
1772 	if (!override || mask == 0xf) {
1773 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1774 		/*
1775 		 * If CH1 common lane is not active anymore
1776 		 * (eg. for pipe B DPLL) the entire channel will
1777 		 * shut down, which causes the common lane registers
1778 		 * to read as 0. That means we can't actually check
1779 		 * the lane power down status bits, but as the entire
1780 		 * register reads as 0 it's a good indication that the
1781 		 * channel is indeed entirely powered down.
1782 		 */
1783 		if (ch == DPIO_CH1 && val == 0)
1784 			expected = 0;
1785 	} else if (mask != 0x0) {
1786 		expected = DPIO_ANYDL_POWERDOWN;
1787 	} else {
1788 		expected = 0;
1789 	}
1790 
1791 	if (ch == DPIO_CH0)
1792 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1793 	else
1794 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1795 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1796 
1797 	drm_WARN(&dev_priv->drm, actual != expected,
1798 		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1799 		 !!(actual & DPIO_ALLDL_POWERDOWN),
1800 		 !!(actual & DPIO_ANYDL_POWERDOWN),
1801 		 !!(expected & DPIO_ALLDL_POWERDOWN),
1802 		 !!(expected & DPIO_ANYDL_POWERDOWN),
1803 		 reg, val);
1804 }
1805 
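/*
 * Toggle the lane power down override enable for a whole PHY channel.
 * The update is done under the power domains lock, and the previous
 * override state is returned so the caller can restore it later.
 */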
1806 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1807 			  enum dpio_channel ch, bool override)
1808 {
1809 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1810 	bool was_override;
1811 
1812 	mutex_lock(&power_domains->lock);
1813 
1814 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1815 
1816 	if (override == was_override)
1817 		goto out;
1818 
1819 	if (override)
1820 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1821 	else
1822 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1823 
1824 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1825 		       dev_priv->chv_phy_control);
1826 
1827 	drm_dbg_kms(&dev_priv->drm,
1828 		    "Power gating DPIO PHY%d CH%d (PHY_CONTROL=0x%08x)\n",
1829 		    phy, ch, dev_priv->chv_phy_control);
1830 
1831 	assert_chv_phy_status(dev_priv);
1832 
1833 out:
1834 	mutex_unlock(&power_domains->lock);
1835 
1836 	return was_override;
1837 }
1838 
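/*
 * Program the per-lane power down override mask for the encoder's PHY
 * channel, optionally enabling the override, and cross-check the
 * resulting PHY status against the expected lane power down state.
 */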
1839 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1840 			     bool override, unsigned int mask)
1841 {
1842 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1843 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1844 	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
1845 	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
1846 
1847 	mutex_lock(&power_domains->lock);
1848 
1849 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1850 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1851 
1852 	if (override)
1853 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1854 	else
1855 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1856 
1857 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1858 		       dev_priv->chv_phy_control);
1859 
1860 	drm_dbg_kms(&dev_priv->drm,
1861 		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1862 		    phy, ch, mask, dev_priv->chv_phy_control);
1863 
1864 	assert_chv_phy_status(dev_priv);
1865 
1866 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1867 
1868 	mutex_unlock(&power_domains->lock);
1869 }
1870 
1871 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1872 					struct i915_power_well *power_well)
1873 {
1874 	enum pipe pipe = PIPE_A;
1875 	bool enabled;
1876 	u32 state, ctrl;
1877 
1878 	vlv_punit_get(dev_priv);
1879 
1880 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1881 	/*
1882 	 * We only ever set the power-on and power-gate states; anything
1883 	 * else is unexpected.
1884 	 */
1885 	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1886 		    state != DP_SSS_PWR_GATE(pipe));
1887 	enabled = state == DP_SSS_PWR_ON(pipe);
1888 
1889 	/*
1890 	 * A transient state at this point would mean some unexpected party
1891 	 * is poking at the power controls too.
1892 	 */
1893 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1894 	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1895 
1896 	vlv_punit_put(dev_priv);
1897 
1898 	return enabled;
1899 }
1900 
1901 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1902 				    struct i915_power_well *power_well,
1903 				    bool enable)
1904 {
1905 	enum pipe pipe = PIPE_A;
1906 	u32 state;
1907 	u32 ctrl;
1908 
1909 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1910 
1911 	vlv_punit_get(dev_priv);
1912 
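	/*
	 * The punit reports the pipe power state in the DP_SSS status
	 * field and accepts requests via the DP_SSC control field:
	 * request the new state below, then poll the status until it
	 * matches.
	 */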
1913 #define COND \
1914 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1915 
1916 	if (COND)
1917 		goto out;
1918 
1919 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1920 	ctrl &= ~DP_SSC_MASK(pipe);
1921 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1922 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1923 
1924 	if (wait_for(COND, 100))
1925 		drm_err(&dev_priv->drm,
1926 			"timeout setting power well state %08x (%08x)\n",
1927 			state,
1928 			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1929 
1930 #undef COND
1931 
1932 out:
1933 	vlv_punit_put(dev_priv);
1934 }
1935 
1936 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1937 					struct i915_power_well *power_well)
1938 {
1939 	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1940 		       dev_priv->chv_phy_control);
1941 }
1942 
1943 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1944 				       struct i915_power_well *power_well)
1945 {
1946 	chv_set_pipe_power_well(dev_priv, power_well, true);
1947 
1948 	vlv_display_power_well_init(dev_priv);
1949 }
1950 
1951 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1952 					struct i915_power_well *power_well)
1953 {
1954 	vlv_display_power_well_deinit(dev_priv);
1955 
1956 	chv_set_pipe_power_well(dev_priv, power_well, false);
1957 }
1958 
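/*
 * Async put bookkeeping: async_put_domains[0] holds the domains that the
 * currently queued work will release, while async_put_domains[1] collects
 * domains that were put asynchronously while that work was still pending
 * (the work handler requeues itself for those). The two masks must stay
 * disjoint, and async_put_wakeref keeps the device awake until the queued
 * work has run.
 */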
1959 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1960 {
1961 	return power_domains->async_put_domains[0] |
1962 	       power_domains->async_put_domains[1];
1963 }
1964 
1965 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1966 
1967 static bool
1968 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1969 {
1970 	struct drm_i915_private *i915 = container_of(power_domains,
1971 						     struct drm_i915_private,
1972 						     power_domains);
1973 	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
1974 			    power_domains->async_put_domains[1]);
1975 }
1976 
1977 static bool
1978 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1979 {
1980 	struct drm_i915_private *i915 = container_of(power_domains,
1981 						     struct drm_i915_private,
1982 						     power_domains);
1983 	enum intel_display_power_domain domain;
1984 	bool err = false;
1985 
1986 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1987 	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
1988 			   !!__async_put_domains_mask(power_domains));
1989 
1990 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1991 		err |= drm_WARN_ON(&i915->drm,
1992 				   power_domains->domain_use_count[domain] != 1);
1993 
1994 	return !err;
1995 }
1996 
1997 static void print_power_domains(struct i915_power_domains *power_domains,
1998 				const char *prefix, u64 mask)
1999 {
2000 	struct drm_i915_private *i915 = container_of(power_domains,
2001 						     struct drm_i915_private,
2002 						     power_domains);
2003 	enum intel_display_power_domain domain;
2004 
2005 	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
2006 	for_each_power_domain(domain, mask)
2007 		drm_dbg(&i915->drm, "%s use_count %d\n",
2008 			intel_display_power_domain_str(domain),
2009 			power_domains->domain_use_count[domain]);
2010 }
2011 
2012 static void
2013 print_async_put_domains_state(struct i915_power_domains *power_domains)
2014 {
2015 	struct drm_i915_private *i915 = container_of(power_domains,
2016 						     struct drm_i915_private,
2017 						     power_domains);
2018 
2019 	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
2020 		power_domains->async_put_wakeref);
2021 
2022 	print_power_domains(power_domains, "async_put_domains[0]",
2023 			    power_domains->async_put_domains[0]);
2024 	print_power_domains(power_domains, "async_put_domains[1]",
2025 			    power_domains->async_put_domains[1]);
2026 }
2027 
2028 static void
2029 verify_async_put_domains_state(struct i915_power_domains *power_domains)
2030 {
2031 	if (!__async_put_domains_state_ok(power_domains))
2032 		print_async_put_domains_state(power_domains);
2033 }
2034 
2035 #else
2036 
2037 static void
2038 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
2039 {
2040 }
2041 
2042 static void
2043 verify_async_put_domains_state(struct i915_power_domains *power_domains)
2044 {
2045 }
2046 
2047 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2048 
2049 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2050 {
2051 	assert_async_put_domain_masks_disjoint(power_domains);
2052 
2053 	return __async_put_domains_mask(power_domains);
2054 }
2055 
2056 static void
2057 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2058 			       enum intel_display_power_domain domain)
2059 {
2060 	assert_async_put_domain_masks_disjoint(power_domains);
2061 
2062 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2063 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2064 }
2065 
2066 static bool
2067 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2068 				       enum intel_display_power_domain domain)
2069 {
2070 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2071 	bool ret = false;
2072 
2073 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2074 		goto out_verify;
2075 
2076 	async_put_domains_clear_domain(power_domains, domain);
2077 
2078 	ret = true;
2079 
2080 	if (async_put_domains_mask(power_domains))
2081 		goto out_verify;
2082 
2083 	cancel_delayed_work(&power_domains->async_put_work);
2084 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2085 				 fetch_and_zero(&power_domains->async_put_wakeref));
2086 out_verify:
2087 	verify_async_put_domains_state(power_domains);
2088 
2089 	return ret;
2090 }
2091 
2092 static void
2093 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2094 				 enum intel_display_power_domain domain)
2095 {
2096 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2097 	struct i915_power_well *power_well;
2098 
2099 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2100 		return;
2101 
2102 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2103 		intel_power_well_get(dev_priv, power_well);
2104 
2105 	power_domains->domain_use_count[domain]++;
2106 }
2107 
2108 /**
2109  * intel_display_power_get - grab a power domain reference
2110  * @dev_priv: i915 device instance
2111  * @domain: power domain to reference
2112  *
2113  * This function grabs a power domain reference for @domain and ensures that the
2114  * power domain and all its parents are powered up. Therefore users should only
2115  * grab a reference to the innermost power domain they need.
2116  *
2117  * Any power domain reference obtained by this function must have a symmetric
2118  * call to intel_display_power_put() to release the reference again.
2119  */
2120 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2121 					enum intel_display_power_domain domain)
2122 {
2123 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2124 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2125 
2126 	mutex_lock(&power_domains->lock);
2127 	__intel_display_power_get_domain(dev_priv, domain);
2128 	mutex_unlock(&power_domains->lock);
2129 
2130 	return wakeref;
2131 }
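/*
 * A minimal usage sketch (illustrative only, with POWER_DOMAIN_PIPE_A
 * standing in for whatever domain the caller actually needs):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access hardware that needs pipe A powered up ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */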
2132 
2133 /**
2134  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2135  * @dev_priv: i915 device instance
2136  * @domain: power domain to reference
2137  *
2138  * This function grabs a power domain reference for @domain if, and only if,
2139  * the power domain is already enabled, and returns 0 without taking a
2140  * reference otherwise, so callers can skip the hardware access entirely.
2141  *
2142  * Any power domain reference obtained by this function must have a symmetric
2143  * call to intel_display_power_put() to release the reference again.
2144  */
2145 intel_wakeref_t
2146 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2147 				   enum intel_display_power_domain domain)
2148 {
2149 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2150 	intel_wakeref_t wakeref;
2151 	bool is_enabled;
2152 
2153 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2154 	if (!wakeref)
2155 		return 0;
2156 
2157 	mutex_lock(&power_domains->lock);
2158 
2159 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
2160 		__intel_display_power_get_domain(dev_priv, domain);
2161 		is_enabled = true;
2162 	} else {
2163 		is_enabled = false;
2164 	}
2165 
2166 	mutex_unlock(&power_domains->lock);
2167 
2168 	if (!is_enabled) {
2169 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2170 		wakeref = 0;
2171 	}
2172 
2173 	return wakeref;
2174 }
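/*
 * Typical conditional-access pattern (sketch only):
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;
 *	... read registers that need @domain powered up ...
 *	intel_display_power_put(dev_priv, domain, wakeref);
 */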
2175 
2176 static void
2177 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2178 				 enum intel_display_power_domain domain)
2179 {
2180 	struct i915_power_domains *power_domains;
2181 	struct i915_power_well *power_well;
2182 	const char *name = intel_display_power_domain_str(domain);
2183 
2184 	power_domains = &dev_priv->power_domains;
2185 
2186 	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2187 		 "Use count on domain %s is already zero\n",
2188 		 name);
2189 	drm_WARN(&dev_priv->drm,
2190 		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2191 		 "Async disabling of domain %s is pending\n",
2192 		 name);
2193 
2194 	power_domains->domain_use_count[domain]--;
2195 
2196 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2197 		intel_power_well_put(dev_priv, power_well);
2198 }
2199 
2200 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2201 				      enum intel_display_power_domain domain)
2202 {
2203 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2204 
2205 	mutex_lock(&power_domains->lock);
2206 	__intel_display_power_put_domain(dev_priv, domain);
2207 	mutex_unlock(&power_domains->lock);
2208 }
2209 
2210 static void
2211 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2212 			     intel_wakeref_t wakeref)
2213 {
2214 	struct drm_i915_private *i915 = container_of(power_domains,
2215 						     struct drm_i915_private,
2216 						     power_domains);
2217 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2218 	power_domains->async_put_wakeref = wakeref;
2219 	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
2220 						    &power_domains->async_put_work,
2221 						    msecs_to_jiffies(100)));
2222 }
2223 
2224 static void
2225 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2226 {
2227 	struct drm_i915_private *dev_priv =
2228 		container_of(power_domains, struct drm_i915_private,
2229 			     power_domains);
2230 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2231 	enum intel_display_power_domain domain;
2232 	intel_wakeref_t wakeref;
2233 
2234 	/*
2235 	 * The caller must already hold a raw wakeref; upgrade that to a proper
2236 	 * wakeref to make the state checker happy about the HW access during
2237 	 * power well disabling.
2238 	 */
2239 	assert_rpm_raw_wakeref_held(rpm);
2240 	wakeref = intel_runtime_pm_get(rpm);
2241 
2242 	for_each_power_domain(domain, mask) {
2243 		/* Clear before put, so put's sanity check is happy. */
2244 		async_put_domains_clear_domain(power_domains, domain);
2245 		__intel_display_power_put_domain(dev_priv, domain);
2246 	}
2247 
2248 	intel_runtime_pm_put(rpm, wakeref);
2249 }
2250 
2251 static void
2252 intel_display_power_put_async_work(struct work_struct *work)
2253 {
2254 	struct drm_i915_private *dev_priv =
2255 		container_of(work, struct drm_i915_private,
2256 			     power_domains.async_put_work.work);
2257 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2258 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2259 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2260 	intel_wakeref_t old_work_wakeref = 0;
2261 
2262 	mutex_lock(&power_domains->lock);
2263 
2264 	/*
2265 	 * Bail out if all the domain refs pending to be released were grabbed
2266 	 * by subsequent gets or a flush_work.
2267 	 */
2268 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2269 	if (!old_work_wakeref)
2270 		goto out_verify;
2271 
2272 	release_async_put_domains(power_domains,
2273 				  power_domains->async_put_domains[0]);
2274 
2275 	/* Requeue the work if more domains were async put meanwhile. */
2276 	if (power_domains->async_put_domains[1]) {
2277 		power_domains->async_put_domains[0] =
2278 			fetch_and_zero(&power_domains->async_put_domains[1]);
2279 		queue_async_put_domains_work(power_domains,
2280 					     fetch_and_zero(&new_work_wakeref));
2281 	} else {
2282 		/*
2283 		 * Cancel the work that got queued after this one got dequeued,
2284 		 * since here we released the corresponding async-put reference.
2285 		 */
2286 		cancel_delayed_work(&power_domains->async_put_work);
2287 	}
2288 
2289 out_verify:
2290 	verify_async_put_domains_state(power_domains);
2291 
2292 	mutex_unlock(&power_domains->lock);
2293 
2294 	if (old_work_wakeref)
2295 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2296 	if (new_work_wakeref)
2297 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2298 }
2299 
2300 /**
2301  * __intel_display_power_put_async - release a power domain reference asynchronously
2302  * @i915: i915 device instance
2303  * @domain: power domain to reference
2304  * @wakeref: wakeref acquired for the reference that is being released
2305  *
2306  * This function drops the power domain reference obtained by
2307  * intel_display_power_get*() and schedules a work to power down the
2308  * corresponding hardware block if this is the last reference.
2309  */
2310 void __intel_display_power_put_async(struct drm_i915_private *i915,
2311 				     enum intel_display_power_domain domain,
2312 				     intel_wakeref_t wakeref)
2313 {
2314 	struct i915_power_domains *power_domains = &i915->power_domains;
2315 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2316 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2317 
2318 	mutex_lock(&power_domains->lock);
2319 
2320 	if (power_domains->domain_use_count[domain] > 1) {
2321 		__intel_display_power_put_domain(i915, domain);
2322 
2323 		goto out_verify;
2324 	}
2325 
2326 	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2327 
2328 	/* Let a pending work requeue itself or queue a new one. */
2329 	if (power_domains->async_put_wakeref) {
2330 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2331 	} else {
2332 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2333 		queue_async_put_domains_work(power_domains,
2334 					     fetch_and_zero(&work_wakeref));
2335 	}
2336 
2337 out_verify:
2338 	verify_async_put_domains_state(power_domains);
2339 
2340 	mutex_unlock(&power_domains->lock);
2341 
2342 	if (work_wakeref)
2343 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2344 
2345 	intel_runtime_pm_put(rpm, wakeref);
2346 }
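/*
 * Sketch of the async pattern: the put itself is cheap in the caller's
 * context, and the actual power down happens from a worker at least
 * 100 msec later, which avoids on/off thrashing of the power wells:
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	... short hardware access ...
 *	intel_display_power_put_async(i915, domain, wakeref);
 */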
2347 
2348 /**
2349  * intel_display_power_flush_work - flushes the async display power disabling work
2350  * @i915: i915 device instance
2351  *
2352  * Flushes any pending work that was scheduled by a preceding
2353  * intel_display_power_put_async() call, completing the disabling of the
2354  * corresponding power domains.
2355  *
2356  * Note that the work handler function may still be running after this
2357  * function returns; to ensure that the work handler isn't running, use
2358  * intel_display_power_flush_work_sync() instead.
2359  */
2360 void intel_display_power_flush_work(struct drm_i915_private *i915)
2361 {
2362 	struct i915_power_domains *power_domains = &i915->power_domains;
2363 	intel_wakeref_t work_wakeref;
2364 
2365 	mutex_lock(&power_domains->lock);
2366 
2367 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2368 	if (!work_wakeref)
2369 		goto out_verify;
2370 
2371 	release_async_put_domains(power_domains,
2372 				  async_put_domains_mask(power_domains));
2373 	cancel_delayed_work(&power_domains->async_put_work);
2374 
2375 out_verify:
2376 	verify_async_put_domains_state(power_domains);
2377 
2378 	mutex_unlock(&power_domains->lock);
2379 
2380 	if (work_wakeref)
2381 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2382 }
2383 
2384 /**
2385  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2386  * @i915: i915 device instance
2387  *
2388  * Like intel_display_power_flush_work(), but also ensure that the work
2389  * handler function is not running any more when this function returns.
2390  */
2391 static void
2392 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2393 {
2394 	struct i915_power_domains *power_domains = &i915->power_domains;
2395 
2396 	intel_display_power_flush_work(i915);
2397 	cancel_delayed_work_sync(&power_domains->async_put_work);
2398 
2399 	verify_async_put_domains_state(power_domains);
2400 
2401 	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2402 }
2403 
2404 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2405 /**
2406  * intel_display_power_put - release a power domain reference
2407  * @dev_priv: i915 device instance
2408  * @domain: power domain to reference
2409  * @wakeref: wakeref acquired for the reference that is being released
2410  *
2411  * This function drops the power domain reference obtained by
2412  * intel_display_power_get() and might power down the corresponding hardware
2413  * block right away if this is the last reference.
2414  */
2415 void intel_display_power_put(struct drm_i915_private *dev_priv,
2416 			     enum intel_display_power_domain domain,
2417 			     intel_wakeref_t wakeref)
2418 {
2419 	__intel_display_power_put(dev_priv, domain);
2420 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2421 }
2422 #else
2423 /**
2424  * intel_display_power_put_unchecked - release an unchecked power domain reference
2425  * @dev_priv: i915 device instance
2426  * @domain: power domain to reference
2427  *
2428  * This function drops the power domain reference obtained by
2429  * intel_display_power_get() and might power down the corresponding hardware
2430  * block right away if this is the last reference.
2431  *
2432  * This function is only for the power domain code's internal use to suppress
2433  * wakeref tracking when the corresponding debug kconfig option is disabled;
2434  * it should not be used otherwise.
2435  */
2436 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2437 				       enum intel_display_power_domain domain)
2438 {
2439 	__intel_display_power_put(dev_priv, domain);
2440 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2441 }
2442 #endif
2443 
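/*
 * The *_in_set variants below additionally record each acquired domain
 * (and, with wakeref tracking enabled, its wakeref) in an
 * intel_display_power_domain_set, so that a whole group of references
 * can later be dropped in bulk with intel_display_power_put_mask_in_set().
 */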
2444 void
2445 intel_display_power_get_in_set(struct drm_i915_private *i915,
2446 			       struct intel_display_power_domain_set *power_domain_set,
2447 			       enum intel_display_power_domain domain)
2448 {
2449 	intel_wakeref_t __maybe_unused wf;
2450 
2451 	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2452 
2453 	wf = intel_display_power_get(i915, domain);
2454 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2455 	power_domain_set->wakerefs[domain] = wf;
2456 #endif
2457 	power_domain_set->mask |= BIT_ULL(domain);
2458 }
2459 
2460 bool
2461 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
2462 					  struct intel_display_power_domain_set *power_domain_set,
2463 					  enum intel_display_power_domain domain)
2464 {
2465 	intel_wakeref_t wf;
2466 
2467 	drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
2468 
2469 	wf = intel_display_power_get_if_enabled(i915, domain);
2470 	if (!wf)
2471 		return false;
2472 
2473 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2474 	power_domain_set->wakerefs[domain] = wf;
2475 #endif
2476 	power_domain_set->mask |= BIT_ULL(domain);
2477 
2478 	return true;
2479 }
2480 
2481 void
2482 intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
2483 				    struct intel_display_power_domain_set *power_domain_set,
2484 				    u64 mask)
2485 {
2486 	enum intel_display_power_domain domain;
2487 
2488 	drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
2489 
2490 	for_each_power_domain(domain, mask) {
2491 		intel_wakeref_t __maybe_unused wf = -1;
2492 
2493 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2494 		wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
2495 #endif
2496 		intel_display_power_put(i915, domain, wf);
2497 		power_domain_set->mask &= ~BIT_ULL(domain);
2498 	}
2499 }
2500 
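/*
 * Per-platform power domain masks: each *_POWER_DOMAINS macro below lists
 * the domains that a given power well serves, and is referenced from the
 * i915_power_well_desc tables further down.
 */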
2501 #define I830_PIPES_POWER_DOMAINS (		\
2502 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2503 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2504 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2505 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2506 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2507 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2508 	BIT_ULL(POWER_DOMAIN_INIT))
2509 
2510 #define VLV_DISPLAY_POWER_DOMAINS (		\
2511 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2512 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2513 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2514 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2515 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2516 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2517 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2518 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2519 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2520 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2521 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2522 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2523 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2524 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
2525 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2526 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2527 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2528 	BIT_ULL(POWER_DOMAIN_INIT))
2529 
2530 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2531 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2532 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2533 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2534 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2535 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2536 	BIT_ULL(POWER_DOMAIN_INIT))
2537 
2538 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2539 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2540 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2541 	BIT_ULL(POWER_DOMAIN_INIT))
2542 
2543 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2544 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2545 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2546 	BIT_ULL(POWER_DOMAIN_INIT))
2547 
2548 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2549 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2550 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2551 	BIT_ULL(POWER_DOMAIN_INIT))
2552 
2553 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2554 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2555 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2556 	BIT_ULL(POWER_DOMAIN_INIT))
2557 
2558 #define CHV_DISPLAY_POWER_DOMAINS (		\
2559 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2560 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2561 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2562 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2563 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2564 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2565 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2566 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2567 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2568 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2569 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2570 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2571 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2572 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2573 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2574 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2575 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |		\
2576 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2577 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2578 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2579 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2580 	BIT_ULL(POWER_DOMAIN_INIT))
2581 
2582 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2583 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2584 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2585 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2586 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2587 	BIT_ULL(POWER_DOMAIN_INIT))
2588 
2589 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2590 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2591 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2592 	BIT_ULL(POWER_DOMAIN_INIT))
2593 
2594 #define HSW_DISPLAY_POWER_DOMAINS (			\
2595 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2596 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2597 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2598 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2599 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2600 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2601 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2602 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2603 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2604 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2605 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2606 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2607 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2608 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2609 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2610 	BIT_ULL(POWER_DOMAIN_INIT))
2611 
2612 #define BDW_DISPLAY_POWER_DOMAINS (			\
2613 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2614 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2615 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2616 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2617 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2618 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2619 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2620 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2621 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2622 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2623 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2624 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2625 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2626 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2627 	BIT_ULL(POWER_DOMAIN_INIT))
2628 
2629 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2630 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2631 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2632 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2633 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2634 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2635 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2636 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2637 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2638 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2639 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2640 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2641 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2642 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2643 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2644 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2645 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2646 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2647 	BIT_ULL(POWER_DOMAIN_INIT))
2648 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2649 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2650 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2651 	BIT_ULL(POWER_DOMAIN_INIT))
2652 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2653 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2654 	BIT_ULL(POWER_DOMAIN_INIT))
2655 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2656 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2657 	BIT_ULL(POWER_DOMAIN_INIT))
2658 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2659 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2660 	BIT_ULL(POWER_DOMAIN_INIT))
2661 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2662 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2663 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2664 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2665 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2666 	BIT_ULL(POWER_DOMAIN_INIT))
2667 
2668 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2669 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2670 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2671 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2672 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2673 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2674 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2675 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2676 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2677 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2678 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2679 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2680 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2681 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2682 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2683 	BIT_ULL(POWER_DOMAIN_INIT))
2684 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2685 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2686 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2687 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2688 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2689 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2690 	BIT_ULL(POWER_DOMAIN_INIT))
2691 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2692 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2693 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2694 	BIT_ULL(POWER_DOMAIN_INIT))
2695 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2696 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2697 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2698 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2699 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2700 	BIT_ULL(POWER_DOMAIN_INIT))
2701 
2702 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2703 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2704 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2705 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2706 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2707 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2708 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2709 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2710 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2711 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2712 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2713 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2714 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2715 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2716 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2717 	BIT_ULL(POWER_DOMAIN_INIT))
2718 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2719 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2720 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2721 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2722 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2723 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2724 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2725 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2726 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2727 	BIT_ULL(POWER_DOMAIN_INIT))
2728 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2729 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2730 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2731 	BIT_ULL(POWER_DOMAIN_INIT))
2732 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2733 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2734 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2735 	BIT_ULL(POWER_DOMAIN_INIT))
2736 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2737 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2738 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2739 	BIT_ULL(POWER_DOMAIN_INIT))
2740 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2741 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2742 	BIT_ULL(POWER_DOMAIN_INIT))
2743 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2744 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2745 	BIT_ULL(POWER_DOMAIN_INIT))
2746 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2747 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2748 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2749 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2750 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2751 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2752 	BIT_ULL(POWER_DOMAIN_INIT))
2753 
2754 /*
2755  * ICL PW_0/PG_0 domains (HW/DMC control):
2756  * - PCI
2757  * - clocks except port PLL
2758  * - central power except FBC
2759  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2760  * ICL PW_1/PG_1 domains (HW/DMC control):
2761  * - DBUF function
2762  * - PIPE_A and its planes, except VGA
2763  * - transcoder EDP + PSR
2764  * - transcoder DSI
2765  * - DDI_A
2766  * - FBC
2767  */
2768 #define ICL_PW_4_POWER_DOMAINS (			\
2769 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2770 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2771 	BIT_ULL(POWER_DOMAIN_INIT))
2772 	/* VDSC/joining */
2773 #define ICL_PW_3_POWER_DOMAINS (			\
2774 	ICL_PW_4_POWER_DOMAINS |			\
2775 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2776 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2777 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2778 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2779 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2780 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2781 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2782 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2783 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2784 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2785 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2786 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2787 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2788 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2789 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2790 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2791 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2792 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2793 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2794 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2795 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2796 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2797 	BIT_ULL(POWER_DOMAIN_INIT))
2798 	/*
2799 	 * - transcoder WD
2800 	 * - KVMR (HW control)
2801 	 */
2802 #define ICL_PW_2_POWER_DOMAINS (			\
2803 	ICL_PW_3_POWER_DOMAINS |			\
2804 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2805 	BIT_ULL(POWER_DOMAIN_INIT))
2806 	/*
2807 	 * - KVMR (HW control)
2808 	 */
2809 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2810 	ICL_PW_2_POWER_DOMAINS |			\
2811 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2812 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2813 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2814 	BIT_ULL(POWER_DOMAIN_INIT))
2815 
2816 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2817 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2818 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2819 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2820 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2821 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2822 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2823 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2824 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2825 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2826 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2827 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2828 
2829 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2830 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2831 	BIT_ULL(POWER_DOMAIN_AUX_A))
2832 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2833 	BIT_ULL(POWER_DOMAIN_AUX_B))
2834 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2835 	BIT_ULL(POWER_DOMAIN_AUX_C))
2836 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2837 	BIT_ULL(POWER_DOMAIN_AUX_D))
2838 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2839 	BIT_ULL(POWER_DOMAIN_AUX_E))
2840 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2841 	BIT_ULL(POWER_DOMAIN_AUX_F))
2842 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2843 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2844 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2845 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2846 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2847 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2848 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2849 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2850 
2851 #define TGL_PW_5_POWER_DOMAINS (			\
2852 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2853 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2854 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2855 	BIT_ULL(POWER_DOMAIN_INIT))
2856 
2857 #define TGL_PW_4_POWER_DOMAINS (			\
2858 	TGL_PW_5_POWER_DOMAINS |			\
2859 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2860 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2861 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2862 	BIT_ULL(POWER_DOMAIN_INIT))
2863 
2864 #define TGL_PW_3_POWER_DOMAINS (			\
2865 	TGL_PW_4_POWER_DOMAINS |			\
2866 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2867 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2868 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2869 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2870 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2871 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
2872 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
2873 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) |	\
2874 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) |	\
2875 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2876 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2877 	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |		\
2878 	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |		\
2879 	BIT_ULL(POWER_DOMAIN_AUX_USBC5) |		\
2880 	BIT_ULL(POWER_DOMAIN_AUX_USBC6) |		\
2881 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
2882 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
2883 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
2884 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
2885 	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |		\
2886 	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |		\
2887 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2888 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2889 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2890 	BIT_ULL(POWER_DOMAIN_INIT))
2891 
2892 #define TGL_PW_2_POWER_DOMAINS (			\
2893 	TGL_PW_3_POWER_DOMAINS |			\
2894 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2895 	BIT_ULL(POWER_DOMAIN_INIT))
2896 
2897 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2898 	TGL_PW_3_POWER_DOMAINS |			\
2899 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2900 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2901 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2902 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2903 	BIT_ULL(POWER_DOMAIN_INIT))
2904 
2905 #define TGL_DDI_IO_TC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
2906 #define TGL_DDI_IO_TC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
2907 #define TGL_DDI_IO_TC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
2908 #define TGL_DDI_IO_TC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
2909 #define TGL_DDI_IO_TC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
2910 #define TGL_DDI_IO_TC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
2911 
2912 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2913 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2914 	BIT_ULL(POWER_DOMAIN_AUX_A))
2915 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2916 	BIT_ULL(POWER_DOMAIN_AUX_B))
2917 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2918 	BIT_ULL(POWER_DOMAIN_AUX_C))
2919 
2920 #define TGL_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
2921 #define TGL_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
2922 #define TGL_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
2923 #define TGL_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
2924 #define TGL_AUX_IO_USBC5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC5)
2925 #define TGL_AUX_IO_USBC6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC6)
2926 
2927 #define TGL_AUX_IO_TBT1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT1)
2928 #define TGL_AUX_IO_TBT2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT2)
2929 #define TGL_AUX_IO_TBT3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT3)
2930 #define TGL_AUX_IO_TBT4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT4)
2931 #define TGL_AUX_IO_TBT5_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT5)
2932 #define TGL_AUX_IO_TBT6_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_TBT6)
2933 
2934 #define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
2935 	BIT_ULL(POWER_DOMAIN_AUX_USBC1)	|	\
2936 	BIT_ULL(POWER_DOMAIN_AUX_USBC2)	|	\
2937 	BIT_ULL(POWER_DOMAIN_AUX_USBC3)	|	\
2938 	BIT_ULL(POWER_DOMAIN_AUX_USBC4)	|	\
2939 	BIT_ULL(POWER_DOMAIN_AUX_USBC5)	|	\
2940 	BIT_ULL(POWER_DOMAIN_AUX_USBC6)	|	\
2941 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |	\
2942 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |	\
2943 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |	\
2944 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |	\
2945 	BIT_ULL(POWER_DOMAIN_AUX_TBT5) |	\
2946 	BIT_ULL(POWER_DOMAIN_AUX_TBT6) |	\
2947 	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2948 
2949 #define RKL_PW_4_POWER_DOMAINS (			\
2950 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2951 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2952 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2953 	BIT_ULL(POWER_DOMAIN_INIT))
2954 
2955 #define RKL_PW_3_POWER_DOMAINS (			\
2956 	RKL_PW_4_POWER_DOMAINS |			\
2957 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2958 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2959 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
2960 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
2961 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2962 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2963 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
2964 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
2965 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
2966 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
2967 	BIT_ULL(POWER_DOMAIN_INIT))
2968 
2969 /*
2970  * There is no PW_2/PG_2 on RKL.
2971  *
2972  * RKL PW_1/PG_1 domains (under HW/DMC control):
2973  * - DBUF function (note: registers are in PW0)
2974  * - PIPE_A and its planes and VDSC/joining, except VGA
2975  * - transcoder A
2976  * - DDI_A and DDI_B
2977  * - FBC
2978  *
2979  * RKL PW_0/PG_0 domains (under HW/DMC control):
2980  * - PCI
2981  * - clocks except port PLL
2982  * - shared functions:
2983  *     * interrupts except pipe interrupts
2984  *     * MBus except PIPE_MBUS_DBOX_CTL
2985  *     * DBUF registers
2986  * - central power except FBC
2987  * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
2988  */
2989 
2990 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2991 	RKL_PW_3_POWER_DOMAINS |			\
2992 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2993 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2994 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2995 	BIT_ULL(POWER_DOMAIN_INIT))
2996 
2997 /*
2998  * From DG1 onwards the audio MMIO/VERBS interface lies in the PG0 power well.
2999  */
3000 #define DG1_PW_3_POWER_DOMAINS (			\
3001 	TGL_PW_4_POWER_DOMAINS |			\
3002 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
3003 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
3004 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
3005 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
3006 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
3007 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |		\
3008 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |		\
3009 	BIT_ULL(POWER_DOMAIN_VGA) |			\
3010 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
3011 	BIT_ULL(POWER_DOMAIN_INIT))
3012 
3013 #define DG1_PW_2_POWER_DOMAINS (			\
3014 	DG1_PW_3_POWER_DOMAINS |			\
3015 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
3016 	BIT_ULL(POWER_DOMAIN_INIT))
3017 
3018 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3019 	DG1_PW_3_POWER_DOMAINS |			\
3020 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
3021 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3022 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3023 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3024 	BIT_ULL(POWER_DOMAIN_INIT))
3025 
3026 /*
3027  * XE_LPD Power Domains
3028  *
3029  * Previous platforms required that PG(n-1) be enabled before PG(n).  That
3030  * dependency chain turns into a dependency tree on XE_LPD:
3031  *
3032  *       PG0
3033  *        |
3034  *     --PG1--
3035  *    /       \
3036  *  PGA     --PG2--
3037  *         /   |   \
3038  *       PGB  PGC  PGD
3039  *
3040  * Power wells must be enabled from top to bottom and disabled from bottom
3041  * to top.  This allows pipes to be power gated independently.
3042  */
3043 
3044 #define XELPD_PW_D_POWER_DOMAINS (			\
3045 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
3046 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
3047 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
3048 	BIT_ULL(POWER_DOMAIN_INIT))
3049 
3050 #define XELPD_PW_C_POWER_DOMAINS (			\
3051 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
3052 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
3053 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
3054 	BIT_ULL(POWER_DOMAIN_INIT))
3055 
3056 #define XELPD_PW_B_POWER_DOMAINS (			\
3057 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
3058 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
3059 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
3060 	BIT_ULL(POWER_DOMAIN_INIT))
3061 
3062 #define XELPD_PW_A_POWER_DOMAINS (			\
3063 	BIT_ULL(POWER_DOMAIN_PIPE_A) |			\
3064 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
3065 	BIT_ULL(POWER_DOMAIN_INIT))
3066 
3067 #define XELPD_PW_2_POWER_DOMAINS (			\
3068 	XELPD_PW_B_POWER_DOMAINS |			\
3069 	XELPD_PW_C_POWER_DOMAINS |			\
3070 	XELPD_PW_D_POWER_DOMAINS |			\
3071 	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |			\
3072 	BIT_ULL(POWER_DOMAIN_VGA) |			\
3073 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
3074 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |	\
3075 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) |	\
3076 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |	\
3077 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |	\
3078 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) |	\
3079 	BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) |	\
3080 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
3081 	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) |		\
3082 	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) |		\
3083 	BIT_ULL(POWER_DOMAIN_AUX_USBC1) |			\
3084 	BIT_ULL(POWER_DOMAIN_AUX_USBC2) |			\
3085 	BIT_ULL(POWER_DOMAIN_AUX_USBC3) |			\
3086 	BIT_ULL(POWER_DOMAIN_AUX_USBC4) |			\
3087 	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |			\
3088 	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |			\
3089 	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |			\
3090 	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |			\
3091 	BIT_ULL(POWER_DOMAIN_INIT))
3092 
3093 /*
3094  * XELPD PW_1/PG_1 domains (under HW/DMC control):
3095  *  - DBUF function (registers are in PW0)
3096  *  - Transcoder A
3097  *  - DDI_A and DDI_B
3098  *
3099  * XELPD PW_0/PG_0 domains (under HW/DMC control):
3100  *  - PCI
3101  *  - Clocks except port PLL
3102  *  - Shared functions:
3103  *     * interrupts except pipe interrupts
3104  *     * MBus except PIPE_MBUS_DBOX_CTL
3105  *     * DBUF registers
3106  *  - Central power except FBC
3107  *  - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
3108  */
3109 
3110 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (		\
3111 	XELPD_PW_2_POWER_DOMAINS |			\
3112 	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |		\
3113 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
3114 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
3115 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
3116 	BIT_ULL(POWER_DOMAIN_INIT))
3117 
3118 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
3119 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
3120 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC1)
3121 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC2)
3122 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC3)
3123 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_AUX_USBC4)
3124 
3125 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT1)
3126 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT2)
3127 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT3)
3128 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_AUX_TBT4)
3129 
3130 #define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
3131 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS	BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
3132 #define XELPD_DDI_IO_TC1_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
3133 #define XELPD_DDI_IO_TC2_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
3134 #define XELPD_DDI_IO_TC3_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
3135 #define XELPD_DDI_IO_TC4_POWER_DOMAINS		BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
3136 
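/*
 * Power well vtables: sync_hw reconciles the driver's bookkeeping with
 * whatever state the BIOS/firmware left behind at init, enable/disable
 * perform the actual state change, and is_enabled reads back the current
 * hardware state.
 */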
3137 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
3138 	.sync_hw = i9xx_power_well_sync_hw_noop,
3139 	.enable = i9xx_always_on_power_well_noop,
3140 	.disable = i9xx_always_on_power_well_noop,
3141 	.is_enabled = i9xx_always_on_power_well_enabled,
3142 };
3143 
3144 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
3145 	.sync_hw = chv_pipe_power_well_sync_hw,
3146 	.enable = chv_pipe_power_well_enable,
3147 	.disable = chv_pipe_power_well_disable,
3148 	.is_enabled = chv_pipe_power_well_enabled,
3149 };
3150 
3151 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
3152 	.sync_hw = i9xx_power_well_sync_hw_noop,
3153 	.enable = chv_dpio_cmn_power_well_enable,
3154 	.disable = chv_dpio_cmn_power_well_disable,
3155 	.is_enabled = vlv_power_well_enabled,
3156 };
3157 
3158 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
3159 	{
3160 		.name = "always-on",
3161 		.always_on = true,
3162 		.domains = POWER_DOMAIN_MASK,
3163 		.ops = &i9xx_always_on_power_well_ops,
3164 		.id = DISP_PW_ID_NONE,
3165 	},
3166 };
3167 
3168 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3169 	.sync_hw = i830_pipes_power_well_sync_hw,
3170 	.enable = i830_pipes_power_well_enable,
3171 	.disable = i830_pipes_power_well_disable,
3172 	.is_enabled = i830_pipes_power_well_enabled,
3173 };
3174 
3175 static const struct i915_power_well_desc i830_power_wells[] = {
3176 	{
3177 		.name = "always-on",
3178 		.always_on = true,
3179 		.domains = POWER_DOMAIN_MASK,
3180 		.ops = &i9xx_always_on_power_well_ops,
3181 		.id = DISP_PW_ID_NONE,
3182 	},
3183 	{
3184 		.name = "pipes",
3185 		.domains = I830_PIPES_POWER_DOMAINS,
3186 		.ops = &i830_pipes_power_well_ops,
3187 		.id = DISP_PW_ID_NONE,
3188 	},
3189 };
3190 
3191 static const struct i915_power_well_ops hsw_power_well_ops = {
3192 	.sync_hw = hsw_power_well_sync_hw,
3193 	.enable = hsw_power_well_enable,
3194 	.disable = hsw_power_well_disable,
3195 	.is_enabled = hsw_power_well_enabled,
3196 };
3197 
3198 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
3199 	.sync_hw = i9xx_power_well_sync_hw_noop,
3200 	.enable = gen9_dc_off_power_well_enable,
3201 	.disable = gen9_dc_off_power_well_disable,
3202 	.is_enabled = gen9_dc_off_power_well_enabled,
3203 };
3204 
3205 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
3206 	.sync_hw = i9xx_power_well_sync_hw_noop,
3207 	.enable = bxt_dpio_cmn_power_well_enable,
3208 	.disable = bxt_dpio_cmn_power_well_disable,
3209 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
3210 };
3211 
3212 static const struct i915_power_well_regs hsw_power_well_regs = {
3213 	.bios	= HSW_PWR_WELL_CTL1,
3214 	.driver	= HSW_PWR_WELL_CTL2,
3215 	.kvmr	= HSW_PWR_WELL_CTL3,
3216 	.debug	= HSW_PWR_WELL_CTL4,
3217 };
3218 
3219 static const struct i915_power_well_desc hsw_power_wells[] = {
3220 	{
3221 		.name = "always-on",
3222 		.always_on = true,
3223 		.domains = POWER_DOMAIN_MASK,
3224 		.ops = &i9xx_always_on_power_well_ops,
3225 		.id = DISP_PW_ID_NONE,
3226 	},
3227 	{
3228 		.name = "display",
3229 		.domains = HSW_DISPLAY_POWER_DOMAINS,
3230 		.ops = &hsw_power_well_ops,
3231 		.id = HSW_DISP_PW_GLOBAL,
3232 		{
3233 			.hsw.regs = &hsw_power_well_regs,
3234 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3235 			.hsw.has_vga = true,
3236 		},
3237 	},
3238 };
3239 
3240 static const struct i915_power_well_desc bdw_power_wells[] = {
3241 	{
3242 		.name = "always-on",
3243 		.always_on = true,
3244 		.domains = POWER_DOMAIN_MASK,
3245 		.ops = &i9xx_always_on_power_well_ops,
3246 		.id = DISP_PW_ID_NONE,
3247 	},
3248 	{
3249 		.name = "display",
3250 		.domains = BDW_DISPLAY_POWER_DOMAINS,
3251 		.ops = &hsw_power_well_ops,
3252 		.id = HSW_DISP_PW_GLOBAL,
3253 		{
3254 			.hsw.regs = &hsw_power_well_regs,
3255 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3256 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3257 			.hsw.has_vga = true,
3258 		},
3259 	},
3260 };
3261 
3262 static const struct i915_power_well_ops vlv_display_power_well_ops = {
3263 	.sync_hw = i9xx_power_well_sync_hw_noop,
3264 	.enable = vlv_display_power_well_enable,
3265 	.disable = vlv_display_power_well_disable,
3266 	.is_enabled = vlv_power_well_enabled,
3267 };
3268 
3269 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3270 	.sync_hw = i9xx_power_well_sync_hw_noop,
3271 	.enable = vlv_dpio_cmn_power_well_enable,
3272 	.disable = vlv_dpio_cmn_power_well_disable,
3273 	.is_enabled = vlv_power_well_enabled,
3274 };
3275 
3276 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3277 	.sync_hw = i9xx_power_well_sync_hw_noop,
3278 	.enable = vlv_power_well_enable,
3279 	.disable = vlv_power_well_disable,
3280 	.is_enabled = vlv_power_well_enabled,
3281 };
3282 
3283 static const struct i915_power_well_desc vlv_power_wells[] = {
3284 	{
3285 		.name = "always-on",
3286 		.always_on = true,
3287 		.domains = POWER_DOMAIN_MASK,
3288 		.ops = &i9xx_always_on_power_well_ops,
3289 		.id = DISP_PW_ID_NONE,
3290 	},
3291 	{
3292 		.name = "display",
3293 		.domains = VLV_DISPLAY_POWER_DOMAINS,
3294 		.ops = &vlv_display_power_well_ops,
3295 		.id = VLV_DISP_PW_DISP2D,
3296 		{
3297 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3298 		},
3299 	},
3300 	{
3301 		.name = "dpio-tx-b-01",
3302 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3303 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3304 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3305 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3306 		.ops = &vlv_dpio_power_well_ops,
3307 		.id = DISP_PW_ID_NONE,
3308 		{
3309 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3310 		},
3311 	},
3312 	{
3313 		.name = "dpio-tx-b-23",
3314 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3315 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3316 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3317 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3318 		.ops = &vlv_dpio_power_well_ops,
3319 		.id = DISP_PW_ID_NONE,
3320 		{
3321 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3322 		},
3323 	},
3324 	{
3325 		.name = "dpio-tx-c-01",
3326 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3327 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3328 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3329 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3330 		.ops = &vlv_dpio_power_well_ops,
3331 		.id = DISP_PW_ID_NONE,
3332 		{
3333 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3334 		},
3335 	},
3336 	{
3337 		.name = "dpio-tx-c-23",
3338 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3339 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3340 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3341 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3342 		.ops = &vlv_dpio_power_well_ops,
3343 		.id = DISP_PW_ID_NONE,
3344 		{
3345 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3346 		},
3347 	},
3348 	{
3349 		.name = "dpio-common",
3350 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3351 		.ops = &vlv_dpio_cmn_power_well_ops,
3352 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3353 		{
3354 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3355 		},
3356 	},
3357 };
3358 
3359 static const struct i915_power_well_desc chv_power_wells[] = {
3360 	{
3361 		.name = "always-on",
3362 		.always_on = true,
3363 		.domains = POWER_DOMAIN_MASK,
3364 		.ops = &i9xx_always_on_power_well_ops,
3365 		.id = DISP_PW_ID_NONE,
3366 	},
3367 	{
3368 		.name = "display",
3369 		/*
3370 		 * The pipe A power well is the new disp2d well. Pipe B and C
3371 		 * power wells don't actually exist; the pipe A power well is
3372 		 * required for any pipe to work.
3373 		 */
3374 		.domains = CHV_DISPLAY_POWER_DOMAINS,
3375 		.ops = &chv_pipe_power_well_ops,
3376 		.id = DISP_PW_ID_NONE,
3377 	},
3378 	{
3379 		.name = "dpio-common-bc",
3380 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3381 		.ops = &chv_dpio_cmn_power_well_ops,
3382 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3383 		{
3384 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3385 		},
3386 	},
3387 	{
3388 		.name = "dpio-common-d",
3389 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3390 		.ops = &chv_dpio_cmn_power_well_ops,
3391 		.id = CHV_DISP_PW_DPIO_CMN_D,
3392 		{
3393 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3394 		},
3395 	},
3396 };
3397 
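/*
 * Look up the power well with the given ID and report whether it is
 * currently enabled according to the well's is_enabled() hook.
 */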
3398 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3399 					 enum i915_power_well_id power_well_id)
3400 {
3401 	struct i915_power_well *power_well;
3402 	bool ret;
3403 
3404 	power_well = lookup_power_well(dev_priv, power_well_id);
3405 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3406 
3407 	return ret;
3408 }
3409 
3410 static const struct i915_power_well_desc skl_power_wells[] = {
3411 	{
3412 		.name = "always-on",
3413 		.always_on = true,
3414 		.domains = POWER_DOMAIN_MASK,
3415 		.ops = &i9xx_always_on_power_well_ops,
3416 		.id = DISP_PW_ID_NONE,
3417 	},
3418 	{
3419 		.name = "power well 1",
3420 		/* Handled by the DMC firmware */
3421 		.always_on = true,
3422 		.domains = 0,
3423 		.ops = &hsw_power_well_ops,
3424 		.id = SKL_DISP_PW_1,
3425 		{
3426 			.hsw.regs = &hsw_power_well_regs,
3427 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3428 			.hsw.has_fuses = true,
3429 		},
3430 	},
3431 	{
3432 		.name = "MISC IO power well",
3433 		/* Handled by the DMC firmware */
3434 		.always_on = true,
3435 		.domains = 0,
3436 		.ops = &hsw_power_well_ops,
3437 		.id = SKL_DISP_PW_MISC_IO,
3438 		{
3439 			.hsw.regs = &hsw_power_well_regs,
3440 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3441 		},
3442 	},
3443 	{
3444 		.name = "DC off",
3445 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3446 		.ops = &gen9_dc_off_power_well_ops,
3447 		.id = SKL_DISP_DC_OFF,
3448 	},
3449 	{
3450 		.name = "power well 2",
3451 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3452 		.ops = &hsw_power_well_ops,
3453 		.id = SKL_DISP_PW_2,
3454 		{
3455 			.hsw.regs = &hsw_power_well_regs,
3456 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3457 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3458 			.hsw.has_vga = true,
3459 			.hsw.has_fuses = true,
3460 		},
3461 	},
3462 	{
3463 		.name = "DDI A/E IO power well",
3464 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3465 		.ops = &hsw_power_well_ops,
3466 		.id = DISP_PW_ID_NONE,
3467 		{
3468 			.hsw.regs = &hsw_power_well_regs,
3469 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3470 		},
3471 	},
3472 	{
3473 		.name = "DDI B IO power well",
3474 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3475 		.ops = &hsw_power_well_ops,
3476 		.id = DISP_PW_ID_NONE,
3477 		{
3478 			.hsw.regs = &hsw_power_well_regs,
3479 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3480 		},
3481 	},
3482 	{
3483 		.name = "DDI C IO power well",
3484 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3485 		.ops = &hsw_power_well_ops,
3486 		.id = DISP_PW_ID_NONE,
3487 		{
3488 			.hsw.regs = &hsw_power_well_regs,
3489 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3490 		},
3491 	},
3492 	{
3493 		.name = "DDI D IO power well",
3494 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3495 		.ops = &hsw_power_well_ops,
3496 		.id = DISP_PW_ID_NONE,
3497 		{
3498 			.hsw.regs = &hsw_power_well_regs,
3499 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3500 		},
3501 	},
3502 };
3503 
3504 static const struct i915_power_well_desc bxt_power_wells[] = {
3505 	{
3506 		.name = "always-on",
3507 		.always_on = true,
3508 		.domains = POWER_DOMAIN_MASK,
3509 		.ops = &i9xx_always_on_power_well_ops,
3510 		.id = DISP_PW_ID_NONE,
3511 	},
3512 	{
3513 		.name = "power well 1",
3514 		/* Handled by the DMC firmware */
3515 		.always_on = true,
3516 		.domains = 0,
3517 		.ops = &hsw_power_well_ops,
3518 		.id = SKL_DISP_PW_1,
3519 		{
3520 			.hsw.regs = &hsw_power_well_regs,
3521 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3522 			.hsw.has_fuses = true,
3523 		},
3524 	},
3525 	{
3526 		.name = "DC off",
3527 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3528 		.ops = &gen9_dc_off_power_well_ops,
3529 		.id = SKL_DISP_DC_OFF,
3530 	},
3531 	{
3532 		.name = "power well 2",
3533 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3534 		.ops = &hsw_power_well_ops,
3535 		.id = SKL_DISP_PW_2,
3536 		{
3537 			.hsw.regs = &hsw_power_well_regs,
3538 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3539 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3540 			.hsw.has_vga = true,
3541 			.hsw.has_fuses = true,
3542 		},
3543 	},
3544 	{
3545 		.name = "dpio-common-a",
3546 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3547 		.ops = &bxt_dpio_cmn_power_well_ops,
3548 		.id = BXT_DISP_PW_DPIO_CMN_A,
3549 		{
3550 			.bxt.phy = DPIO_PHY1,
3551 		},
3552 	},
3553 	{
3554 		.name = "dpio-common-bc",
3555 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3556 		.ops = &bxt_dpio_cmn_power_well_ops,
3557 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3558 		{
3559 			.bxt.phy = DPIO_PHY0,
3560 		},
3561 	},
3562 };
3563 
3564 static const struct i915_power_well_desc glk_power_wells[] = {
3565 	{
3566 		.name = "always-on",
3567 		.always_on = true,
3568 		.domains = POWER_DOMAIN_MASK,
3569 		.ops = &i9xx_always_on_power_well_ops,
3570 		.id = DISP_PW_ID_NONE,
3571 	},
3572 	{
3573 		.name = "power well 1",
3574 		/* Handled by the DMC firmware */
3575 		.always_on = true,
3576 		.domains = 0,
3577 		.ops = &hsw_power_well_ops,
3578 		.id = SKL_DISP_PW_1,
3579 		{
3580 			.hsw.regs = &hsw_power_well_regs,
3581 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3582 			.hsw.has_fuses = true,
3583 		},
3584 	},
3585 	{
3586 		.name = "DC off",
3587 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3588 		.ops = &gen9_dc_off_power_well_ops,
3589 		.id = SKL_DISP_DC_OFF,
3590 	},
3591 	{
3592 		.name = "power well 2",
3593 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3594 		.ops = &hsw_power_well_ops,
3595 		.id = SKL_DISP_PW_2,
3596 		{
3597 			.hsw.regs = &hsw_power_well_regs,
3598 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3599 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3600 			.hsw.has_vga = true,
3601 			.hsw.has_fuses = true,
3602 		},
3603 	},
3604 	{
3605 		.name = "dpio-common-a",
3606 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3607 		.ops = &bxt_dpio_cmn_power_well_ops,
3608 		.id = BXT_DISP_PW_DPIO_CMN_A,
3609 		{
3610 			.bxt.phy = DPIO_PHY1,
3611 		},
3612 	},
3613 	{
3614 		.name = "dpio-common-b",
3615 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3616 		.ops = &bxt_dpio_cmn_power_well_ops,
3617 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3618 		{
3619 			.bxt.phy = DPIO_PHY0,
3620 		},
3621 	},
3622 	{
3623 		.name = "dpio-common-c",
3624 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3625 		.ops = &bxt_dpio_cmn_power_well_ops,
3626 		.id = GLK_DISP_PW_DPIO_CMN_C,
3627 		{
3628 			.bxt.phy = DPIO_PHY2,
3629 		},
3630 	},
3631 	{
3632 		.name = "AUX A",
3633 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3634 		.ops = &hsw_power_well_ops,
3635 		.id = DISP_PW_ID_NONE,
3636 		{
3637 			.hsw.regs = &hsw_power_well_regs,
3638 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3639 		},
3640 	},
3641 	{
3642 		.name = "AUX B",
3643 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3644 		.ops = &hsw_power_well_ops,
3645 		.id = DISP_PW_ID_NONE,
3646 		{
3647 			.hsw.regs = &hsw_power_well_regs,
3648 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3649 		},
3650 	},
3651 	{
3652 		.name = "AUX C",
3653 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3654 		.ops = &hsw_power_well_ops,
3655 		.id = DISP_PW_ID_NONE,
3656 		{
3657 			.hsw.regs = &hsw_power_well_regs,
3658 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3659 		},
3660 	},
3661 	{
3662 		.name = "DDI A IO power well",
3663 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3664 		.ops = &hsw_power_well_ops,
3665 		.id = DISP_PW_ID_NONE,
3666 		{
3667 			.hsw.regs = &hsw_power_well_regs,
3668 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3669 		},
3670 	},
3671 	{
3672 		.name = "DDI B IO power well",
3673 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3674 		.ops = &hsw_power_well_ops,
3675 		.id = DISP_PW_ID_NONE,
3676 		{
3677 			.hsw.regs = &hsw_power_well_regs,
3678 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3679 		},
3680 	},
3681 	{
3682 		.name = "DDI C IO power well",
3683 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3684 		.ops = &hsw_power_well_ops,
3685 		.id = DISP_PW_ID_NONE,
3686 		{
3687 			.hsw.regs = &hsw_power_well_regs,
3688 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3689 		},
3690 	},
3691 };
3692 
3693 static const struct i915_power_well_ops icl_aux_power_well_ops = {
3694 	.sync_hw = hsw_power_well_sync_hw,
3695 	.enable = icl_aux_power_well_enable,
3696 	.disable = icl_aux_power_well_disable,
3697 	.is_enabled = hsw_power_well_enabled,
3698 };
3699 
3700 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3701 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3702 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3703 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3704 };
3705 
3706 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3707 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3708 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3709 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3710 };
3711 
3712 static const struct i915_power_well_desc icl_power_wells[] = {
3713 	{
3714 		.name = "always-on",
3715 		.always_on = true,
3716 		.domains = POWER_DOMAIN_MASK,
3717 		.ops = &i9xx_always_on_power_well_ops,
3718 		.id = DISP_PW_ID_NONE,
3719 	},
3720 	{
3721 		.name = "power well 1",
3722 		/* Handled by the DMC firmware */
3723 		.always_on = true,
3724 		.domains = 0,
3725 		.ops = &hsw_power_well_ops,
3726 		.id = SKL_DISP_PW_1,
3727 		{
3728 			.hsw.regs = &hsw_power_well_regs,
3729 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3730 			.hsw.has_fuses = true,
3731 		},
3732 	},
3733 	{
3734 		.name = "DC off",
3735 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3736 		.ops = &gen9_dc_off_power_well_ops,
3737 		.id = SKL_DISP_DC_OFF,
3738 	},
3739 	{
3740 		.name = "power well 2",
3741 		.domains = ICL_PW_2_POWER_DOMAINS,
3742 		.ops = &hsw_power_well_ops,
3743 		.id = SKL_DISP_PW_2,
3744 		{
3745 			.hsw.regs = &hsw_power_well_regs,
3746 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3747 			.hsw.has_fuses = true,
3748 		},
3749 	},
3750 	{
3751 		.name = "power well 3",
3752 		.domains = ICL_PW_3_POWER_DOMAINS,
3753 		.ops = &hsw_power_well_ops,
3754 		.id = ICL_DISP_PW_3,
3755 		{
3756 			.hsw.regs = &hsw_power_well_regs,
3757 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3758 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3759 			.hsw.has_vga = true,
3760 			.hsw.has_fuses = true,
3761 		},
3762 	},
3763 	{
3764 		.name = "DDI A IO",
3765 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3766 		.ops = &hsw_power_well_ops,
3767 		.id = DISP_PW_ID_NONE,
3768 		{
3769 			.hsw.regs = &icl_ddi_power_well_regs,
3770 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3771 		},
3772 	},
3773 	{
3774 		.name = "DDI B IO",
3775 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3776 		.ops = &hsw_power_well_ops,
3777 		.id = DISP_PW_ID_NONE,
3778 		{
3779 			.hsw.regs = &icl_ddi_power_well_regs,
3780 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3781 		},
3782 	},
3783 	{
3784 		.name = "DDI C IO",
3785 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3786 		.ops = &hsw_power_well_ops,
3787 		.id = DISP_PW_ID_NONE,
3788 		{
3789 			.hsw.regs = &icl_ddi_power_well_regs,
3790 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3791 		},
3792 	},
3793 	{
3794 		.name = "DDI D IO",
3795 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3796 		.ops = &hsw_power_well_ops,
3797 		.id = DISP_PW_ID_NONE,
3798 		{
3799 			.hsw.regs = &icl_ddi_power_well_regs,
3800 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3801 		},
3802 	},
3803 	{
3804 		.name = "DDI E IO",
3805 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3806 		.ops = &hsw_power_well_ops,
3807 		.id = DISP_PW_ID_NONE,
3808 		{
3809 			.hsw.regs = &icl_ddi_power_well_regs,
3810 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3811 		},
3812 	},
3813 	{
3814 		.name = "DDI F IO",
3815 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3816 		.ops = &hsw_power_well_ops,
3817 		.id = DISP_PW_ID_NONE,
3818 		{
3819 			.hsw.regs = &icl_ddi_power_well_regs,
3820 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3821 		},
3822 	},
3823 	{
3824 		.name = "AUX A",
3825 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3826 		.ops = &icl_aux_power_well_ops,
3827 		.id = DISP_PW_ID_NONE,
3828 		{
3829 			.hsw.regs = &icl_aux_power_well_regs,
3830 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3831 		},
3832 	},
3833 	{
3834 		.name = "AUX B",
3835 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3836 		.ops = &icl_aux_power_well_ops,
3837 		.id = DISP_PW_ID_NONE,
3838 		{
3839 			.hsw.regs = &icl_aux_power_well_regs,
3840 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3841 		},
3842 	},
3843 	{
3844 		.name = "AUX C TC1",
3845 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3846 		.ops = &icl_aux_power_well_ops,
3847 		.id = DISP_PW_ID_NONE,
3848 		{
3849 			.hsw.regs = &icl_aux_power_well_regs,
3850 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3851 			.hsw.is_tc_tbt = false,
3852 		},
3853 	},
3854 	{
3855 		.name = "AUX D TC2",
3856 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3857 		.ops = &icl_aux_power_well_ops,
3858 		.id = DISP_PW_ID_NONE,
3859 		{
3860 			.hsw.regs = &icl_aux_power_well_regs,
3861 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3862 			.hsw.is_tc_tbt = false,
3863 		},
3864 	},
3865 	{
3866 		.name = "AUX E TC3",
3867 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3868 		.ops = &icl_aux_power_well_ops,
3869 		.id = DISP_PW_ID_NONE,
3870 		{
3871 			.hsw.regs = &icl_aux_power_well_regs,
3872 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3873 			.hsw.is_tc_tbt = false,
3874 		},
3875 	},
3876 	{
3877 		.name = "AUX F TC4",
3878 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3879 		.ops = &icl_aux_power_well_ops,
3880 		.id = DISP_PW_ID_NONE,
3881 		{
3882 			.hsw.regs = &icl_aux_power_well_regs,
3883 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3884 			.hsw.is_tc_tbt = false,
3885 		},
3886 	},
3887 	{
3888 		.name = "AUX C TBT1",
3889 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3890 		.ops = &icl_aux_power_well_ops,
3891 		.id = DISP_PW_ID_NONE,
3892 		{
3893 			.hsw.regs = &icl_aux_power_well_regs,
3894 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3895 			.hsw.is_tc_tbt = true,
3896 		},
3897 	},
3898 	{
3899 		.name = "AUX D TBT2",
3900 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3901 		.ops = &icl_aux_power_well_ops,
3902 		.id = DISP_PW_ID_NONE,
3903 		{
3904 			.hsw.regs = &icl_aux_power_well_regs,
3905 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3906 			.hsw.is_tc_tbt = true,
3907 		},
3908 	},
3909 	{
3910 		.name = "AUX E TBT3",
3911 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3912 		.ops = &icl_aux_power_well_ops,
3913 		.id = DISP_PW_ID_NONE,
3914 		{
3915 			.hsw.regs = &icl_aux_power_well_regs,
3916 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3917 			.hsw.is_tc_tbt = true,
3918 		},
3919 	},
3920 	{
3921 		.name = "AUX F TBT4",
3922 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3923 		.ops = &icl_aux_power_well_ops,
3924 		.id = DISP_PW_ID_NONE,
3925 		{
3926 			.hsw.regs = &icl_aux_power_well_regs,
3927 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3928 			.hsw.is_tc_tbt = true,
3929 		},
3930 	},
3931 	{
3932 		.name = "power well 4",
3933 		.domains = ICL_PW_4_POWER_DOMAINS,
3934 		.ops = &hsw_power_well_ops,
3935 		.id = DISP_PW_ID_NONE,
3936 		{
3937 			.hsw.regs = &hsw_power_well_regs,
3938 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3939 			.hsw.has_fuses = true,
3940 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3941 		},
3942 	},
3943 };
3944 
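/*
 * Block or unblock TC cold state entry via a PCODE mailbox request,
 * making up to three attempts with a 1ms sleep between them.
 */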
3945 static void
3946 tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
3947 {
3948 	u8 tries = 0;
3949 	int ret;
3950 
3951 	while (1) {
3952 		u32 low_val;
3953 		u32 high_val = 0;
3954 
3955 		if (block)
3956 			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
3957 		else
3958 			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
3959 
3960 		/*
3961 		 * The spec states that we should time out the request after 200us,
3962 		 * but the function below will time out after 500us.
3963 		 */
3964 		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
3965 					     &high_val);
3966 		if (ret == 0) {
3967 			if (block &&
3968 			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
3969 				ret = -EIO;
3970 			else
3971 				break;
3972 		}
3973 
3974 		if (++tries == 3)
3975 			break;
3976 
3977 		drm_msleep(1);
3978 	}
3979 
3980 	if (ret)
3981 		drm_err(&i915->drm, "TC cold %sblock failed\n",
3982 			block ? "" : "un");
3983 	else
3984 		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
3985 			    block ? "" : "un");
3986 }
3987 
3988 static void
3989 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
3990 				  struct i915_power_well *power_well)
3991 {
3992 	tgl_tc_cold_request(i915, true);
3993 }
3994 
3995 static void
3996 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
3997 				   struct i915_power_well *power_well)
3998 {
3999 	tgl_tc_cold_request(i915, false);
4000 }
4001 
4002 static void
4003 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
4004 				   struct i915_power_well *power_well)
4005 {
4006 	if (power_well->count > 0)
4007 		tgl_tc_cold_off_power_well_enable(i915, power_well);
4008 	else
4009 		tgl_tc_cold_off_power_well_disable(i915, power_well);
4010 }
4011 
4012 static bool
4013 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4014 				      struct i915_power_well *power_well)
4015 {
4016 	/*
4017 	 * Not the correct implementation, but there is no way to just read it
4018 	 * from PCODE, so return the count to avoid state mismatch errors.
4019 	 */
4020 	return power_well->count;
4021 }
4022 
4023 static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4024 	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4025 	.enable = tgl_tc_cold_off_power_well_enable,
4026 	.disable = tgl_tc_cold_off_power_well_disable,
4027 	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4028 };
4029 
4030 static const struct i915_power_well_desc tgl_power_wells[] = {
4031 	{
4032 		.name = "always-on",
4033 		.always_on = true,
4034 		.domains = POWER_DOMAIN_MASK,
4035 		.ops = &i9xx_always_on_power_well_ops,
4036 		.id = DISP_PW_ID_NONE,
4037 	},
4038 	{
4039 		.name = "power well 1",
4040 		/* Handled by the DMC firmware */
4041 		.always_on = true,
4042 		.domains = 0,
4043 		.ops = &hsw_power_well_ops,
4044 		.id = SKL_DISP_PW_1,
4045 		{
4046 			.hsw.regs = &hsw_power_well_regs,
4047 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4048 			.hsw.has_fuses = true,
4049 		},
4050 	},
4051 	{
4052 		.name = "DC off",
4053 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4054 		.ops = &gen9_dc_off_power_well_ops,
4055 		.id = SKL_DISP_DC_OFF,
4056 	},
4057 	{
4058 		.name = "power well 2",
4059 		.domains = TGL_PW_2_POWER_DOMAINS,
4060 		.ops = &hsw_power_well_ops,
4061 		.id = SKL_DISP_PW_2,
4062 		{
4063 			.hsw.regs = &hsw_power_well_regs,
4064 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4065 			.hsw.has_fuses = true,
4066 		},
4067 	},
4068 	{
4069 		.name = "power well 3",
4070 		.domains = TGL_PW_3_POWER_DOMAINS,
4071 		.ops = &hsw_power_well_ops,
4072 		.id = ICL_DISP_PW_3,
4073 		{
4074 			.hsw.regs = &hsw_power_well_regs,
4075 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4076 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4077 			.hsw.has_vga = true,
4078 			.hsw.has_fuses = true,
4079 		},
4080 	},
4081 	{
4082 		.name = "DDI A IO",
4083 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4084 		.ops = &hsw_power_well_ops,
4085 		.id = DISP_PW_ID_NONE,
4086 		{
4087 			.hsw.regs = &icl_ddi_power_well_regs,
4088 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4089 		}
4090 	},
4091 	{
4092 		.name = "DDI B IO",
4093 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4094 		.ops = &hsw_power_well_ops,
4095 		.id = DISP_PW_ID_NONE,
4096 		{
4097 			.hsw.regs = &icl_ddi_power_well_regs,
4098 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4099 		}
4100 	},
4101 	{
4102 		.name = "DDI C IO",
4103 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4104 		.ops = &hsw_power_well_ops,
4105 		.id = DISP_PW_ID_NONE,
4106 		{
4107 			.hsw.regs = &icl_ddi_power_well_regs,
4108 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4109 		}
4110 	},
4111 	{
4112 		.name = "DDI IO TC1",
4113 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4114 		.ops = &hsw_power_well_ops,
4115 		.id = DISP_PW_ID_NONE,
4116 		{
4117 			.hsw.regs = &icl_ddi_power_well_regs,
4118 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4119 		},
4120 	},
4121 	{
4122 		.name = "DDI IO TC2",
4123 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4124 		.ops = &hsw_power_well_ops,
4125 		.id = DISP_PW_ID_NONE,
4126 		{
4127 			.hsw.regs = &icl_ddi_power_well_regs,
4128 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4129 		},
4130 	},
4131 	{
4132 		.name = "DDI IO TC3",
4133 		.domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
4134 		.ops = &hsw_power_well_ops,
4135 		.id = DISP_PW_ID_NONE,
4136 		{
4137 			.hsw.regs = &icl_ddi_power_well_regs,
4138 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4139 		},
4140 	},
4141 	{
4142 		.name = "DDI IO TC4",
4143 		.domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
4144 		.ops = &hsw_power_well_ops,
4145 		.id = DISP_PW_ID_NONE,
4146 		{
4147 			.hsw.regs = &icl_ddi_power_well_regs,
4148 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4149 		},
4150 	},
4151 	{
4152 		.name = "DDI IO TC5",
4153 		.domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
4154 		.ops = &hsw_power_well_ops,
4155 		.id = DISP_PW_ID_NONE,
4156 		{
4157 			.hsw.regs = &icl_ddi_power_well_regs,
4158 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4159 		},
4160 	},
4161 	{
4162 		.name = "DDI IO TC6",
4163 		.domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
4164 		.ops = &hsw_power_well_ops,
4165 		.id = DISP_PW_ID_NONE,
4166 		{
4167 			.hsw.regs = &icl_ddi_power_well_regs,
4168 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4169 		},
4170 	},
4171 	{
4172 		.name = "TC cold off",
4173 		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4174 		.ops = &tgl_tc_cold_off_ops,
4175 		.id = TGL_DISP_PW_TC_COLD_OFF,
4176 	},
4177 	{
4178 		.name = "AUX A",
4179 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4180 		.ops = &icl_aux_power_well_ops,
4181 		.id = DISP_PW_ID_NONE,
4182 		{
4183 			.hsw.regs = &icl_aux_power_well_regs,
4184 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4185 		},
4186 	},
4187 	{
4188 		.name = "AUX B",
4189 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4190 		.ops = &icl_aux_power_well_ops,
4191 		.id = DISP_PW_ID_NONE,
4192 		{
4193 			.hsw.regs = &icl_aux_power_well_regs,
4194 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4195 		},
4196 	},
4197 	{
4198 		.name = "AUX C",
4199 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4200 		.ops = &icl_aux_power_well_ops,
4201 		.id = DISP_PW_ID_NONE,
4202 		{
4203 			.hsw.regs = &icl_aux_power_well_regs,
4204 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4205 		},
4206 	},
4207 	{
4208 		.name = "AUX USBC1",
4209 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4210 		.ops = &icl_aux_power_well_ops,
4211 		.id = DISP_PW_ID_NONE,
4212 		{
4213 			.hsw.regs = &icl_aux_power_well_regs,
4214 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4215 			.hsw.is_tc_tbt = false,
4216 		},
4217 	},
4218 	{
4219 		.name = "AUX USBC2",
4220 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4221 		.ops = &icl_aux_power_well_ops,
4222 		.id = DISP_PW_ID_NONE,
4223 		{
4224 			.hsw.regs = &icl_aux_power_well_regs,
4225 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4226 			.hsw.is_tc_tbt = false,
4227 		},
4228 	},
4229 	{
4230 		.name = "AUX USBC3",
4231 		.domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
4232 		.ops = &icl_aux_power_well_ops,
4233 		.id = DISP_PW_ID_NONE,
4234 		{
4235 			.hsw.regs = &icl_aux_power_well_regs,
4236 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4237 			.hsw.is_tc_tbt = false,
4238 		},
4239 	},
4240 	{
4241 		.name = "AUX USBC4",
4242 		.domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
4243 		.ops = &icl_aux_power_well_ops,
4244 		.id = DISP_PW_ID_NONE,
4245 		{
4246 			.hsw.regs = &icl_aux_power_well_regs,
4247 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4248 			.hsw.is_tc_tbt = false,
4249 		},
4250 	},
4251 	{
4252 		.name = "AUX USBC5",
4253 		.domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
4254 		.ops = &icl_aux_power_well_ops,
4255 		.id = DISP_PW_ID_NONE,
4256 		{
4257 			.hsw.regs = &icl_aux_power_well_regs,
4258 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4259 			.hsw.is_tc_tbt = false,
4260 		},
4261 	},
4262 	{
4263 		.name = "AUX USBC6",
4264 		.domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
4265 		.ops = &icl_aux_power_well_ops,
4266 		.id = DISP_PW_ID_NONE,
4267 		{
4268 			.hsw.regs = &icl_aux_power_well_regs,
4269 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4270 			.hsw.is_tc_tbt = false,
4271 		},
4272 	},
4273 	{
4274 		.name = "AUX TBT1",
4275 		.domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
4276 		.ops = &icl_aux_power_well_ops,
4277 		.id = DISP_PW_ID_NONE,
4278 		{
4279 			.hsw.regs = &icl_aux_power_well_regs,
4280 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4281 			.hsw.is_tc_tbt = true,
4282 		},
4283 	},
4284 	{
4285 		.name = "AUX TBT2",
4286 		.domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
4287 		.ops = &icl_aux_power_well_ops,
4288 		.id = DISP_PW_ID_NONE,
4289 		{
4290 			.hsw.regs = &icl_aux_power_well_regs,
4291 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4292 			.hsw.is_tc_tbt = true,
4293 		},
4294 	},
4295 	{
4296 		.name = "AUX TBT3",
4297 		.domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
4298 		.ops = &icl_aux_power_well_ops,
4299 		.id = DISP_PW_ID_NONE,
4300 		{
4301 			.hsw.regs = &icl_aux_power_well_regs,
4302 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4303 			.hsw.is_tc_tbt = true,
4304 		},
4305 	},
4306 	{
4307 		.name = "AUX TBT4",
4308 		.domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
4309 		.ops = &icl_aux_power_well_ops,
4310 		.id = DISP_PW_ID_NONE,
4311 		{
4312 			.hsw.regs = &icl_aux_power_well_regs,
4313 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4314 			.hsw.is_tc_tbt = true,
4315 		},
4316 	},
4317 	{
4318 		.name = "AUX TBT5",
4319 		.domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
4320 		.ops = &icl_aux_power_well_ops,
4321 		.id = DISP_PW_ID_NONE,
4322 		{
4323 			.hsw.regs = &icl_aux_power_well_regs,
4324 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4325 			.hsw.is_tc_tbt = true,
4326 		},
4327 	},
4328 	{
4329 		.name = "AUX TBT6",
4330 		.domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
4331 		.ops = &icl_aux_power_well_ops,
4332 		.id = DISP_PW_ID_NONE,
4333 		{
4334 			.hsw.regs = &icl_aux_power_well_regs,
4335 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4336 			.hsw.is_tc_tbt = true,
4337 		},
4338 	},
4339 	{
4340 		.name = "power well 4",
4341 		.domains = TGL_PW_4_POWER_DOMAINS,
4342 		.ops = &hsw_power_well_ops,
4343 		.id = DISP_PW_ID_NONE,
4344 		{
4345 			.hsw.regs = &hsw_power_well_regs,
4346 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4347 			.hsw.has_fuses = true,
4348 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4349 		}
4350 	},
4351 	{
4352 		.name = "power well 5",
4353 		.domains = TGL_PW_5_POWER_DOMAINS,
4354 		.ops = &hsw_power_well_ops,
4355 		.id = DISP_PW_ID_NONE,
4356 		{
4357 			.hsw.regs = &hsw_power_well_regs,
4358 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4359 			.hsw.has_fuses = true,
4360 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4361 		},
4362 	},
4363 };
4364 
4365 static const struct i915_power_well_desc rkl_power_wells[] = {
4366 	{
4367 		.name = "always-on",
4368 		.always_on = true,
4369 		.domains = POWER_DOMAIN_MASK,
4370 		.ops = &i9xx_always_on_power_well_ops,
4371 		.id = DISP_PW_ID_NONE,
4372 	},
4373 	{
4374 		.name = "power well 1",
4375 		/* Handled by the DMC firmware */
4376 		.always_on = true,
4377 		.domains = 0,
4378 		.ops = &hsw_power_well_ops,
4379 		.id = SKL_DISP_PW_1,
4380 		{
4381 			.hsw.regs = &hsw_power_well_regs,
4382 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4383 			.hsw.has_fuses = true,
4384 		},
4385 	},
4386 	{
4387 		.name = "DC off",
4388 		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
4389 		.ops = &gen9_dc_off_power_well_ops,
4390 		.id = SKL_DISP_DC_OFF,
4391 	},
4392 	{
4393 		.name = "power well 3",
4394 		.domains = RKL_PW_3_POWER_DOMAINS,
4395 		.ops = &hsw_power_well_ops,
4396 		.id = ICL_DISP_PW_3,
4397 		{
4398 			.hsw.regs = &hsw_power_well_regs,
4399 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4400 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4401 			.hsw.has_vga = true,
4402 			.hsw.has_fuses = true,
4403 		},
4404 	},
4405 	{
4406 		.name = "power well 4",
4407 		.domains = RKL_PW_4_POWER_DOMAINS,
4408 		.ops = &hsw_power_well_ops,
4409 		.id = DISP_PW_ID_NONE,
4410 		{
4411 			.hsw.regs = &hsw_power_well_regs,
4412 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4413 			.hsw.has_fuses = true,
4414 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4415 		}
4416 	},
4417 	{
4418 		.name = "DDI A IO",
4419 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4420 		.ops = &hsw_power_well_ops,
4421 		.id = DISP_PW_ID_NONE,
4422 		{
4423 			.hsw.regs = &icl_ddi_power_well_regs,
4424 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4425 		}
4426 	},
4427 	{
4428 		.name = "DDI B IO",
4429 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4430 		.ops = &hsw_power_well_ops,
4431 		.id = DISP_PW_ID_NONE,
4432 		{
4433 			.hsw.regs = &icl_ddi_power_well_regs,
4434 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4435 		}
4436 	},
4437 	{
4438 		.name = "DDI IO TC1",
4439 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4440 		.ops = &hsw_power_well_ops,
4441 		.id = DISP_PW_ID_NONE,
4442 		{
4443 			.hsw.regs = &icl_ddi_power_well_regs,
4444 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4445 		},
4446 	},
4447 	{
4448 		.name = "DDI IO TC2",
4449 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4450 		.ops = &hsw_power_well_ops,
4451 		.id = DISP_PW_ID_NONE,
4452 		{
4453 			.hsw.regs = &icl_ddi_power_well_regs,
4454 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4455 		},
4456 	},
4457 	{
4458 		.name = "AUX A",
4459 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4460 		.ops = &icl_aux_power_well_ops,
4461 		.id = DISP_PW_ID_NONE,
4462 		{
4463 			.hsw.regs = &icl_aux_power_well_regs,
4464 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4465 		},
4466 	},
4467 	{
4468 		.name = "AUX B",
4469 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4470 		.ops = &icl_aux_power_well_ops,
4471 		.id = DISP_PW_ID_NONE,
4472 		{
4473 			.hsw.regs = &icl_aux_power_well_regs,
4474 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4475 		},
4476 	},
4477 	{
4478 		.name = "AUX USBC1",
4479 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4480 		.ops = &icl_aux_power_well_ops,
4481 		.id = DISP_PW_ID_NONE,
4482 		{
4483 			.hsw.regs = &icl_aux_power_well_regs,
4484 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4485 		},
4486 	},
4487 	{
4488 		.name = "AUX USBC2",
4489 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4490 		.ops = &icl_aux_power_well_ops,
4491 		.id = DISP_PW_ID_NONE,
4492 		{
4493 			.hsw.regs = &icl_aux_power_well_regs,
4494 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4495 		},
4496 	},
4497 };
4498 
4499 static const struct i915_power_well_desc dg1_power_wells[] = {
4500 	{
4501 		.name = "always-on",
4502 		.always_on = true,
4503 		.domains = POWER_DOMAIN_MASK,
4504 		.ops = &i9xx_always_on_power_well_ops,
4505 		.id = DISP_PW_ID_NONE,
4506 	},
4507 	{
4508 		.name = "power well 1",
4509 		/* Handled by the DMC firmware */
4510 		.always_on = true,
4511 		.domains = 0,
4512 		.ops = &hsw_power_well_ops,
4513 		.id = SKL_DISP_PW_1,
4514 		{
4515 			.hsw.regs = &hsw_power_well_regs,
4516 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4517 			.hsw.has_fuses = true,
4518 		},
4519 	},
4520 	{
4521 		.name = "DC off",
4522 		.domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
4523 		.ops = &gen9_dc_off_power_well_ops,
4524 		.id = SKL_DISP_DC_OFF,
4525 	},
4526 	{
4527 		.name = "power well 2",
4528 		.domains = DG1_PW_2_POWER_DOMAINS,
4529 		.ops = &hsw_power_well_ops,
4530 		.id = SKL_DISP_PW_2,
4531 		{
4532 			.hsw.regs = &hsw_power_well_regs,
4533 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4534 			.hsw.has_fuses = true,
4535 		},
4536 	},
4537 	{
4538 		.name = "power well 3",
4539 		.domains = DG1_PW_3_POWER_DOMAINS,
4540 		.ops = &hsw_power_well_ops,
4541 		.id = ICL_DISP_PW_3,
4542 		{
4543 			.hsw.regs = &hsw_power_well_regs,
4544 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
4545 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4546 			.hsw.has_vga = true,
4547 			.hsw.has_fuses = true,
4548 		},
4549 	},
4550 	{
4551 		.name = "DDI A IO",
4552 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4553 		.ops = &hsw_power_well_ops,
4554 		.id = DISP_PW_ID_NONE,
4555 		{
4556 			.hsw.regs = &icl_ddi_power_well_regs,
4557 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4558 		}
4559 	},
4560 	{
4561 		.name = "DDI B IO",
4562 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4563 		.ops = &hsw_power_well_ops,
4564 		.id = DISP_PW_ID_NONE,
4565 		{
4566 			.hsw.regs = &icl_ddi_power_well_regs,
4567 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4568 		}
4569 	},
4570 	{
4571 		.name = "DDI IO TC1",
4572 		.domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
4573 		.ops = &hsw_power_well_ops,
4574 		.id = DISP_PW_ID_NONE,
4575 		{
4576 			.hsw.regs = &icl_ddi_power_well_regs,
4577 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4578 		},
4579 	},
4580 	{
4581 		.name = "DDI IO TC2",
4582 		.domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
4583 		.ops = &hsw_power_well_ops,
4584 		.id = DISP_PW_ID_NONE,
4585 		{
4586 			.hsw.regs = &icl_ddi_power_well_regs,
4587 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4588 		},
4589 	},
4590 	{
4591 		.name = "AUX A",
4592 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
4593 		.ops = &icl_aux_power_well_ops,
4594 		.id = DISP_PW_ID_NONE,
4595 		{
4596 			.hsw.regs = &icl_aux_power_well_regs,
4597 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4598 		},
4599 	},
4600 	{
4601 		.name = "AUX B",
4602 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
4603 		.ops = &icl_aux_power_well_ops,
4604 		.id = DISP_PW_ID_NONE,
4605 		{
4606 			.hsw.regs = &icl_aux_power_well_regs,
4607 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4608 		},
4609 	},
4610 	{
4611 		.name = "AUX USBC1",
4612 		.domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
4613 		.ops = &icl_aux_power_well_ops,
4614 		.id = DISP_PW_ID_NONE,
4615 		{
4616 			.hsw.regs = &icl_aux_power_well_regs,
4617 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4618 			.hsw.is_tc_tbt = false,
4619 		},
4620 	},
4621 	{
4622 		.name = "AUX USBC2",
4623 		.domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
4624 		.ops = &icl_aux_power_well_ops,
4625 		.id = DISP_PW_ID_NONE,
4626 		{
4627 			.hsw.regs = &icl_aux_power_well_regs,
4628 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4629 			.hsw.is_tc_tbt = false,
4630 		},
4631 	},
4632 	{
4633 		.name = "power well 4",
4634 		.domains = TGL_PW_4_POWER_DOMAINS,
4635 		.ops = &hsw_power_well_ops,
4636 		.id = DISP_PW_ID_NONE,
4637 		{
4638 			.hsw.regs = &hsw_power_well_regs,
4639 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4640 			.hsw.has_fuses = true,
4641 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4642 		}
4643 	},
4644 	{
4645 		.name = "power well 5",
4646 		.domains = TGL_PW_5_POWER_DOMAINS,
4647 		.ops = &hsw_power_well_ops,
4648 		.id = DISP_PW_ID_NONE,
4649 		{
4650 			.hsw.regs = &hsw_power_well_regs,
4651 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4652 			.hsw.has_fuses = true,
4653 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4654 		},
4655 	},
4656 };
4657 
4658 static const struct i915_power_well_desc xelpd_power_wells[] = {
4659 	{
4660 		.name = "always-on",
4661 		.always_on = true,
4662 		.domains = POWER_DOMAIN_MASK,
4663 		.ops = &i9xx_always_on_power_well_ops,
4664 		.id = DISP_PW_ID_NONE,
4665 	},
4666 	{
4667 		.name = "power well 1",
4668 		/* Handled by the DMC firmware */
4669 		.always_on = true,
4670 		.domains = 0,
4671 		.ops = &hsw_power_well_ops,
4672 		.id = SKL_DISP_PW_1,
4673 		{
4674 			.hsw.regs = &hsw_power_well_regs,
4675 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
4676 			.hsw.has_fuses = true,
4677 		},
4678 	},
4679 	{
4680 		.name = "DC off",
4681 		.domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
4682 		.ops = &gen9_dc_off_power_well_ops,
4683 		.id = SKL_DISP_DC_OFF,
4684 	},
4685 	{
4686 		.name = "power well 2",
4687 		.domains = XELPD_PW_2_POWER_DOMAINS,
4688 		.ops = &hsw_power_well_ops,
4689 		.id = SKL_DISP_PW_2,
4690 		{
4691 			.hsw.regs = &hsw_power_well_regs,
4692 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
4693 			.hsw.has_vga = true,
4694 			.hsw.has_fuses = true,
4695 		},
4696 	},
4697 	{
4698 		.name = "power well A",
4699 		.domains = XELPD_PW_A_POWER_DOMAINS,
4700 		.ops = &hsw_power_well_ops,
4701 		.id = DISP_PW_ID_NONE,
4702 		{
4703 			.hsw.regs = &hsw_power_well_regs,
4704 			.hsw.idx = XELPD_PW_CTL_IDX_PW_A,
4705 			.hsw.irq_pipe_mask = BIT(PIPE_A),
4706 			.hsw.has_fuses = true,
4707 		},
4708 	},
4709 	{
4710 		.name = "power well B",
4711 		.domains = XELPD_PW_B_POWER_DOMAINS,
4712 		.ops = &hsw_power_well_ops,
4713 		.id = DISP_PW_ID_NONE,
4714 		{
4715 			.hsw.regs = &hsw_power_well_regs,
4716 			.hsw.idx = XELPD_PW_CTL_IDX_PW_B,
4717 			.hsw.irq_pipe_mask = BIT(PIPE_B),
4718 			.hsw.has_fuses = true,
4719 		},
4720 	},
4721 	{
4722 		.name = "power well C",
4723 		.domains = XELPD_PW_C_POWER_DOMAINS,
4724 		.ops = &hsw_power_well_ops,
4725 		.id = DISP_PW_ID_NONE,
4726 		{
4727 			.hsw.regs = &hsw_power_well_regs,
4728 			.hsw.idx = XELPD_PW_CTL_IDX_PW_C,
4729 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4730 			.hsw.has_fuses = true,
4731 		},
4732 	},
4733 	{
4734 		.name = "power well D",
4735 		.domains = XELPD_PW_D_POWER_DOMAINS,
4736 		.ops = &hsw_power_well_ops,
4737 		.id = DISP_PW_ID_NONE,
4738 		{
4739 			.hsw.regs = &hsw_power_well_regs,
4740 			.hsw.idx = XELPD_PW_CTL_IDX_PW_D,
4741 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4742 			.hsw.has_fuses = true,
4743 		},
4744 	},
4745 	{
4746 		.name = "DDI A IO",
4747 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
4748 		.ops = &hsw_power_well_ops,
4749 		.id = DISP_PW_ID_NONE,
4750 		{
4751 			.hsw.regs = &icl_ddi_power_well_regs,
4752 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4753 		}
4754 	},
4755 	{
4756 		.name = "DDI B IO",
4757 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
4758 		.ops = &hsw_power_well_ops,
4759 		.id = DISP_PW_ID_NONE,
4760 		{
4761 			.hsw.regs = &icl_ddi_power_well_regs,
4762 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4763 		}
4764 	},
4765 	{
4766 		.name = "DDI C IO",
4767 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
4768 		.ops = &hsw_power_well_ops,
4769 		.id = DISP_PW_ID_NONE,
4770 		{
4771 			.hsw.regs = &icl_ddi_power_well_regs,
4772 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4773 		}
4774 	},
4775 	{
4776 		.name = "DDI IO D_XELPD",
4777 		.domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
4778 		.ops = &hsw_power_well_ops,
4779 		.id = DISP_PW_ID_NONE,
4780 		{
4781 			.hsw.regs = &icl_ddi_power_well_regs,
4782 			.hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
4783 		}
4784 	},
4785 	{
4786 		.name = "DDI IO E_XELPD",
4787 		.domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
4788 		.ops = &hsw_power_well_ops,
4789 		.id = DISP_PW_ID_NONE,
4790 		{
4791 			.hsw.regs = &icl_ddi_power_well_regs,
4792 			.hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
4793 		}
4794 	},
4795 	{
4796 		.name = "DDI IO TC1",
4797 		.domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
4798 		.ops = &hsw_power_well_ops,
4799 		.id = DISP_PW_ID_NONE,
4800 		{
4801 			.hsw.regs = &icl_ddi_power_well_regs,
4802 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4803 		}
4804 	},
4805 	{
4806 		.name = "DDI IO TC2",
4807 		.domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
4808 		.ops = &hsw_power_well_ops,
4809 		.id = DISP_PW_ID_NONE,
4810 		{
4811 			.hsw.regs = &icl_ddi_power_well_regs,
4812 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4813 		}
4814 	},
4815 	{
4816 		.name = "DDI IO TC3",
4817 		.domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
4818 		.ops = &hsw_power_well_ops,
4819 		.id = DISP_PW_ID_NONE,
4820 		{
4821 			.hsw.regs = &icl_ddi_power_well_regs,
4822 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4823 		}
4824 	},
4825 	{
4826 		.name = "DDI IO TC4",
4827 		.domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
4828 		.ops = &hsw_power_well_ops,
4829 		.id = DISP_PW_ID_NONE,
4830 		{
4831 			.hsw.regs = &icl_ddi_power_well_regs,
4832 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4833 		}
4834 	},
4835 	{
4836 		.name = "AUX A",
4837 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
4838 		.ops = &icl_aux_power_well_ops,
4839 		.id = DISP_PW_ID_NONE,
4840 		{
4841 			.hsw.regs = &icl_aux_power_well_regs,
4842 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4843 			.hsw.fixed_enable_delay = 600,
4844 		},
4845 	},
4846 	{
4847 		.name = "AUX B",
4848 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
4849 		.ops = &icl_aux_power_well_ops,
4850 		.id = DISP_PW_ID_NONE,
4851 		{
4852 			.hsw.regs = &icl_aux_power_well_regs,
4853 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4854 			.hsw.fixed_enable_delay = 600,
4855 		},
4856 	},
4857 	{
4858 		.name = "AUX C",
4859 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4860 		.ops = &icl_aux_power_well_ops,
4861 		.id = DISP_PW_ID_NONE,
4862 		{
4863 			.hsw.regs = &icl_aux_power_well_regs,
4864 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4865 			.hsw.fixed_enable_delay = 600,
4866 		},
4867 	},
4868 	{
4869 		.name = "AUX D_XELPD",
4870 		.domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
4871 		.ops = &icl_aux_power_well_ops,
4872 		.id = DISP_PW_ID_NONE,
4873 		{
4874 			.hsw.regs = &icl_aux_power_well_regs,
4875 			.hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
4876 			.hsw.fixed_enable_delay = 600,
4877 		},
4878 	},
4879 	{
4880 		.name = "AUX E_XELPD",
4881 		.domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
4882 		.ops = &icl_aux_power_well_ops,
4883 		.id = DISP_PW_ID_NONE,
4884 		{
4885 			.hsw.regs = &icl_aux_power_well_regs,
4886 			.hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
4887 		},
4888 	},
4889 	{
4890 		.name = "AUX USBC1",
4891 		.domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
4892 		.ops = &icl_aux_power_well_ops,
4893 		.id = DISP_PW_ID_NONE,
4894 		{
4895 			.hsw.regs = &icl_aux_power_well_regs,
4896 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4897 			.hsw.fixed_enable_delay = 600,
4898 		},
4899 	},
4900 	{
4901 		.name = "AUX USBC2",
4902 		.domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
4903 		.ops = &icl_aux_power_well_ops,
4904 		.id = DISP_PW_ID_NONE,
4905 		{
4906 			.hsw.regs = &icl_aux_power_well_regs,
4907 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4908 		},
4909 	},
4910 	{
4911 		.name = "AUX USBC3",
4912 		.domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
4913 		.ops = &icl_aux_power_well_ops,
4914 		.id = DISP_PW_ID_NONE,
4915 		{
4916 			.hsw.regs = &icl_aux_power_well_regs,
4917 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4918 		},
4919 	},
4920 	{
4921 		.name = "AUX USBC4",
4922 		.domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
4923 		.ops = &icl_aux_power_well_ops,
4924 		.id = DISP_PW_ID_NONE,
4925 		{
4926 			.hsw.regs = &icl_aux_power_well_regs,
4927 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4928 		},
4929 	},
4930 	{
4931 		.name = "AUX TBT1",
4932 		.domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
4933 		.ops = &icl_aux_power_well_ops,
4934 		.id = DISP_PW_ID_NONE,
4935 		{
4936 			.hsw.regs = &icl_aux_power_well_regs,
4937 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4938 			.hsw.is_tc_tbt = true,
4939 		},
4940 	},
4941 	{
4942 		.name = "AUX TBT2",
4943 		.domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
4944 		.ops = &icl_aux_power_well_ops,
4945 		.id = DISP_PW_ID_NONE,
4946 		{
4947 			.hsw.regs = &icl_aux_power_well_regs,
4948 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4949 			.hsw.is_tc_tbt = true,
4950 		},
4951 	},
4952 	{
4953 		.name = "AUX TBT3",
4954 		.domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
4955 		.ops = &icl_aux_power_well_ops,
4956 		.id = DISP_PW_ID_NONE,
4957 		{
4958 			.hsw.regs = &icl_aux_power_well_regs,
4959 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4960 			.hsw.is_tc_tbt = true,
4961 		},
4962 	},
4963 	{
4964 		.name = "AUX TBT4",
4965 		.domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
4966 		.ops = &icl_aux_power_well_ops,
4967 		.id = DISP_PW_ID_NONE,
4968 		{
4969 			.hsw.regs = &icl_aux_power_well_regs,
4970 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4971 			.hsw.is_tc_tbt = true,
4972 		},
4973 	},
4974 };
4975 
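/*
 * A negative disable_power_well module parameter selects the platform
 * default, which is to allow unused power wells to be disabled.
 */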
4976 static int
4977 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4978 				   int disable_power_well)
4979 {
4980 	if (disable_power_well >= 0)
4981 		return !!disable_power_well;
4982 
4983 	return 1;
4984 }
4985 
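/*
 * Derive the mask of allowed DC states from the deepest DC state the
 * platform supports and the enable_dc module parameter. DC9 is always
 * allowed on platforms that support it, since it does not depend on the
 * DMC firmware.
 */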
4986 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4987 			       int enable_dc)
4988 {
4989 	u32 mask;
4990 	int requested_dc;
4991 	int max_dc;
4992 
4993 	if (!HAS_DISPLAY(dev_priv))
4994 		return 0;
4995 
4996 	if (IS_DG1(dev_priv))
4997 		max_dc = 3;
4998 	else if (DISPLAY_VER(dev_priv) >= 12)
4999 		max_dc = 4;
5000 	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
5001 		max_dc = 1;
5002 	else if (DISPLAY_VER(dev_priv) >= 9)
5003 		max_dc = 2;
5004 	else
5005 		max_dc = 0;
5006 
5007 	/*
5008 	 * DC9 has a separate HW flow from the rest of the DC states,
5009 	 * not depending on the DMC firmware. It's needed by system
5010 	 * suspend/resume, so allow it unconditionally.
5011 	 */
5012 	mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
5013 		DISPLAY_VER(dev_priv) >= 11 ?
5014 	       DC_STATE_EN_DC9 : 0;
5015 
5016 	if (!dev_priv->params.disable_power_well)
5017 		max_dc = 0;
5018 
5019 	if (enable_dc >= 0 && enable_dc <= max_dc) {
5020 		requested_dc = enable_dc;
5021 	} else if (enable_dc == -1) {
5022 		requested_dc = max_dc;
5023 	} else if (enable_dc > max_dc && enable_dc <= 4) {
5024 		drm_dbg_kms(&dev_priv->drm,
5025 			    "Adjusting requested max DC state (%d->%d)\n",
5026 			    enable_dc, max_dc);
5027 		requested_dc = max_dc;
5028 	} else {
5029 		drm_err(&dev_priv->drm,
5030 			"Unexpected value for enable_dc (%d)\n", enable_dc);
5031 		requested_dc = max_dc;
5032 	}
5033 
5034 	switch (requested_dc) {
5035 	case 4:
5036 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
5037 		break;
5038 	case 3:
5039 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
5040 		break;
5041 	case 2:
5042 		mask |= DC_STATE_EN_UPTO_DC6;
5043 		break;
5044 	case 1:
5045 		mask |= DC_STATE_EN_UPTO_DC5;
5046 		break;
5047 	}
5048 
5049 	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
5050 
5051 	return mask;
5052 }
5053 
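/*
 * Copy the platform's power well descriptors into the device's power
 * domain state in two passes: first count the wells that are not in
 * skip_mask, then allocate the array and fill it, warning on duplicate
 * or out-of-range well IDs.
 */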
5054 static int
5055 __set_power_wells(struct i915_power_domains *power_domains,
5056 		  const struct i915_power_well_desc *power_well_descs,
5057 		  int power_well_descs_sz, u64 skip_mask)
5058 {
5059 	struct drm_i915_private *i915 = container_of(power_domains,
5060 						     struct drm_i915_private,
5061 						     power_domains);
5062 	u64 power_well_ids = 0;
5063 	int power_well_count = 0;
5064 	int i, plt_idx = 0;
5065 
5066 	for (i = 0; i < power_well_descs_sz; i++)
5067 		if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
5068 			power_well_count++;
5069 
5070 	power_domains->power_well_count = power_well_count;
5071 	power_domains->power_wells =
5072 				kcalloc(power_well_count,
5073 					sizeof(*power_domains->power_wells),
5074 					GFP_KERNEL);
5075 	if (!power_domains->power_wells)
5076 		return -ENOMEM;
5077 
5078 	for (i = 0; i < power_well_descs_sz; i++) {
5079 		enum i915_power_well_id id = power_well_descs[i].id;
5080 
5081 		if (BIT_ULL(id) & skip_mask)
5082 			continue;
5083 
5084 		power_domains->power_wells[plt_idx++].desc =
5085 			&power_well_descs[i];
5086 
5087 		if (id == DISP_PW_ID_NONE)
5088 			continue;
5089 
5090 		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
5091 		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
5092 		power_well_ids |= BIT_ULL(id);
5093 	}
5094 
5095 	return 0;
5096 }
5097 
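/*
 * Convenience wrappers around __set_power_wells(): set_power_wells()
 * installs an entire descriptor array, while set_power_wells_mask()
 * allows skipping the wells whose IDs are set in skip_mask.
 */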
5098 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
5099 	__set_power_wells(power_domains, __power_well_descs, \
5100 			  ARRAY_SIZE(__power_well_descs), skip_mask)
5101 
5102 #define set_power_wells(power_domains, __power_well_descs) \
5103 	set_power_wells_mask(power_domains, __power_well_descs, 0)
5104 
5105 /**
5106  * intel_power_domains_init - initializes the power domain structures
5107  * @dev_priv: i915 device instance
5108  *
5109  * Initializes the power domain structures for @dev_priv depending upon the
5110  * supported platform.
5111  */
5112 int intel_power_domains_init(struct drm_i915_private *dev_priv)
5113 {
5114 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5115 	int err;
5116 
5117 	dev_priv->params.disable_power_well =
5118 		sanitize_disable_power_well_option(dev_priv,
5119 						   dev_priv->params.disable_power_well);
5120 	dev_priv->dmc.allowed_dc_mask =
5121 		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
5122 
5123 	dev_priv->dmc.target_dc_state =
5124 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
5125 
5126 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
5127 
5128 	rw_init(&power_domains->lock, "ipdl");
5129 
5130 	INIT_DELAYED_WORK(&power_domains->async_put_work,
5131 			  intel_display_power_put_async_work);
5132 
5133 	/*
5134 	 * The enabling order will be from lower to higher indexed wells;
5135 	 * the disabling order is reversed.
5136 	 */
5137 	if (!HAS_DISPLAY(dev_priv)) {
5138 		power_domains->power_well_count = 0;
5139 		err = 0;
5140 	} else if (DISPLAY_VER(dev_priv) >= 13) {
5141 		err = set_power_wells(power_domains, xelpd_power_wells);
5142 	} else if (IS_DG1(dev_priv)) {
5143 		err = set_power_wells(power_domains, dg1_power_wells);
5144 	} else if (IS_ALDERLAKE_S(dev_priv)) {
5145 		err = set_power_wells_mask(power_domains, tgl_power_wells,
5146 					   BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
5147 	} else if (IS_ROCKETLAKE(dev_priv)) {
5148 		err = set_power_wells(power_domains, rkl_power_wells);
5149 	} else if (DISPLAY_VER(dev_priv) == 12) {
5150 		err = set_power_wells(power_domains, tgl_power_wells);
5151 	} else if (DISPLAY_VER(dev_priv) == 11) {
5152 		err = set_power_wells(power_domains, icl_power_wells);
5153 	} else if (IS_GEMINILAKE(dev_priv)) {
5154 		err = set_power_wells(power_domains, glk_power_wells);
5155 	} else if (IS_BROXTON(dev_priv)) {
5156 		err = set_power_wells(power_domains, bxt_power_wells);
5157 	} else if (DISPLAY_VER(dev_priv) == 9) {
5158 		err = set_power_wells(power_domains, skl_power_wells);
5159 	} else if (IS_CHERRYVIEW(dev_priv)) {
5160 		err = set_power_wells(power_domains, chv_power_wells);
5161 	} else if (IS_BROADWELL(dev_priv)) {
5162 		err = set_power_wells(power_domains, bdw_power_wells);
5163 	} else if (IS_HASWELL(dev_priv)) {
5164 		err = set_power_wells(power_domains, hsw_power_wells);
5165 	} else if (IS_VALLEYVIEW(dev_priv)) {
5166 		err = set_power_wells(power_domains, vlv_power_wells);
5167 	} else if (IS_I830(dev_priv)) {
5168 		err = set_power_wells(power_domains, i830_power_wells);
5169 	} else {
5170 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
5171 	}
5172 
5173 	return err;
5174 }
5175 
5176 /**
5177  * intel_power_domains_cleanup - clean up power domains resources
5178  * @dev_priv: i915 device instance
5179  *
5180  * Release any resources acquired by intel_power_domains_init().
5181  */
5182 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
5183 {
5184 	kfree(dev_priv->power_domains.power_wells);
5185 }
5186 
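/*
 * Run each power well's sync_hw() hook to reconcile the driver's
 * bookkeeping with the hardware, then refresh the cached hw_enabled
 * flag from the well's is_enabled() hook.
 */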
5187 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
5188 {
5189 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5190 	struct i915_power_well *power_well;
5191 
5192 	mutex_lock(&power_domains->lock);
5193 	for_each_power_well(dev_priv, power_well) {
5194 		power_well->desc->ops->sync_hw(dev_priv, power_well);
5195 		power_well->hw_enabled =
5196 			power_well->desc->ops->is_enabled(dev_priv, power_well);
5197 	}
5198 	mutex_unlock(&power_domains->lock);
5199 }
5200 
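/*
 * Request the given DBuf slice to power up or down, wait a fixed 10 us
 * and warn if the power state bit doesn't reflect the request.
 */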
5201 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
5202 				enum dbuf_slice slice, bool enable)
5203 {
5204 	i915_reg_t reg = DBUF_CTL_S(slice);
5205 	bool state;
5206 
5207 	intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST,
5208 		     enable ? DBUF_POWER_REQUEST : 0);
5209 	intel_de_posting_read(dev_priv, reg);
5210 	udelay(10);
5211 
5212 	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
5213 	drm_WARN(&dev_priv->drm, enable != state,
5214 		 "DBuf slice %d power %s timeout!\n",
5215 		 slice, enabledisable(enable));
5216 }
5217 
5218 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
5219 			     u8 req_slices)
5220 {
5221 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5222 	u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask;
5223 	enum dbuf_slice slice;
5224 
5225 	drm_WARN(&dev_priv->drm, req_slices & ~slice_mask,
5226 		 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n",
5227 		 req_slices, slice_mask);
5228 
5229 	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
5230 		    req_slices);
5231 
5232 	/*
5233 	 * This might run in parallel to gen9_dc_off_power_well_enable
5234 	 * being called from intel_dp_detect, for instance, which would
5235 	 * trigger an assertion due to a race condition:
5236 	 * gen9_assert_dbuf_enabled might preempt this when the registers
5237 	 * were already updated, while dev_priv was not.
5238 	 */
5239 	mutex_lock(&power_domains->lock);
5240 
5241 	for_each_dbuf_slice(dev_priv, slice)
5242 		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
5243 
5244 	dev_priv->dbuf.enabled_slices = req_slices;
5245 
5246 	mutex_unlock(&power_domains->lock);
5247 }
5248 
5249 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
5250 {
5251 	dev_priv->dbuf.enabled_slices =
5252 		intel_enabled_dbuf_slices_mask(dev_priv);
5253 
5254 	/*
5255 	 * Just power up at least 1 slice; we will
5256 	 * figure out later which slices we have and what we need.
5257 	 */
5258 	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
5259 				dev_priv->dbuf.enabled_slices);
5260 }
5261 
5262 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
5263 {
5264 	gen9_dbuf_slices_update(dev_priv, 0);
5265 }
5266 
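/*
 * Program the DBuf tracker state service value (8) into every slice's
 * control register; ADL-P is skipped here.
 */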
5267 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv)
5268 {
5269 	enum dbuf_slice slice;
5270 
5271 	if (IS_ALDERLAKE_P(dev_priv))
5272 		return;
5273 
5274 	for_each_dbuf_slice(dev_priv, slice)
5275 		intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
5276 			     DBUF_TRACKER_STATE_SERVICE_MASK,
5277 			     DBUF_TRACKER_STATE_SERVICE(8));
5278 }
5279 
5280 static void icl_mbus_init(struct drm_i915_private *dev_priv)
5281 {
5282 	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
5283 	u32 mask, val, i;
5284 
5285 	if (IS_ALDERLAKE_P(dev_priv))
5286 		return;
5287 
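	/*
	 * Program identical BT credit pool, B credit and BW credit values
	 * into every ABOX control register present on this platform.
	 */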
5288 	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
5289 		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
5290 		MBUS_ABOX_B_CREDIT_MASK |
5291 		MBUS_ABOX_BW_CREDIT_MASK;
5292 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
5293 		MBUS_ABOX_BT_CREDIT_POOL2(16) |
5294 		MBUS_ABOX_B_CREDIT(1) |
5295 		MBUS_ABOX_BW_CREDIT(1);
5296 
5297 	/*
5298 	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
5299 	 * expect us to program the abox_ctl0 register as well, even though
5300 	 * we don't have to program other instance-0 registers like BW_BUDDY.
5301 	 */
5302 	if (DISPLAY_VER(dev_priv) == 12)
5303 		abox_regs |= BIT(0);
5304 
5305 	for_each_set_bit(i, &abox_regs, sizeof(abox_regs) * 8) /* size in bits */
5306 		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
5307 }
5308 
5309 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
5310 {
5311 	u32 val = intel_de_read(dev_priv, LCPLL_CTL);
5312 
5313 	/*
5314 	 * The LCPLL register should be turned on by the BIOS. For now
5315 	 * let's just check its state and print errors in case
5316 	 * something is wrong.  Don't even try to turn it on.
5317 	 */
5318 
5319 	if (val & LCPLL_CD_SOURCE_FCLK)
5320 		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
5321 
5322 	if (val & LCPLL_PLL_DISABLE)
5323 		drm_err(&dev_priv->drm, "LCPLL is disabled\n");
5324 
5325 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
5326 		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
5327 }
5328 
5329 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
5330 {
5331 	struct drm_device *dev = &dev_priv->drm;
5332 	struct intel_crtc *crtc;
5333 
5334 	for_each_intel_crtc(dev, crtc)
5335 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
5336 				pipe_name(crtc->pipe));
5337 
5338 	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
5339 			"Display power well on\n");
5340 	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
5341 			"SPLL enabled\n");
5342 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
5343 			"WRPLL1 enabled\n");
5344 	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
5345 			"WRPLL2 enabled\n");
5346 	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
5347 			"Panel power on\n");
5348 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
5349 			"CPU PWM1 enabled\n");
5350 	if (IS_HASWELL(dev_priv))
5351 		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
5352 				"CPU PWM2 enabled\n");
5353 	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
5354 			"PCH PWM1 enabled\n");
5355 	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
5356 			"Utility pin enabled\n");
5357 	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
5358 			"PCH GTC enabled\n");
5359 
5360 	/*
5361 	 * In theory we can still leave IRQs enabled, as long as only the HPD
5362 	 * interrupts remain enabled. We used to check for that, but since it's
5363 	 * gen-specific and since we only disable LCPLL after we fully disable
5364 	 * the interrupts, the check below should be enough.
5365 	 */
5366 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
5367 }
5368 
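/*
 * D_COMP sits at different addresses on HSW and BDW, and on HSW writes
 * must go through the pcode mailbox; these helpers hide that difference.
 */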
5369 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
5370 {
5371 	if (IS_HASWELL(dev_priv))
5372 		return intel_de_read(dev_priv, D_COMP_HSW);
5373 	else
5374 		return intel_de_read(dev_priv, D_COMP_BDW);
5375 }
5376 
5377 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
5378 {
5379 	if (IS_HASWELL(dev_priv)) {
5380 		if (sandybridge_pcode_write(dev_priv,
5381 					    GEN6_PCODE_WRITE_D_COMP, val))
5382 			drm_dbg_kms(&dev_priv->drm,
5383 				    "Failed to write to D_COMP\n");
5384 	} else {
5385 		intel_de_write(dev_priv, D_COMP_BDW, val);
5386 		intel_de_posting_read(dev_priv, D_COMP_BDW);
5387 	}
5388 }
5389 
5390 /*
5391  * This function implements pieces of two sequences from BSpec:
5392  * - Sequence for display software to disable LCPLL
5393  * - Sequence for display software to allow package C8+
5394  * The steps implemented here are just the steps that actually touch the LCPLL
5395  * register. Callers should take care of disabling all the display engine
5396  * functions, doing the mode unset, fixing interrupts, etc.
5397  */
5398 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
5399 			      bool switch_to_fclk, bool allow_power_down)
5400 {
5401 	u32 val;
5402 
5403 	assert_can_disable_lcpll(dev_priv);
5404 
5405 	val = intel_de_read(dev_priv, LCPLL_CTL);
5406 
5407 	if (switch_to_fclk) {
5408 		val |= LCPLL_CD_SOURCE_FCLK;
5409 		intel_de_write(dev_priv, LCPLL_CTL, val);
5410 
5411 		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
5412 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
5413 			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
5414 
5415 		val = intel_de_read(dev_priv, LCPLL_CTL);
5416 	}
5417 
5418 	val |= LCPLL_PLL_DISABLE;
5419 	intel_de_write(dev_priv, LCPLL_CTL, val);
5420 	intel_de_posting_read(dev_priv, LCPLL_CTL);
5421 
5422 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
5423 		drm_err(&dev_priv->drm, "LCPLL still locked\n");
5424 
5425 	val = hsw_read_dcomp(dev_priv);
5426 	val |= D_COMP_COMP_DISABLE;
5427 	hsw_write_dcomp(dev_priv, val);
5428 	ndelay(100);
5429 
5430 	if (wait_for((hsw_read_dcomp(dev_priv) &
5431 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
5432 		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
5433 
5434 	if (allow_power_down) {
5435 		val = intel_de_read(dev_priv, LCPLL_CTL);
5436 		val |= LCPLL_POWER_DOWN_ALLOW;
5437 		intel_de_write(dev_priv, LCPLL_CTL, val);
5438 		intel_de_posting_read(dev_priv, LCPLL_CTL);
5439 	}
5440 }
5441 
5442 /*
5443  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
5444  * source.
5445  */
5446 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
5447 {
5448 	u32 val;
5449 
5450 	val = intel_de_read(dev_priv, LCPLL_CTL);
5451 
5452 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
5453 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
5454 		return;
5455 
5456 	/*
5457 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
5458 	 * we'll hang the machine. To prevent PC8 state, just enable forcewake.
5459 	 */
5460 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
5461 
5462 	if (val & LCPLL_POWER_DOWN_ALLOW) {
5463 		val &= ~LCPLL_POWER_DOWN_ALLOW;
5464 		intel_de_write(dev_priv, LCPLL_CTL, val);
5465 		intel_de_posting_read(dev_priv, LCPLL_CTL);
5466 	}
5467 
5468 	val = hsw_read_dcomp(dev_priv);
5469 	val |= D_COMP_COMP_FORCE;
5470 	val &= ~D_COMP_COMP_DISABLE;
5471 	hsw_write_dcomp(dev_priv, val);
5472 
5473 	val = intel_de_read(dev_priv, LCPLL_CTL);
5474 	val &= ~LCPLL_PLL_DISABLE;
5475 	intel_de_write(dev_priv, LCPLL_CTL, val);
5476 
5477 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
5478 		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
5479 
5480 	if (val & LCPLL_CD_SOURCE_FCLK) {
5481 		val = intel_de_read(dev_priv, LCPLL_CTL);
5482 		val &= ~LCPLL_CD_SOURCE_FCLK;
5483 		intel_de_write(dev_priv, LCPLL_CTL, val);
5484 
5485 		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
5486 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
5487 			drm_err(&dev_priv->drm,
5488 				"Switching back to LCPLL failed\n");
5489 	}
5490 
5491 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
5492 
5493 	intel_update_cdclk(dev_priv);
5494 	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
5495 }
5496 
5497 /*
5498  * Package states C8 and deeper are really deep PC states that can only be
5499  * reached when all the devices on the system allow it, so even if the graphics
5500  * device allows PC8+, it doesn't mean the system will actually get to these
5501  * states. Our driver only allows PC8+ when going into runtime PM.
5502  *
5503  * The requirements for PC8+ are that all the outputs are disabled, the power
5504  * well is disabled and most interrupts are disabled, and these are also
5505  * requirements for runtime PM. When these conditions are met, we manually do
5506  * requirements for runtime PM. When these conditions are met, we manually
5507  * take care of the rest: disable the interrupts and clocks, and switch the
5508  * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt,
5509  * we can hard hang the machine.
5510  * When we really reach PC8 or deeper states (not just when we allow it) we lose
5511  * the state of some registers, so when we come back from PC8+ we need to
5512  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
5513  * need to take care of the registers kept by RC6. Notice that this happens even
5514  * if we don't put the device in PCI D3 state (which is what currently happens
5515  * because of the runtime PM support).
5516  *
5517  * For more, read "Display Sequences for Package C8" in the hardware
5518  * documentation.
5519  */
5520 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
5521 {
5522 	u32 val;
5523 
5524 	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
5525 
5526 	if (HAS_PCH_LPT_LP(dev_priv)) {
5527 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5528 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5529 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5530 	}
5531 
5532 	lpt_disable_clkout_dp(dev_priv);
5533 	hsw_disable_lcpll(dev_priv, true, true);
5534 }
5535 
5536 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
5537 {
5538 	u32 val;
5539 
5540 	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
5541 
5542 	hsw_restore_lcpll(dev_priv);
5543 	intel_init_pch_refclk(dev_priv);
5544 
5545 	if (HAS_PCH_LPT_LP(dev_priv)) {
5546 		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
5547 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
5548 		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
5549 	}
5550 }
5551 
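/*
 * Toggle the PCH reset handshake: IVB uses the FLR/reset ACK wait bits in
 * GEN7_MSG_CTL, later platforms use RESET_PCH_HANDSHAKE_ENABLE in
 * HSW_NDE_RSTWRN_OPT.
 */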
5552 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
5553 				      bool enable)
5554 {
5555 	i915_reg_t reg;
5556 	u32 reset_bits, val;
5557 
5558 	if (IS_IVYBRIDGE(dev_priv)) {
5559 		reg = GEN7_MSG_CTL;
5560 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
5561 	} else {
5562 		reg = HSW_NDE_RSTWRN_OPT;
5563 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
5564 	}
5565 
5566 	val = intel_de_read(dev_priv, reg);
5567 
5568 	if (enable)
5569 		val |= reset_bits;
5570 	else
5571 		val &= ~reset_bits;
5572 
5573 	intel_de_write(dev_priv, reg, val);
5574 }
5575 
5576 static void skl_display_core_init(struct drm_i915_private *dev_priv,
5577 				  bool resume)
5578 {
5579 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5580 	struct i915_power_well *well;
5581 
5582 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5583 
5584 	/* enable PCH reset handshake */
5585 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5586 
5587 	if (!HAS_DISPLAY(dev_priv))
5588 		return;
5589 
5590 	/* enable PG1 and Misc I/O */
5591 	mutex_lock(&power_domains->lock);
5592 
5593 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5594 	intel_power_well_enable(dev_priv, well);
5595 
5596 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5597 	intel_power_well_enable(dev_priv, well);
5598 
5599 	mutex_unlock(&power_domains->lock);
5600 
5601 	intel_cdclk_init_hw(dev_priv);
5602 
5603 	gen9_dbuf_enable(dev_priv);
5604 
5605 	if (resume && intel_dmc_has_payload(dev_priv))
5606 		intel_dmc_load_program(dev_priv);
5607 }
5608 
5609 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5610 {
5611 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5612 	struct i915_power_well *well;
5613 
5614 	if (!HAS_DISPLAY(dev_priv))
5615 		return;
5616 
5617 	gen9_disable_dc_states(dev_priv);
5618 
5619 	gen9_dbuf_disable(dev_priv);
5620 
5621 	intel_cdclk_uninit_hw(dev_priv);
5622 
5623 	/* The spec doesn't call for removing the reset handshake flag */
5624 	/* disable PG1 and Misc I/O */
5625 
5626 	mutex_lock(&power_domains->lock);
5627 
5628 	/*
5629 	 * BSpec says to keep the MISC IO power well enabled here, only
5630 	 * remove our request for power well 1.
5631 	 * Note that even though the driver's request is removed, power well 1
5632 	 * may stay enabled after this due to DMC's own request on it.
5633 	 */
5634 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5635 	intel_power_well_disable(dev_priv, well);
5636 
5637 	mutex_unlock(&power_domains->lock);
5638 
5639 	usleep_range(10, 30);		/* 10 us delay per Bspec */
5640 }
5641 
5642 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5643 {
5644 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5645 	struct i915_power_well *well;
5646 
5647 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5648 
5649 	/*
5650 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5651 	 * or else the reset will hang because there is no PCH to respond.
5652 	 * The handshake programming was moved to the initialization sequence;
5653 	 * previously it was left up to the BIOS.
5654 	 */
5655 	intel_pch_reset_handshake(dev_priv, false);
5656 
5657 	if (!HAS_DISPLAY(dev_priv))
5658 		return;
5659 
5660 	/* Enable PG1 */
5661 	mutex_lock(&power_domains->lock);
5662 
5663 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5664 	intel_power_well_enable(dev_priv, well);
5665 
5666 	mutex_unlock(&power_domains->lock);
5667 
5668 	intel_cdclk_init_hw(dev_priv);
5669 
5670 	gen9_dbuf_enable(dev_priv);
5671 
5672 	if (resume && intel_dmc_has_payload(dev_priv))
5673 		intel_dmc_load_program(dev_priv);
5674 }
5675 
5676 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5677 {
5678 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5679 	struct i915_power_well *well;
5680 
5681 	if (!HAS_DISPLAY(dev_priv))
5682 		return;
5683 
5684 	gen9_disable_dc_states(dev_priv);
5685 
5686 	gen9_dbuf_disable(dev_priv);
5687 
5688 	intel_cdclk_uninit_hw(dev_priv);
5689 
5690 	/* The spec doesn't call for removing the reset handshake flag */
5691 
5692 	/*
5693 	 * Disable PW1 (PG1).
5694 	 * Note that even though the driver's request is removed, power well 1
5695 	 * may stay enabled after this due to DMC's own request on it.
5696 	 */
5697 	mutex_lock(&power_domains->lock);
5698 
5699 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5700 	intel_power_well_disable(dev_priv, well);
5701 
5702 	mutex_unlock(&power_domains->lock);
5703 
5704 	usleep_range(10, 30);		/* 10 us delay per Bspec */
5705 }
5706 
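/*
 * BW_BUDDY page masks are selected by DRAM type and channel count; each
 * table below is terminated by an all-zero sentinel entry.
 */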
5707 struct buddy_page_mask {
5708 	u32 page_mask;
5709 	u8 type;
5710 	u8 num_channels;
5711 };
5712 
5713 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5714 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
5715 	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,	.page_mask = 0xF },
5716 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5717 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
5718 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5719 	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
5720 	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
5721 	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
5722 	{}
5723 };
5724 
5725 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5726 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5727 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5728 	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
5729 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
5730 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5731 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5732 	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
5733 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
5734 	{}
5735 };
5736 
5737 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5738 {
5739 	enum intel_dram_type type = dev_priv->dram_info.type;
5740 	u8 num_channels = dev_priv->dram_info.num_channels;
5741 	const struct buddy_page_mask *table;
5742 	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
5743 	int config, i;
5744 
5745 	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
5746 	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
5747 		return;
5748 
5749 	if (IS_ALDERLAKE_S(dev_priv) ||
5750 	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
5751 	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
5752 	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
5753 		/* Wa_1409767108:tgl,dg1,adl-s */
5754 		table = wa_1409767108_buddy_page_masks;
5755 	else
5756 		table = tgl_buddy_page_masks;
5757 
5758 	for (config = 0; table[config].page_mask != 0; config++)
5759 		if (table[config].num_channels == num_channels &&
5760 		    table[config].type == type)
5761 			break;
5762 
5763 	if (table[config].page_mask == 0) {
5764 		drm_dbg(&dev_priv->drm,
5765 			"Unknown memory configuration; disabling address buddy logic.\n");
5766 		for_each_set_bit(i, &abox_mask, sizeof(abox_mask) * 8)
5767 			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
5768 				       BW_BUDDY_DISABLE);
5769 	} else {
5770 		for_each_set_bit(i, &abox_mask, sizeof(abox_mask) * 8) {
5771 			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
5772 				       table[config].page_mask);
5773 
5774 			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
5775 			if (DISPLAY_VER(dev_priv) == 12)
5776 				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
5777 					     BW_BUDDY_TLB_REQ_TIMER_MASK,
5778 					     BW_BUDDY_TLB_REQ_TIMER(0x8));
5779 		}
5780 	}
5781 }
5782 
5783 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5784 				  bool resume)
5785 {
5786 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5787 	struct i915_power_well *well;
5788 	u32 val;
5789 
5790 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5791 
5792 	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
5793 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
5794 	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
5795 		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
5796 			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
5797 
5798 	/* 1. Enable PCH reset handshake. */
5799 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5800 
5801 	if (!HAS_DISPLAY(dev_priv))
5802 		return;
5803 
5804 	/* 2. Initialize all combo phys */
5805 	intel_combo_phy_init(dev_priv);
5806 
5807 	/*
5808 	 * 3. Enable Power Well 1 (PG1).
5809 	 *    The AUX IO power wells will be enabled on demand.
5810 	 */
5811 	mutex_lock(&power_domains->lock);
5812 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5813 	intel_power_well_enable(dev_priv, well);
5814 	mutex_unlock(&power_domains->lock);
5815 
5816 	/* 4. Enable CDCLK. */
5817 	intel_cdclk_init_hw(dev_priv);
5818 
5819 	if (DISPLAY_VER(dev_priv) >= 12)
5820 		gen12_dbuf_slices_config(dev_priv);
5821 
5822 	/* 5. Enable DBUF. */
5823 	gen9_dbuf_enable(dev_priv);
5824 
5825 	/* 6. Setup MBUS. */
5826 	icl_mbus_init(dev_priv);
5827 
5828 	/* 7. Program arbiter BW_BUDDY registers */
5829 	if (DISPLAY_VER(dev_priv) >= 12)
5830 		tgl_bw_buddy_init(dev_priv);
5831 
5832 	/* 8. Ensure PHYs have completed calibration and adaptation */
5833 	if (IS_DG2(dev_priv))
5834 		intel_snps_phy_wait_for_calibration(dev_priv);
5835 
5836 	if (resume && intel_dmc_has_payload(dev_priv))
5837 		intel_dmc_load_program(dev_priv);
5838 
5839 	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
5840 	if (DISPLAY_VER(dev_priv) >= 12) {
5841 		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
5842 		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
5843 		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
5844 	}
5845 
5846 	/* Wa_14011503030:xelpd */
5847 	if (DISPLAY_VER(dev_priv) >= 13)
5848 		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
5849 }
5850 
5851 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5852 {
5853 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5854 	struct i915_power_well *well;
5855 
5856 	if (!HAS_DISPLAY(dev_priv))
5857 		return;
5858 
5859 	gen9_disable_dc_states(dev_priv);
5860 
5861 	/* 1. Disable all display engine functions -> already done */
5862 
5863 	/* 2. Disable DBUF */
5864 	gen9_dbuf_disable(dev_priv);
5865 
5866 	/* 3. Disable CD clock */
5867 	intel_cdclk_uninit_hw(dev_priv);
5868 
5869 	/*
5870 	 * 4. Disable Power Well 1 (PG1).
5871 	 *    The AUX IO power wells are toggled on demand, so they are already
5872 	 *    disabled at this point.
5873 	 */
5874 	mutex_lock(&power_domains->lock);
5875 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5876 	intel_power_well_disable(dev_priv, well);
5877 	mutex_unlock(&power_domains->lock);
5878 
5879 	/* 5. Disable all combo phys. */
5880 	intel_combo_phy_uninit(dev_priv);
5881 }
5882 
5883 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5884 {
5885 	struct i915_power_well *cmn_bc =
5886 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5887 	struct i915_power_well *cmn_d =
5888 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5889 
5890 	/*
5891 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5892 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5893 	 * instead maintain a shadow copy ourselves. Use the actual
5894 	 * power well state and lane status to reconstruct the
5895 	 * expected initial value.
5896 	 */
5897 	dev_priv->chv_phy_control =
5898 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5899 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5900 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5901 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5902 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5903 
5904 	/*
5905 	 * If all lanes are disabled we leave the override disabled
5906 	 * with all power down bits cleared to match the state we
5907 	 * would use after disabling the port. Otherwise enable the
5908 	 * override and set the lane powerdown bits according to the
5909 	 * current lane status.
5910 	 */
5911 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5912 		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5913 		unsigned int mask;
5914 
5915 		mask = status & DPLL_PORTB_READY_MASK;
5916 		if (mask == 0xf)
5917 			mask = 0x0;
5918 		else
5919 			dev_priv->chv_phy_control |=
5920 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5921 
5922 		dev_priv->chv_phy_control |=
5923 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5924 
5925 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5926 		if (mask == 0xf)
5927 			mask = 0x0;
5928 		else
5929 			dev_priv->chv_phy_control |=
5930 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5931 
5932 		dev_priv->chv_phy_control |=
5933 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5934 
5935 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5936 
5937 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5938 	} else {
5939 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5940 	}
5941 
5942 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5943 		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5944 		unsigned int mask;
5945 
5946 		mask = status & DPLL_PORTD_READY_MASK;
5947 
5948 		if (mask == 0xf)
5949 			mask = 0x0;
5950 		else
5951 			dev_priv->chv_phy_control |=
5952 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5953 
5954 		dev_priv->chv_phy_control |=
5955 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5956 
5957 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5958 
5959 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5960 	} else {
5961 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5962 	}
5963 
5964 	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5965 		    dev_priv->chv_phy_control);
5966 
5967 	/* Defer application of initial phy_control to enabling the powerwell */
5968 }
5969 
5970 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5971 {
5972 	struct i915_power_well *cmn =
5973 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5974 	struct i915_power_well *disp2d =
5975 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5976 
5977 	/* If the display might already be active, skip this */
5978 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5979 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5980 	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5981 		return;
5982 
5983 	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5984 
5985 	/* cmnlane needs DPLL registers */
5986 	disp2d->desc->ops->enable(dev_priv, disp2d);
5987 
5988 	/*
5989 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5990 	 * Need to assert and de-assert PHY SB reset by gating the
5991 	 * common lane power, then un-gating it.
5992 	 * Simply ungating isn't sufficient to reset the PHY far enough to
5993 	 * get the ports and lanes running.
5994 	 */
5995 	cmn->desc->ops->disable(dev_priv, cmn);
5996 }
5997 
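/* Ask the Punit whether the subsystem behind @reg0 reports itself power gated. */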
5998 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5999 {
6000 	bool ret;
6001 
6002 	vlv_punit_get(dev_priv);
6003 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
6004 	vlv_punit_put(dev_priv);
6005 
6006 	return ret;
6007 }
6008 
6009 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
6010 {
6011 	drm_WARN(&dev_priv->drm,
6012 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
6013 		 "VED not power gated\n");
6014 }
6015 
6016 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
6017 {
6018 #ifdef notyet
6019 	static const struct pci_device_id isp_ids[] = {
6020 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
6021 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
6022 		{}
6023 	};
6024 
6025 	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
6026 		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
6027 		 "ISP not power gated\n");
6028 #endif
6029 }
6030 
6031 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
6032 
6033 /**
6034  * intel_power_domains_init_hw - initialize hardware power domain state
6035  * @i915: i915 device instance
6036  * @resume: Called from resume code paths or not
6037  *
6038  * This function initializes the hardware power domain state and enables all
6039  * power wells belonging to the INIT power domain. Power wells in other
6040  * domains (and not in the INIT domain) are referenced or disabled by
6041  * intel_modeset_readout_hw_state(). After that the reference count of each
6042  * power well must match its HW enabled state, see
6043  * intel_power_domains_verify_state().
6044  *
6045  * It will return with power domains disabled (to be enabled later by
6046  * intel_power_domains_enable()) and must be paired with
6047  * intel_power_domains_driver_remove().
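 *
 * A rough lifetime pairing, as a sketch only (the real call sites live in
 * the driver load/unload paths):
 *
 *   intel_power_domains_init(i915);
 *   intel_power_domains_init_hw(i915, false);
 *   ...display HW state readout...
 *   intel_power_domains_enable(i915);
 *   ...
 *   intel_power_domains_disable(i915);
 *   intel_power_domains_driver_remove(i915);
 *   intel_power_domains_cleanup(i915);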
6048  */
6049 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
6050 {
6051 	struct i915_power_domains *power_domains = &i915->power_domains;
6052 
6053 	power_domains->initializing = true;
6054 
6055 	if (DISPLAY_VER(i915) >= 11) {
6056 		icl_display_core_init(i915, resume);
6057 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6058 		bxt_display_core_init(i915, resume);
6059 	} else if (DISPLAY_VER(i915) == 9) {
6060 		skl_display_core_init(i915, resume);
6061 	} else if (IS_CHERRYVIEW(i915)) {
6062 		mutex_lock(&power_domains->lock);
6063 		chv_phy_control_init(i915);
6064 		mutex_unlock(&power_domains->lock);
6065 		assert_isp_power_gated(i915);
6066 	} else if (IS_VALLEYVIEW(i915)) {
6067 		mutex_lock(&power_domains->lock);
6068 		vlv_cmnlane_wa(i915);
6069 		mutex_unlock(&power_domains->lock);
6070 		assert_ved_power_gated(i915);
6071 		assert_isp_power_gated(i915);
6072 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
6073 		hsw_assert_cdclk(i915);
6074 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6075 	} else if (IS_IVYBRIDGE(i915)) {
6076 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
6077 	}
6078 
6079 	/*
6080 	 * Keep all power wells enabled for any dependent HW access during
6081 	 * initialization and to make sure we keep BIOS enabled display HW
6082 	 * resources powered until display HW readout is complete. We drop
6083 	 * this reference in intel_power_domains_enable().
6084 	 */
6085 	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6086 	power_domains->init_wakeref =
6087 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6088 
6089 	/* Disable power well support if the user asked for it. */
6090 	if (!i915->params.disable_power_well) {
6091 		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
6092 		i915->power_domains.disable_wakeref = intel_display_power_get(i915,
6093 									      POWER_DOMAIN_INIT);
6094 	}
6095 	intel_power_domains_sync_hw(i915);
6096 
6097 	power_domains->initializing = false;
6098 }
6099 
6100 /**
6101  * intel_power_domains_driver_remove - deinitialize hw power domain state
6102  * @i915: i915 device instance
6103  *
6104  * De-initializes the display power domain HW state. It also ensures that the
6105  * device stays powered up so that the driver can be reloaded.
6106  *
6107  * It must be called with power domains already disabled (after a call to
6108  * intel_power_domains_disable()) and must be paired with
6109  * intel_power_domains_init_hw().
6110  */
6111 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
6112 {
6113 	intel_wakeref_t wakeref __maybe_unused =
6114 		fetch_and_zero(&i915->power_domains.init_wakeref);
6115 
6116 	/* Remove the refcount we took to keep power well support disabled. */
6117 	if (!i915->params.disable_power_well)
6118 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6119 					fetch_and_zero(&i915->power_domains.disable_wakeref));
6120 
6121 	intel_display_power_flush_work_sync(i915);
6122 
6123 	intel_power_domains_verify_state(i915);
6124 
6125 	/* Keep the power well enabled, but cancel its rpm wakeref. */
6126 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
6127 }
6128 
6129 /**
6130  * intel_power_domains_enable - enable toggling of display power wells
6131  * @i915: i915 device instance
6132  *
6133  * Enable the on-demand enabling/disabling of the display power wells. Note that
6134  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
6135  * only at specific points of the display modeset sequence, thus they are not
6136  * affected by the intel_power_domains_enable()/disable() calls. The purpose
6137  * of these functions is to keep the rest of the power wells enabled until the end
6138  * of display HW readout (which will acquire the power references reflecting
6139  * the current HW state).
6140  */
6141 void intel_power_domains_enable(struct drm_i915_private *i915)
6142 {
6143 	intel_wakeref_t wakeref __maybe_unused =
6144 		fetch_and_zero(&i915->power_domains.init_wakeref);
6145 
6146 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6147 	intel_power_domains_verify_state(i915);
6148 }
6149 
6150 /**
6151  * intel_power_domains_disable - disable toggling of display power wells
6152  * @i915: i915 device instance
6153  *
6154  * Disable the on-demand enabling/disabling of the display power wells. See
6155  * intel_power_domains_enable() for which power wells this call controls.
6156  */
6157 void intel_power_domains_disable(struct drm_i915_private *i915)
6158 {
6159 	struct i915_power_domains *power_domains = &i915->power_domains;
6160 
6161 	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6162 	power_domains->init_wakeref =
6163 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
6164 
6165 	intel_power_domains_verify_state(i915);
6166 }
6167 
6168 /**
6169  * intel_power_domains_suspend - suspend power domain state
6170  * @i915: i915 device instance
6171  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
6172  *
6173  * This function prepares the hardware power domain state before entering
6174  * system suspend.
6175  *
6176  * It must be called with power domains already disabled (after a call to
6177  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
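 *
 * A sketch of the system suspend/resume pairing (the actual call sites are
 * in the driver's PM hooks; I915_DRM_SUSPEND_MEM stands in here for any of
 * the idle/mem/hibernation modes):
 *
 *   intel_power_domains_disable(i915);
 *   intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
 *   ...system suspended...
 *   intel_power_domains_resume(i915);
 *   intel_power_domains_enable(i915);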
6178  */
6179 void intel_power_domains_suspend(struct drm_i915_private *i915,
6180 				 enum i915_drm_suspend_mode suspend_mode)
6181 {
6182 	struct i915_power_domains *power_domains = &i915->power_domains;
6183 	intel_wakeref_t wakeref __maybe_unused =
6184 		fetch_and_zero(&power_domains->init_wakeref);
6185 
6186 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
6187 
6188 	/*
6189 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
6190 	 * support, don't manually deinit the power domains. This also means the
6191 	 * DMC firmware will stay active; it will power down any HW
6192 	 * resources as required and also enable deeper system power states
6193 	 * that would be blocked if the firmware was inactive.
6194 	 */
6195 	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
6196 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
6197 	    intel_dmc_has_payload(i915)) {
6198 		intel_display_power_flush_work(i915);
6199 		intel_power_domains_verify_state(i915);
6200 		return;
6201 	}
6202 
6203 	/*
6204 	 * Even if power well support was disabled we still want to disable
6205 	 * power wells if power domains must be deinitialized for suspend.
6206 	 */
6207 	if (!i915->params.disable_power_well)
6208 		intel_display_power_put(i915, POWER_DOMAIN_INIT,
6209 					fetch_and_zero(&i915->power_domains.disable_wakeref));
6210 
6211 	intel_display_power_flush_work(i915);
6212 	intel_power_domains_verify_state(i915);
6213 
6214 	if (DISPLAY_VER(i915) >= 11)
6215 		icl_display_core_uninit(i915);
6216 	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
6217 		bxt_display_core_uninit(i915);
6218 	else if (DISPLAY_VER(i915) == 9)
6219 		skl_display_core_uninit(i915);
6220 
6221 	power_domains->display_core_suspended = true;
6222 }
6223 
6224 /**
6225  * intel_power_domains_resume - resume power domain state
6226  * @i915: i915 device instance
6227  *
6228  * This function resumes the hardware power domain state during system resume.
6229  *
6230  * It will return with power domain support disabled (to be enabled later by
6231  * intel_power_domains_enable()) and must be paired with
6232  * intel_power_domains_suspend().
6233  */
6234 void intel_power_domains_resume(struct drm_i915_private *i915)
6235 {
6236 	struct i915_power_domains *power_domains = &i915->power_domains;
6237 
6238 	if (power_domains->display_core_suspended) {
6239 		intel_power_domains_init_hw(i915, true);
6240 		power_domains->display_core_suspended = false;
6241 	} else {
6242 		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
6243 		power_domains->init_wakeref =
6244 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
6245 	}
6246 
6247 	intel_power_domains_verify_state(i915);
6248 }
6249 
6250 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
6251 
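/* Dump each power well's refcount and the use count of every domain it spans. */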
6252 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
6253 {
6254 	struct i915_power_domains *power_domains = &i915->power_domains;
6255 	struct i915_power_well *power_well;
6256 
6257 	for_each_power_well(i915, power_well) {
6258 		enum intel_display_power_domain domain;
6259 
6260 		drm_dbg(&i915->drm, "%-25s %d\n",
6261 			power_well->desc->name, power_well->count);
6262 
6263 		for_each_power_domain(domain, power_well->desc->domains)
6264 			drm_dbg(&i915->drm, "  %-23s %d\n",
6265 				intel_display_power_domain_str(domain),
6266 				power_domains->domain_use_count[domain]);
6267 	}
6268 }
6269 
6270 /**
6271  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
6272  * @i915: i915 device instance
6273  *
6274  * Verify that the reference count of each power well matches its HW enabled
6275  * state and the total refcount of the domains it belongs to. This must be
6276  * called after modeset HW state sanitization, which is responsible for
6277  * acquiring reference counts for any power wells in use and disabling the
6278  * ones left on by BIOS but not required by any active output.
6279  */
6280 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6281 {
6282 	struct i915_power_domains *power_domains = &i915->power_domains;
6283 	struct i915_power_well *power_well;
6284 	bool dump_domain_info;
6285 
6286 	mutex_lock(&power_domains->lock);
6287 
6288 	verify_async_put_domains_state(power_domains);
6289 
6290 	dump_domain_info = false;
6291 	for_each_power_well(i915, power_well) {
6292 		enum intel_display_power_domain domain;
6293 		int domains_count;
6294 		bool enabled;
6295 
6296 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
6297 		if ((power_well->count || power_well->desc->always_on) !=
6298 		    enabled)
6299 			drm_err(&i915->drm,
6300 				"power well %s state mismatch (refcount %d/enabled %d)",
6301 				power_well->desc->name,
6302 				power_well->count, enabled);
6303 
6304 		domains_count = 0;
6305 		for_each_power_domain(domain, power_well->desc->domains)
6306 			domains_count += power_domains->domain_use_count[domain];
6307 
6308 		if (power_well->count != domains_count) {
6309 			drm_err(&i915->drm,
6310 				"power well %s refcount/domain refcount mismatch "
6311 				"(refcount %d/domains refcount %d)\n",
6312 				power_well->desc->name, power_well->count,
6313 				domains_count);
6314 			dump_domain_info = true;
6315 		}
6316 	}
6317 
6318 	if (dump_domain_info) {
6319 		static bool dumped;
6320 
6321 		if (!dumped) {
6322 			intel_power_domains_dump_info(i915);
6323 			dumped = true;
6324 		}
6325 	}
6326 
6327 	mutex_unlock(&power_domains->lock);
6328 }
6329 
6330 #else
6331 
6332 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
6333 {
6334 }
6335 
6336 #endif
6337 
6338 void intel_display_power_suspend_late(struct drm_i915_private *i915)
6339 {
6340 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6341 	    IS_BROXTON(i915)) {
6342 		bxt_enable_dc9(i915);
6343 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6344 		hsw_enable_pc8(i915);
6345 	}
6346 
6347 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6348 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6349 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
6350 }
6351 
6352 void intel_display_power_resume_early(struct drm_i915_private *i915)
6353 {
6354 	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
6355 	    IS_BROXTON(i915)) {
6356 		gen9_sanitize_dc_state(i915);
6357 		bxt_disable_dc9(i915);
6358 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6359 		hsw_disable_pc8(i915);
6360 	}
6361 
6362 	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
6363 	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
6364 		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
6365 }
6366 
6367 void intel_display_power_suspend(struct drm_i915_private *i915)
6368 {
6369 	if (DISPLAY_VER(i915) >= 11) {
6370 		icl_display_core_uninit(i915);
6371 		bxt_enable_dc9(i915);
6372 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6373 		bxt_display_core_uninit(i915);
6374 		bxt_enable_dc9(i915);
6375 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6376 		hsw_enable_pc8(i915);
6377 	}
6378 }
6379 
6380 void intel_display_power_resume(struct drm_i915_private *i915)
6381 {
6382 	if (DISPLAY_VER(i915) >= 11) {
6383 		bxt_disable_dc9(i915);
6384 		icl_display_core_init(i915, true);
6385 		if (intel_dmc_has_payload(i915)) {
6386 			if (i915->dmc.allowed_dc_mask &
6387 			    DC_STATE_EN_UPTO_DC6)
6388 				skl_enable_dc6(i915);
6389 			else if (i915->dmc.allowed_dc_mask &
6390 				 DC_STATE_EN_UPTO_DC5)
6391 				gen9_enable_dc5(i915);
6392 		}
6393 	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
6394 		bxt_disable_dc9(i915);
6395 		bxt_display_core_init(i915, true);
6396 		if (intel_dmc_has_payload(i915) &&
6397 		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
6398 			gen9_enable_dc5(i915);
6399 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
6400 		hsw_disable_pc8(i915);
6401 	}
6402 }
6403