xref: /openbsd-src/sys/dev/pci/drm/i915/intel_runtime_pm.c (revision 7350f337b9e3eb4461d99580e625c7ef148d107c)
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 					 enum i915_power_well_id power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv,
57 		  enum i915_power_well_id power_well_id);
58 
/**
 * intel_display_power_domain_str - map a power domain to its name
 * @domain: power domain enumerator
 *
 * Returns a constant, human readable string naming @domain for use in
 * debug and error messages. Unknown values trigger MISSING_CASE() and
 * map to "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
158 
/* Turn the well's hardware on, then mark it enabled in SW tracking. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	/* hw_enabled flips only after the enable hook has completed. */
	power_well->hw_enabled = true;
}
166 
/* Mark the well disabled in SW tracking, then turn its hardware off. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	/* Clear hw_enabled before the HW disable, the mirror of enable. */
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
174 
175 static void intel_power_well_get(struct drm_i915_private *dev_priv,
176 				 struct i915_power_well *power_well)
177 {
178 	if (!power_well->count++)
179 		intel_power_well_enable(dev_priv, power_well);
180 }
181 
182 static void intel_power_well_put(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	WARN(!power_well->count, "Use count on power well %s is already zero",
186 	     power_well->name);
187 
188 	if (!--power_well->count)
189 		intel_power_well_disable(dev_priv, power_well);
190 }
191 
192 /**
193  * __intel_display_power_is_enabled - unlocked check for a power domain
194  * @dev_priv: i915 device instance
195  * @domain: power domain to check
196  *
197  * This is the unlocked version of intel_display_power_is_enabled() and should
198  * only be used from error capture and recovery code where deadlocks are
199  * possible.
200  *
201  * Returns:
202  * True when the power domain is enabled, false otherwise.
203  */
204 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
205 				      enum intel_display_power_domain domain)
206 {
207 	struct i915_power_well *power_well;
208 	bool is_enabled;
209 
210 	if (dev_priv->runtime_pm.suspended)
211 		return false;
212 
213 	is_enabled = true;
214 
215 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
216 		if (power_well->always_on)
217 			continue;
218 
219 		if (!power_well->hw_enabled) {
220 			is_enabled = false;
221 			break;
222 		}
223 	}
224 
225 	return is_enabled;
226 }
227 
228 /**
229  * intel_display_power_is_enabled - check for a power domain
230  * @dev_priv: i915 device instance
231  * @domain: power domain to check
232  *
233  * This function can be used to check the hw power domain state. It is mostly
234  * used in hardware state readout functions. Everywhere else code should rely
235  * upon explicit power domain reference counting to ensure that the hardware
236  * block is powered up before accessing it.
237  *
238  * Callers must hold the relevant modesetting locks to ensure that concurrent
239  * threads can't disable the power well while the caller tries to read a few
240  * registers.
241  *
242  * Returns:
243  * True when the power domain is enabled, false otherwise.
244  */
245 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
246 				    enum intel_display_power_domain domain)
247 {
248 	struct i915_power_domains *power_domains;
249 	bool ret;
250 
251 	power_domains = &dev_priv->power_domains;
252 
253 	mutex_lock(&power_domains->lock);
254 	ret = __intel_display_power_is_enabled(dev_priv, domain);
255 	mutex_unlock(&power_domains->lock);
256 
257 	return ret;
258 }
259 
260 /**
261  * intel_display_set_init_power - set the initial power domain state
262  * @dev_priv: i915 device instance
263  * @enable: whether to enable or disable the initial power domain state
264  *
265  * For simplicity our driver load/unload and system suspend/resume code assumes
266  * that all power domains are always enabled. This functions controls the state
267  * of this little hack. While the initial power domain state is enabled runtime
268  * pm is effectively disabled.
269  */
270 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
271 				  bool enable)
272 {
273 	if (dev_priv->power_domains.init_power_on == enable)
274 		return;
275 
276 	if (enable)
277 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
278 	else
279 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
280 
281 	dev_priv->power_domains.init_power_on = enable;
282 }
283 
284 /*
285  * Starting with Haswell, we have a "Power Down Well" that can be turned off
286  * when not needed anymore. We have 4 registers that can request the power well
287  * to be enabled, and it will only be disabled if none of the registers is
288  * requesting it to be enabled.
289  */
/*
 * Post-enable fixups after a HSW+ power well comes up: poke the VGA MSR
 * register to silence spurious unclaimed-register interrupts and re-enable
 * the pipe interrupts that were lost while the well was down.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Restore interrupt state for pipes powered by this well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
314 
/*
 * Pre-disable step before turning a HSW+ power well off: quiesce the
 * interrupts of the pipes that will lose power.
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
321 
322 
/*
 * Poll the driver control register until the well's STATE bit reports
 * "enabled"; warns on timeout instead of failing hard.
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					HSW_PWR_WELL_CTL_DRIVER(id),
					HSW_PWR_WELL_CTL_STATE(id),
					HSW_PWR_WELL_CTL_STATE(id),
					1));
}
335 
336 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
337 				     enum i915_power_well_id id)
338 {
339 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
340 	u32 ret;
341 
342 	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
343 	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
344 	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
345 	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
346 
347 	return ret;
348 }
349 
/*
 * Best-effort wait for the well's STATE bit to clear; if another agent is
 * still requesting the well, give up and log who it is.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* Note: both 'disabled' and 'reqs' are assigned inside the condition. */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
			       HSW_PWR_WELL_CTL_STATE(id))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
376 
/*
 * Wait for the fuse distribution status of power gate @pg to assert;
 * warns on timeout.
 */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
385 
/*
 * Generic HSW+ power well enable sequence: optionally wait for the fuse
 * state of the dependent power gate, set the driver's request bit, wait
 * for the enable to complete, apply the CNL AUX workaround, then wait
 * for this well's own fuse state and run post-enable fixups.
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool wait_fuses = power_well->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		/* Gen11+ and gen9/10 use different PW -> PG mappings. */
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
						 SKL_PW_TO_PG(id);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Request the well via the driver control register. */
	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
		val = I915_READ(CNL_AUX_ANAOVRD1(id));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
				   power_well->hsw.has_vga);
}
427 
/*
 * Generic HSW+ power well disable sequence: quiesce pipe interrupts,
 * clear the driver's request bit and wait for the well to go down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 val;

	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
441 
442 #define ICL_AUX_PW_TO_PORT(pw)	((pw) - ICL_DISP_PW_AUX_A)
443 
/*
 * ICL combo PHY AUX power well enable: request the well, enable the AUX
 * lane in the port's common lane register, then wait for the well to
 * come up. The register order matters; keep it as is.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	hsw_wait_for_power_well_enable(dev_priv, power_well);
}
460 
/*
 * ICL combo PHY AUX power well disable: mirror image of the enable path —
 * drop the AUX lane enable first, then clear the well request and wait
 * for the well to go down.
 */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
478 
479 /*
480  * We should only use the power well if we explicitly asked the hardware to
481  * enable it, so check if it's enabled and also check if we've requested it to
482  * be enabled.
483  */
484 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
485 				   struct i915_power_well *power_well)
486 {
487 	enum i915_power_well_id id = power_well->id;
488 	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
489 
490 	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
491 }
492 
/*
 * Sanity checks (warn-only) for the preconditions of entering DC9:
 * DC9/DC5 not already armed, power well 2 off, interrupts disabled.
 */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id = SKL_DISP_PW_2;

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
		  HSW_PWR_WELL_CTL_REQ(id),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
515 
/*
 * Sanity checks (warn-only) for the preconditions of exiting DC9:
 * interrupts still disabled and DC5 not armed.
 */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
531 
/*
 * Write @state to DC_STATE_EN and verify it sticks, rewriting as needed.
 * The DMC firmware has been observed to revert the register, so the write
 * is retried (up to 100 rewrites) until 5 consecutive reads return the
 * requested value.
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			/* Stable for several reads in a row; we're done. */
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
568 
569 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
570 {
571 	u32 mask;
572 
573 	mask = DC_STATE_EN_UPTO_DC5;
574 	if (IS_GEN9_LP(dev_priv))
575 		mask |= DC_STATE_EN_DC9;
576 	else
577 		mask |= DC_STATE_EN_UPTO_DC6;
578 
579 	return mask;
580 }
581 
582 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
583 {
584 	u32 val;
585 
586 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
587 
588 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
589 		      dev_priv->csr.dc_state, val);
590 	dev_priv->csr.dc_state = val;
591 }
592 
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back when exiting
 * that state to a shallower power state (lower in number). The HW will decide
 * when to actually enter a given state on an on-demand basis, for instance
 * depending on the active state of display pipes. The state of display
 * registers backed by affected power rails are saved/restored as needed.
 *
 * Based on the above enabling a deeper DC power state is asynchronous wrt.
 * enabling it. Disabling a deeper power state is synchronous: for instance
 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
 * back on and register state is restored. This is guaranteed by the MMIO write
 * to DC_STATE_EN blocking until the state is restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	/* Clamp to the states allowed on this platform/configuration. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	/* Replace only the platform-valid DC bits, preserve the rest. */
	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
641 
/*
 * Enter DC9 on BXT/GLK: reset the panel power sequencer state first,
 * since its registers are lost in DC9, then arm the state.
 */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
651 
/*
 * Exit DC9 on BXT/GLK and re-apply the PPS register unlock workaround
 * that DC9 entry clobbered.
 */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
662 
/* Warn if the CSR/DMC firmware does not appear to be loaded into HW. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
670 
/*
 * Sanity checks (warn-only) for the preconditions of enabling DC5:
 * power well 2 off, DC5 not already armed, an RPM wakeref held and the
 * DMC firmware loaded.
 */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
684 
/* Allow the hardware to enter DC5 when idle. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
698 
/*
 * Sanity checks (warn-only) for the preconditions of enabling DC6:
 * backlight utility pin off, DC6 not already armed, DMC firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
708 
/* Allow the hardware to enter DC6 (deeper than DC5) when idle. */
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
722 
/*
 * Sync driver state with a power well request left over from BIOS: if
 * BIOS requested the well, transfer that request to the driver control
 * register (keeping the well on) and clear the BIOS request bit.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));

		/* Set our request first so the well never drops. */
		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
	}
}
739 
/* BXT DPIO common lane well enable: bring up the associated DDI PHY. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
745 
/* BXT DPIO common lane well disable: tear down the associated DDI PHY. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
751 
/* BXT DPIO common lane well status: report the associated PHY's state. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
757 
/*
 * Cross-check the programmed state of every DDI PHY whose DPIO common
 * lane power well is currently referenced. GLK additionally has a third
 * common lane well (CMN_C).
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
	}
}
776 
/* The "DC off" well is enabled exactly when no DC5/DC6 state is armed. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
782 
783 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
784 {
785 	u32 tmp = I915_READ(DBUF_CTL);
786 
787 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
788 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
789 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
790 }
791 
/*
 * "Enable" the DC-off well, i.e. forbid DC states entirely, and verify
 * that the state DMC restored (cdclk, DBuf, BXT/GLK PHYs) matches what
 * the driver expects.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
808 
/*
 * "Disable" the DC-off well, i.e. re-arm the deepest DC state allowed,
 * but only if the DMC firmware payload is actually loaded.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	/* Prefer the deepest allowed state: DC6 over DC5. */
	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		skl_enable_dc6(dev_priv);
	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
		gen9_enable_dc5(dev_priv);
}
820 
/* No HW state to sync on platforms without real power wells. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
825 
/* Enable/disable hooks for always-on wells: nothing to do. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
830 
/* An always-on well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
836 
/*
 * i830 "pipes" power well enable: force both pipes on (idempotent —
 * pipes already running are left alone).
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
845 
/* i830 "pipes" power well disable: turn both pipes off, B before A. */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
852 
853 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
854 					  struct i915_power_well *power_well)
855 {
856 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
857 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
858 }
859 
860 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
861 					  struct i915_power_well *power_well)
862 {
863 	if (power_well->count > 0)
864 		i830_pipes_power_well_enable(dev_priv, power_well);
865 	else
866 		i830_pipes_power_well_disable(dev_priv, power_well);
867 }
868 
/*
 * Set a VLV/CHV power well on or off through the punit sideband:
 * skip if the status register already matches, otherwise update the
 * control register and poll (100 ms) for the status to follow.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

/* True when the punit reports the requested power state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	/* Update only this well's bits in the shared control register. */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
904 
/* VLV power well enable hook: thin wrapper around vlv_set_power_well(). */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
910 
/* VLV power well disable hook: thin wrapper around vlv_set_power_well(). */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
916 
/*
 * Read a VLV/CHV power well's state from the punit, with consistency
 * checks: the status must be either fully on or fully gated, and the
 * control register must agree with the status (no in-flight transition).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
952 
/*
 * Program the display clock gating, memory arbiter and raw clock
 * frequency registers that need to be redone whenever the display
 * power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	/* deliberately keep only the DPOUNIT bit from the old value */
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk_freq must have been determined before we get here */
	WARN_ON(dev_priv->rawclk_freq == 0);

	/* the register wants the frequency scaled down by 1000 */
	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
979 
/*
 * Common re-initialization after the VLV/CHV display power well has been
 * enabled: bring up the DPLL ref/CRI clocks, redo clock gating, re-enable
 * display irqs and - outside of driver init/resume - restore the HPD,
 * CRT, VGA and PPS state that was lost with the well.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1028 
/*
 * Tear-down counterpart of vlv_display_power_well_init(), run before the
 * display power well is turned off: quiesce display irqs and reset the
 * panel power sequencer state.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* compiled out in this port ("notyet") - TODO confirm when ported */
#ifdef notyet
	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
#endif
}
1046 
/*
 * Enable the DISP2D power well, then (re)initialize the display side
 * state that depends on it.  Ordering matters: the well must be powered
 * before vlv_display_power_well_init() touches display registers.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1056 
/*
 * Quiesce the display side state first, then gate the DISP2D power well.
 * Mirror image of vlv_display_power_well_enable().
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1066 
/*
 * Enable the VLV DPIO common lane power well and then de-assert the
 * common lane reset, per the sequence documented below.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1090 
/*
 * Disable the VLV DPIO common lane power well.  All pipe PLLs must be
 * off already; common reset is asserted before gating the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1106 
/* Mask covering every defined power domain bit */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1108 
1109 static struct i915_power_well *
1110 lookup_power_well(struct drm_i915_private *dev_priv,
1111 		  enum i915_power_well_id power_well_id)
1112 {
1113 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1114 	int i;
1115 
1116 	for (i = 0; i < power_domains->power_well_count; i++) {
1117 		struct i915_power_well *power_well;
1118 
1119 		power_well = &power_domains->power_wells[i];
1120 		if (power_well->id == power_well_id)
1121 			return power_well;
1122 	}
1123 
1124 	return NULL;
1125 }
1126 
/* true iff all of @bits are set in @val */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1128 
/*
 * Cross-check the software bookkeeping in dev_priv->chv_phy_control
 * against the actual DISPLAY_PHY_STATUS reported by the hardware:
 * derive the expected per-channel CL/spline LDO status bits from the
 * current lane override settings and power well state, then wait for
 * the hardware status to match.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* 0x3 = lanes 0-1, 0xc = lanes 2-3: one spline LDO each */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1234 
1235 #undef BITS_SET
1236 
/*
 * Enable one of the CHV DPIO common lane power wells (CMN_BC drives
 * PHY0 via pipe A's sideband, CMN_D drives PHY1 via pipe C's), wait for
 * the phypwrgood signal, program dynamic power down, and finally
 * de-assert the common lane reset.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* pipe here selects the DPIO sideband channel, not a display pipe */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* de-assert common lane reset and keep our bookkeeping in sync */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1300 
/*
 * Disable a CHV DPIO common lane power well: assert the common lane
 * reset (all PLLs that could be using the PHY must be off), gate the
 * well, and re-arm the PHY state asserts now that a full reset has
 * happened.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1331 
/*
 * Verify that the actual DPIO lane power down status for a PHY channel
 * matches what the current override/mask settings say it should be.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* some lanes enabled: any-lane powerdown, but not all */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1393 
/*
 * Set or clear the power down override enable for a whole PHY channel.
 * Returns the previous override state so the caller can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* no hardware access needed if the override is already as requested */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1424 
/*
 * Update the per-lane power down override mask for the encoder's PHY
 * channel and enable/disable the override, then sanity check the
 * resulting PHY status.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* replace the old lane mask for this channel with the new one */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1454 
/*
 * Report whether the CHV pipe A power well is on, reading the per-pipe
 * power state bits in PUNIT_REG_DSPFREQ.  Only pipe A is handled here.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	/* control bits sit 16 below the status bits in the register */
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1483 
/*
 * Set the CHV pipe A power well state via the Punit DSPFREQ register:
 * program the request bits and poll the status bits until they match.
 * Analogous to vlv_set_power_well() but for the per-pipe wells.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

/* true once the pipe status bits report the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* already in the requested state, nothing to do */
	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1517 
/*
 * Enable the CHV pipe A power well, then (re)initialize the display
 * side state that depends on it.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1527 
/*
 * Quiesce the display side state first, then gate the CHV pipe A
 * power well.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1537 
/*
 * Grab a reference on every power well backing @domain and bump the
 * domain use count.  Both visible callers hold power_domains->lock
 * around this.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1550 
1551 /**
1552  * intel_display_power_get - grab a power domain reference
1553  * @dev_priv: i915 device instance
1554  * @domain: power domain to reference
1555  *
1556  * This function grabs a power domain reference for @domain and ensures that the
1557  * power domain and all its parents are powered up. Therefore users should only
1558  * grab a reference to the innermost power domain they need.
1559  *
1560  * Any power domain reference obtained by this function must have a symmetric
1561  * call to intel_display_power_put() to release the reference again.
1562  */
1563 void intel_display_power_get(struct drm_i915_private *dev_priv,
1564 			     enum intel_display_power_domain domain)
1565 {
1566 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1567 
1568 	intel_runtime_pm_get(dev_priv);
1569 
1570 	mutex_lock(&power_domains->lock);
1571 
1572 	__intel_display_power_get_domain(dev_priv, domain);
1573 
1574 	mutex_unlock(&power_domains->lock);
1575 }
1576 
1577 /**
1578  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1579  * @dev_priv: i915 device instance
1580  * @domain: power domain to reference
1581  *
1582  * This function grabs a power domain reference for @domain and ensures that the
1583  * power domain and all its parents are powered up. Therefore users should only
1584  * grab a reference to the innermost power domain they need.
1585  *
1586  * Any power domain reference obtained by this function must have a symmetric
1587  * call to intel_display_power_put() to release the reference again.
1588  */
1589 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1590 					enum intel_display_power_domain domain)
1591 {
1592 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1593 	bool is_enabled;
1594 
1595 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1596 		return false;
1597 
1598 	mutex_lock(&power_domains->lock);
1599 
1600 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1601 		__intel_display_power_get_domain(dev_priv, domain);
1602 		is_enabled = true;
1603 	} else {
1604 		is_enabled = false;
1605 	}
1606 
1607 	mutex_unlock(&power_domains->lock);
1608 
1609 	if (!is_enabled)
1610 		intel_runtime_pm_put(dev_priv);
1611 
1612 	return is_enabled;
1613 }
1614 
1615 /**
1616  * intel_display_power_put - release a power domain reference
1617  * @dev_priv: i915 device instance
1618  * @domain: power domain to reference
1619  *
1620  * This function drops the power domain reference obtained by
1621  * intel_display_power_get() and might power down the corresponding hardware
1622  * block right away if this is the last reference.
1623  */
1624 void intel_display_power_put(struct drm_i915_private *dev_priv,
1625 			     enum intel_display_power_domain domain)
1626 {
1627 	struct i915_power_domains *power_domains;
1628 	struct i915_power_well *power_well;
1629 
1630 	power_domains = &dev_priv->power_domains;
1631 
1632 	mutex_lock(&power_domains->lock);
1633 
1634 	WARN(!power_domains->domain_use_count[domain],
1635 	     "Use count on domain %s is already zero\n",
1636 	     intel_display_power_domain_str(domain));
1637 	power_domains->domain_use_count[domain]--;
1638 
1639 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1640 		intel_power_well_put(dev_priv, power_well);
1641 
1642 	mutex_unlock(&power_domains->lock);
1643 
1644 	intel_runtime_pm_put(dev_priv);
1645 }
1646 
/*
 * Bitmasks of the display power domains associated with each power
 * well, for I830 and VLV platforms.
 */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
1701 
/* CHV power well -> display power domain masks */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
1735 
/* HSW/BDW/SKL power well -> display power domain masks */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1806 
/*
 * BXT power well 2: like SKL but only DDI B/C (no D/E) and no AUX D.
 */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* DC-off references on BXT additionally cover GMBUS. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* BXT DPIO PHY common-lane wells: PHY1 drives DDI A, PHY0 drives B/C. */
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1839 
/*
 * GLK power well 2: same shape as BXT's (DDI B/C lanes, AUX B/C,
 * pipes B/C with transcoders and fitters, audio, VGA).
 */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * GLK DDI IO wells carry only the IO domain itself — unlike SKL these
 * deliberately exclude POWER_DOMAIN_INIT.
 */
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
/* GLK has three single-port DPIO PHY common-lane wells (A, B, C). */
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX A is the only AUX well that also covers the AUX IO A domain. */
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1890 
/*
 * CNL power well 2: pipes B/C and transcoders plus DDI B/C/D/F lanes
 * and AUX B/C/D/F. Port F only exists on some SKUs; the F wells are
 * dropped from the table at init time when the port is absent (see
 * intel_power_domains_init()).
 */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX A alone also covers the AUX IO A domain. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1947 
1948 /*
1949  * ICL PW_0/PG_0 domains (HW/DMC control):
1950  * - PCI
1951  * - clocks except port PLL
1952  * - central power except FBC
1953  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
1954  * ICL PW_1/PG_1 domains (HW/DMC control):
1955  * - DBUF function
1956  * - PIPE_A and its planes, except VGA
1957  * - transcoder EDP + PSR
1958  * - transcoder DSI
1959  * - DDI_A
1960  * - FBC
1961  */
/* ICL PW_4: pipe C only; nested inside PW_3 (see ICL_PW_3_POWER_DOMAINS). */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
/*
 * ICL PW_3 is a superset of PW_4 and adds pipe B, all transcoders,
 * the DDI B-F lanes+IO, AUX B-F and the four Thunderbolt AUX channels,
 * VGA and audio.
 */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (			\
	ICL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - eDP/DSI VDSC
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	ICL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Single-domain DDI IO wells (no POWER_DOMAIN_INIT on ICL). */
#define ICL_DDI_IO_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

/* Single-domain AUX IO wells, including the TBT1-4 Thunderbolt channels. */
#define ICL_AUX_A_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2046 
/*
 * Ops for wells that are always powered: enable/disable are no-ops and
 * is_enabled always reports the well as on.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* CHV pipe power well callbacks; no HW state to sync at init. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/*
 * CHV DPIO common-lane well callbacks; enabled-state is queried via the
 * shared VLV helper.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2067 
/*
 * Fallback table for platforms without SW-controllable display power
 * wells: a single always-on well covering every power domain.
 */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
2077 
/* i830: the pipes have their own full set of power well callbacks. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* i830 wells: the always-on well plus a single well for both pipes. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
2100 
/*
 * HSW-style power well callbacks, reused by all HSW+ request/state
 * register controlled wells (HSW/BDW/SKL/BXT/GLK/CNL/ICL tables below).
 */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Gen9+ virtual "DC off" well: enabling it blocks DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* BXT/GLK DPIO PHY common-lane well callbacks. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2121 
/*
 * HSW wells, in enable order (disable order is the reverse): always-on
 * plus the single global display well.
 */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			/* VGA handling is tied to this well. */
			.hsw.has_vga = true,
		},
	},
};
2140 
/*
 * BDW wells: like HSW, but the display well also gates pipe B/C
 * interrupts (irq_pipe_mask).
 */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2160 
/* VLV display (DISP2D) power well callbacks. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* VLV DPIO common-lane well callbacks. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic VLV (Punit-controlled) power well callbacks. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2181 
/*
 * VLV wells, in enable order. Each of the four dpio-tx wells lists all
 * four TX lane domain masks, so grabbing any lane domain powers up
 * every TX well.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2239 
/* CHV wells: always-on, the pipe-A/disp2d well, and two DPIO PHYs. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2272 
2273 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2274 					 enum i915_power_well_id power_well_id)
2275 {
2276 	struct i915_power_well *power_well;
2277 	bool ret;
2278 
2279 	power_well = lookup_power_well(dev_priv, power_well_id);
2280 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2281 
2282 	return ret;
2283 }
2284 
/*
 * SKL wells, in enable order (disable order is the reverse). PW1 and
 * MISC IO are owned by the DMC firmware, hence their empty domain masks.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		/* Virtual well: while enabled, DC states are blocked. */
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2352 
/*
 * BXT wells, in enable order. The DPIO common wells carry the PHY they
 * control in the bxt.phy field.
 */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Domains handled by the DMC firmware, hence empty. */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		/* Virtual well: while enabled, DC states are blocked. */
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2406 
/*
 * GLK wells, in enable order: always-on, DMC-owned PW1, DC off, PW2,
 * three DPIO PHY common wells, the AUX A/B/C wells and the DDI IO wells.
 */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		/* Virtual well: while enabled, DC states are blocked. */
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2506 
/*
 * CNL wells, in enable order. The "DDI F IO" and "AUX F" entries are
 * placed last so intel_power_domains_init() can trim them from the
 * table on SKUs without port F.
 */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		/* Virtual well: while enabled, DC states are blocked. */
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_F,
	},
};
2603 
/*
 * ICL combo-PHY AUX wells need their own enable/disable sequence; the
 * state query reuses the generic HSW helper.
 */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2610 
/*
 * ICL wells, in enable order. Only AUX A/B use the combo-PHY AUX ops;
 * the remaining AUX and TBT wells use the plain HSW ops.
 */
static struct i915_power_well icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_1,
		.hsw.has_fuses = true,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_2,
		.hsw.has_fuses = true,
	},
	{
		/* Virtual well: while enabled, DC states are blocked. */
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		.hsw.irq_pipe_mask = BIT(PIPE_B),
		.hsw.has_vga = true,
		.hsw.has_fuses = true,
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_E,
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_D,
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_E,
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_F,
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT1,
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT2,
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT3,
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT4,
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_4,
		.hsw.has_fuses = true,
		.hsw.irq_pipe_mask = BIT(PIPE_C),
	},
};
2754 
/*
 * Normalize the disable_power_well modparam: a negative value means
 * "auto", which currently always resolves to 1 (allow disabling power
 * wells); any non-negative value is squashed to a strict 0/1 boolean.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well < 0)
		return 1;

	return disable_power_well != 0;
}
2764 
2765 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2766 				    int enable_dc)
2767 {
2768 	uint32_t mask;
2769 	int requested_dc;
2770 	int max_dc;
2771 
2772 	if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
2773 		max_dc = 2;
2774 		mask = 0;
2775 	} else if (IS_GEN9_LP(dev_priv)) {
2776 		max_dc = 1;
2777 		/*
2778 		 * DC9 has a separate HW flow from the rest of the DC states,
2779 		 * not depending on the DMC firmware. It's needed by system
2780 		 * suspend/resume, so allow it unconditionally.
2781 		 */
2782 		mask = DC_STATE_EN_DC9;
2783 	} else {
2784 		max_dc = 0;
2785 		mask = 0;
2786 	}
2787 
2788 	if (!i915_modparams.disable_power_well)
2789 		max_dc = 0;
2790 
2791 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2792 		requested_dc = enable_dc;
2793 	} else if (enable_dc == -1) {
2794 		requested_dc = max_dc;
2795 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2796 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2797 			      enable_dc, max_dc);
2798 		requested_dc = max_dc;
2799 	} else {
2800 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2801 		requested_dc = max_dc;
2802 	}
2803 
2804 	if (requested_dc > 1)
2805 		mask |= DC_STATE_EN_UPTO_DC6;
2806 	if (requested_dc > 0)
2807 		mask |= DC_STATE_EN_UPTO_DC5;
2808 
2809 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2810 
2811 	return mask;
2812 }
2813 
2814 static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2815 {
2816 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2817 	u64 power_well_ids;
2818 	int i;
2819 
2820 	power_well_ids = 0;
2821 	for (i = 0; i < power_domains->power_well_count; i++) {
2822 		enum i915_power_well_id id = power_domains->power_wells[i].id;
2823 
2824 		WARN_ON(id >= sizeof(power_well_ids) * 8);
2825 		WARN_ON(power_well_ids & BIT_ULL(id));
2826 		power_well_ids |= BIT_ULL(id);
2827 	}
2828 }
2829 
/*
 * Install the given statically sized power well table into @power_domains.
 * Must be used with an actual array so ARRAY_SIZE() yields the element
 * count (a pointer argument would silently produce a wrong count).
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
2834 
2835 /**
2836  * intel_power_domains_init - initializes the power domain structures
2837  * @dev_priv: i915 device instance
2838  *
2839  * Initializes the power domain structures for @dev_priv depending upon the
2840  * supported platform.
2841  */
2842 int intel_power_domains_init(struct drm_i915_private *dev_priv)
2843 {
2844 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2845 
2846 	i915_modparams.disable_power_well =
2847 		sanitize_disable_power_well_option(dev_priv,
2848 						   i915_modparams.disable_power_well);
2849 	dev_priv->csr.allowed_dc_mask =
2850 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
2851 
2852 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
2853 
2854 	rw_init(&power_domains->lock, "pdmlk");
2855 
2856 	/*
2857 	 * The enabling order will be from lower to higher indexed wells,
2858 	 * the disabling order is reversed.
2859 	 */
2860 	if (IS_ICELAKE(dev_priv)) {
2861 		set_power_wells(power_domains, icl_power_wells);
2862 	} else if (IS_HASWELL(dev_priv)) {
2863 		set_power_wells(power_domains, hsw_power_wells);
2864 	} else if (IS_BROADWELL(dev_priv)) {
2865 		set_power_wells(power_domains, bdw_power_wells);
2866 	} else if (IS_GEN9_BC(dev_priv)) {
2867 		set_power_wells(power_domains, skl_power_wells);
2868 	} else if (IS_CANNONLAKE(dev_priv)) {
2869 		set_power_wells(power_domains, cnl_power_wells);
2870 
2871 		/*
2872 		 * DDI and Aux IO are getting enabled for all ports
2873 		 * regardless the presence or use. So, in order to avoid
2874 		 * timeouts, lets remove them from the list
2875 		 * for the SKUs without port F.
2876 		 */
2877 		if (!IS_CNL_WITH_PORT_F(dev_priv))
2878 			power_domains->power_well_count -= 2;
2879 
2880 	} else if (IS_BROXTON(dev_priv)) {
2881 		set_power_wells(power_domains, bxt_power_wells);
2882 	} else if (IS_GEMINILAKE(dev_priv)) {
2883 		set_power_wells(power_domains, glk_power_wells);
2884 	} else if (IS_CHERRYVIEW(dev_priv)) {
2885 		set_power_wells(power_domains, chv_power_wells);
2886 	} else if (IS_VALLEYVIEW(dev_priv)) {
2887 		set_power_wells(power_domains, vlv_power_wells);
2888 	} else if (IS_I830(dev_priv)) {
2889 		set_power_wells(power_domains, i830_power_wells);
2890 	} else {
2891 		set_power_wells(power_domains, i9xx_always_on_power_well);
2892 	}
2893 
2894 	assert_power_well_ids_unique(dev_priv);
2895 
2896 	return 0;
2897 }
2898 
2899 /**
2900  * intel_power_domains_fini - finalizes the power domain structures
2901  * @dev_priv: i915 device instance
2902  *
2903  * Finalizes the power domain structures for @dev_priv depending upon the
2904  * supported platform. This function also disables runtime pm and ensures that
2905  * the device stays powered up so that the driver can be reloaded.
2906  */
2907 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2908 {
2909 #ifdef __linux__
2910 	struct device *kdev = &dev_priv->drm.pdev->dev;
2911 #endif
2912 
2913 	/*
2914 	 * The i915.ko module is still not prepared to be loaded when
2915 	 * the power well is not enabled, so just enable it in case
2916 	 * we're going to unload/reload.
2917 	 * The following also reacquires the RPM reference the core passed
2918 	 * to the driver during loading, which is dropped in
2919 	 * intel_runtime_pm_enable(). We have to hand back the control of the
2920 	 * device to the core with this reference held.
2921 	 */
2922 	intel_display_set_init_power(dev_priv, true);
2923 
2924 	/* Remove the refcount we took to keep power well support disabled. */
2925 	if (!i915_modparams.disable_power_well)
2926 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2927 
2928 	/*
2929 	 * Remove the refcount we took in intel_runtime_pm_enable() in case
2930 	 * the platform doesn't support runtime PM.
2931 	 */
2932 #ifdef __linux__
2933 	if (!HAS_RUNTIME_PM(dev_priv))
2934 		pm_runtime_put(kdev);
2935 #endif
2936 }
2937 
2938 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2939 {
2940 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2941 	struct i915_power_well *power_well;
2942 
2943 	mutex_lock(&power_domains->lock);
2944 	for_each_power_well(dev_priv, power_well) {
2945 		power_well->ops->sync_hw(dev_priv, power_well);
2946 		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2947 								     power_well);
2948 	}
2949 	mutex_unlock(&power_domains->lock);
2950 }
2951 
2952 static inline
2953 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
2954 			  i915_reg_t reg, bool enable)
2955 {
2956 	u32 val, status;
2957 
2958 	val = I915_READ(reg);
2959 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
2960 	I915_WRITE(reg, val);
2961 	POSTING_READ(reg);
2962 	udelay(10);
2963 
2964 	status = I915_READ(reg) & DBUF_POWER_STATE;
2965 	if ((enable && !status) || (!enable && status)) {
2966 		DRM_ERROR("DBus power %s timeout!\n",
2967 			  enable ? "enable" : "disable");
2968 		return false;
2969 	}
2970 	return true;
2971 }
2972 
/* Power up the single gen9 DBuf slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
2977 
/* Power down the single gen9 DBuf slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
2982 
2983 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
2984 {
2985 	if (INTEL_GEN(dev_priv) < 11)
2986 		return 1;
2987 	return 2;
2988 }
2989 
2990 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
2991 			    u8 req_slices)
2992 {
2993 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
2994 	u32 val;
2995 	bool ret;
2996 
2997 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
2998 		DRM_ERROR("Invalid number of dbuf slices requested\n");
2999 		return;
3000 	}
3001 
3002 	if (req_slices == hw_enabled_slices || req_slices == 0)
3003 		return;
3004 
3005 	val = I915_READ(DBUF_CTL_S2);
3006 	if (req_slices > hw_enabled_slices)
3007 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3008 	else
3009 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3010 
3011 	if (ret)
3012 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3013 }
3014 
/*
 * Power up both ICL DBuf slices. On success the cached slice count is set
 * to 2; on timeout only an error is logged and the count is left as-is.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);	/* give HW time to flip the power state bits */

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
3029 
/*
 * Power down both ICL DBuf slices. On success the cached slice count is
 * set to 0; on timeout only an error is logged and the count is left as-is.
 */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	udelay(10);	/* give HW time to flip the power state bits */

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
3044 
3045 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3046 {
3047 	uint32_t val;
3048 
3049 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3050 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3051 	      MBUS_ABOX_B_CREDIT(1) |
3052 	      MBUS_ABOX_BW_CREDIT(1);
3053 
3054 	I915_WRITE(MBUS_ABOX_CTL, val);
3055 }
3056 
/*
 * Bring up the common gen9 (SKL/KBL) display HW state: disable DC states,
 * enable the PCH reset handshake, power up PG1 and Misc I/O, then init
 * CDCLK and DBuf. On @resume the DMC firmware program is reloaded.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware payload after a suspend cycle. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3088 
/*
 * Tear down the gen9 display HW state set up by skl_display_core_init(),
 * in reverse order: DBuf, CDCLK, then drop our request on power well 1.
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3118 
/*
 * Bring up the broxton/geminilake display HW state: disable DC states,
 * clear the PCH reset handshake (no PCH on these parts), power up PG1,
 * init CDCLK and DBuf. On @resume the DMC firmware program is reloaded.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware payload after a suspend cycle. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3153 
/*
 * Tear down the broxton/geminilake display HW state set up by
 * bxt_display_core_init(), in reverse order.
 */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3181 
/* Indexes into cnl_procmon_values[], one per voltage/process combination. */
enum {
	PROCMON_0_85V_DOT_0,
	PROCMON_0_95V_DOT_0,
	PROCMON_0_95V_DOT_1,
	PROCMON_1_05V_DOT_0,
	PROCMON_1_05V_DOT_1,
};
3189 
/*
 * Process/voltage compensation reference values for the CNL/ICL combo
 * PHYs; written to the COMP_DW1/DW9/DW10 registers by
 * cnl_set_procmon_ref_values().
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
	[PROCMON_0_85V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[PROCMON_0_95V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[PROCMON_0_95V_DOT_1] =
		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[PROCMON_1_05V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[PROCMON_1_05V_DOT_1] =
		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
3204 
3205 /*
3206  * CNL has just one set of registers, while ICL has two sets: one for port A and
3207  * the other for port B. The CNL registers are equivalent to the ICL port A
3208  * registers, that's why we call the ICL macros even though the function has CNL
3209  * on its name.
3210  */
3211 static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
3212 				       enum port port)
3213 {
3214 	const struct cnl_procmon *procmon;
3215 	u32 val;
3216 
3217 	val = I915_READ(ICL_PORT_COMP_DW3(port));
3218 	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
3219 	default:
3220 		MISSING_CASE(val);
3221 		/* fall through */
3222 	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
3223 		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
3224 		break;
3225 	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
3226 		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
3227 		break;
3228 	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
3229 		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
3230 		break;
3231 	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
3232 		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
3233 		break;
3234 	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
3235 		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
3236 		break;
3237 	}
3238 
3239 	val = I915_READ(ICL_PORT_COMP_DW1(port));
3240 	val &= ~((0xff << 16) | 0xff);
3241 	val |= procmon->dw1;
3242 	I915_WRITE(ICL_PORT_COMP_DW1(port), val);
3243 
3244 	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
3245 	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
3246 }
3247 
/*
 * Bring up the cannonlake display HW state, following the numbered
 * sequence below. On @resume the DMC firmware program is reloaded.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
	cnl_set_procmon_ref_values(dev_priv, PORT_A);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. Set power down enable on the PHY common lane. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3296 
/*
 * Tear down the cannonlake display HW state set up by
 * cnl_display_core_init(), in reverse order.
 */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
3330 
/*
 * Bring up the icelake display HW state, following the numbered sequence
 * below. Combo PHY comp is programmed for ports A and B only.
 * NOTE(review): unlike the other *_display_core_init() variants this one
 * never consumes @resume / reloads the DMC payload — confirm intentional.
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	for (port = PORT_A; port <= PORT_B; port++) {
		/* 2. Enable DDI combo PHY comp. */
		val = I915_READ(ICL_PHY_MISC(port));
		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);

		cnl_set_procmon_ref_values(dev_priv, port);

		val = I915_READ(ICL_PORT_COMP_DW0(port));
		val |= COMP_INIT;
		I915_WRITE(ICL_PORT_COMP_DW0(port), val);

		/* 3. Set power down enable. */
		val = I915_READ(ICL_PORT_CL_DW5(port));
		val |= CL_POWER_DOWN_ENABLE;
		I915_WRITE(ICL_PORT_CL_DW5(port), val);
	}

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 8. CHICKEN_DCPR_1 */
	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
					CNL_DDI_CLOCK_REG_ACCESS_ON);
}
3386 
/*
 * Tear down the icelake display HW state set up by
 * icl_display_core_init(), in reverse order.
 */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Comp on the combo PHYs for ports A and B. */
	for (port = PORT_A; port <= PORT_B; port++) {
		val = I915_READ(ICL_PHY_MISC(port));
		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);
	}
}
3421 
/*
 * Reconstruct the initial shadow value of DISPLAY_PHY_CONTROL for
 * cherryview from the current power well and lane status, then write it
 * out. The register itself must never be read (see workaround below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
3508 
/*
 * Valleyview workaround: toggle the display PHY side reset by cycling the
 * common lane power well, unless the display is already active with the
 * common lane reset deasserted.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
3536 
3537 /**
3538  * intel_power_domains_init_hw - initialize hardware power domain state
3539  * @dev_priv: i915 device instance
3540  * @resume: Called from resume code paths or not
3541  *
3542  * This function initializes the hardware power domain state and enables all
3543  * power wells belonging to the INIT power domain. Power wells in other
3544  * domains (and not in the INIT domain) are referenced or disabled during the
3545  * modeset state HW readout. After that the reference count of each power well
3546  * must match its HW enabled state, see intel_power_domains_verify_state().
3547  */
3548 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
3549 {
3550 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3551 
3552 	power_domains->initializing = true;
3553 
3554 	if (IS_ICELAKE(dev_priv)) {
3555 		icl_display_core_init(dev_priv, resume);
3556 	} else if (IS_CANNONLAKE(dev_priv)) {
3557 		cnl_display_core_init(dev_priv, resume);
3558 	} else if (IS_GEN9_BC(dev_priv)) {
3559 		skl_display_core_init(dev_priv, resume);
3560 	} else if (IS_GEN9_LP(dev_priv)) {
3561 		bxt_display_core_init(dev_priv, resume);
3562 	} else if (IS_CHERRYVIEW(dev_priv)) {
3563 		mutex_lock(&power_domains->lock);
3564 		chv_phy_control_init(dev_priv);
3565 		mutex_unlock(&power_domains->lock);
3566 	} else if (IS_VALLEYVIEW(dev_priv)) {
3567 		mutex_lock(&power_domains->lock);
3568 		vlv_cmnlane_wa(dev_priv);
3569 		mutex_unlock(&power_domains->lock);
3570 	}
3571 
3572 	/* For now, we need the power well to be always enabled. */
3573 	intel_display_set_init_power(dev_priv, true);
3574 	/* Disable power support if the user asked so. */
3575 	if (!i915_modparams.disable_power_well)
3576 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
3577 	intel_power_domains_sync_hw(dev_priv);
3578 	power_domains->initializing = false;
3579 }
3580 
3581 /**
3582  * intel_power_domains_suspend - suspend power domain state
3583  * @dev_priv: i915 device instance
3584  *
3585  * This function prepares the hardware power domain state before entering
3586  * system suspend. It must be paired with intel_power_domains_init_hw().
3587  */
3588 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
3589 {
3590 	/*
3591 	 * Even if power well support was disabled we still want to disable
3592 	 * power wells while we are system suspended.
3593 	 */
3594 	if (!i915_modparams.disable_power_well)
3595 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
3596 
3597 	if (IS_ICELAKE(dev_priv))
3598 		icl_display_core_uninit(dev_priv);
3599 	else if (IS_CANNONLAKE(dev_priv))
3600 		cnl_display_core_uninit(dev_priv);
3601 	else if (IS_GEN9_BC(dev_priv))
3602 		skl_display_core_uninit(dev_priv);
3603 	else if (IS_GEN9_LP(dev_priv))
3604 		bxt_display_core_uninit(dev_priv);
3605 }
3606 
3607 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3608 {
3609 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3610 	struct i915_power_well *power_well;
3611 
3612 	for_each_power_well(dev_priv, power_well) {
3613 		enum intel_display_power_domain domain;
3614 
3615 		DRM_DEBUG_DRIVER("%-25s %d\n",
3616 				 power_well->name, power_well->count);
3617 
3618 		for_each_power_domain(domain, power_well->domains)
3619 			DRM_DEBUG_DRIVER("  %-23s %d\n",
3620 					 intel_display_power_domain_str(domain),
3621 					 power_domains->domain_use_count[domain]);
3622 	}
3623 }
3624 
3625 /**
3626  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3627  * @dev_priv: i915 device instance
3628  *
3629  * Verify if the reference count of each power well matches its HW enabled
3630  * state and the total refcount of the domains it belongs to. This must be
3631  * called after modeset HW state sanitization, which is responsible for
3632  * acquiring reference counts for any power wells in use and disabling the
3633  * ones left on by BIOS but not required by any active output.
3634  */
3635 void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
3636 {
3637 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3638 	struct i915_power_well *power_well;
3639 	bool dump_domain_info;
3640 
3641 	mutex_lock(&power_domains->lock);
3642 
3643 	dump_domain_info = false;
3644 	for_each_power_well(dev_priv, power_well) {
3645 		enum intel_display_power_domain domain;
3646 		int domains_count;
3647 		bool enabled;
3648 
3649 		/*
3650 		 * Power wells not belonging to any domain (like the MISC_IO
3651 		 * and PW1 power wells) are under FW control, so ignore them,
3652 		 * since their state can change asynchronously.
3653 		 */
3654 		if (!power_well->domains)
3655 			continue;
3656 
3657 		enabled = power_well->ops->is_enabled(dev_priv, power_well);
3658 		if ((power_well->count || power_well->always_on) != enabled)
3659 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
3660 				  power_well->name, power_well->count, enabled);
3661 
3662 		domains_count = 0;
3663 		for_each_power_domain(domain, power_well->domains)
3664 			domains_count += power_domains->domain_use_count[domain];
3665 
3666 		if (power_well->count != domains_count) {
3667 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
3668 				  "(refcount %d/domains refcount %d)\n",
3669 				  power_well->name, power_well->count,
3670 				  domains_count);
3671 			dump_domain_info = true;
3672 		}
3673 	}
3674 
3675 	if (dump_domain_info) {
3676 		static bool dumped;
3677 
3678 		if (!dumped) {
3679 			intel_power_domains_dump_info(dev_priv);
3680 			dumped = true;
3681 		}
3682 	}
3683 
3684 	mutex_unlock(&power_domains->lock);
3685 }
3686 
3687 /**
3688  * intel_runtime_pm_get - grab a runtime pm reference
3689  * @dev_priv: i915 device instance
3690  *
3691  * This function grabs a device-level runtime pm reference (mostly used for GEM
3692  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3693  *
3694  * Any runtime pm reference obtained by this function must have a symmetric
3695  * call to intel_runtime_pm_put() to release the reference again.
3696  */
3697 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
3698 {
3699 #ifdef __linux__
3700 	struct pci_dev *pdev = dev_priv->drm.pdev;
3701 	struct device *kdev = &pdev->dev;
3702 	int ret;
3703 
3704 	ret = pm_runtime_get_sync(kdev);
3705 	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3706 #endif
3707 
3708 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3709 	assert_rpm_wakelock_held(dev_priv);
3710 }
3711 
3712 /**
3713  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3714  * @dev_priv: i915 device instance
3715  *
3716  * This function grabs a device-level runtime pm reference if the device is
3717  * already in use and ensures that it is powered up. It is illegal to try
3718  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
3719  *
3720  * Any runtime pm reference obtained by this function must have a symmetric
3721  * call to intel_runtime_pm_put() to release the reference again.
3722  *
3723  * Returns: True if the wakeref was acquired, or False otherwise.
3724  */
3725 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
3726 {
3727 #ifdef __linux__
3728 	if (IS_ENABLED(CONFIG_PM)) {
3729 		struct pci_dev *pdev = dev_priv->drm.pdev;
3730 		struct device *kdev = &pdev->dev;
3731 
3732 		/*
3733 		 * In cases runtime PM is disabled by the RPM core and we get
3734 		 * an -EINVAL return value we are not supposed to call this
3735 		 * function, since the power state is undefined. This applies
3736 		 * atm to the late/early system suspend/resume handlers.
3737 		 */
3738 		if (pm_runtime_get_if_in_use(kdev) <= 0)
3739 			return false;
3740 	}
3741 #endif
3742 
3743 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3744 	assert_rpm_wakelock_held(dev_priv);
3745 
3746 	return true;
3747 }
3748 
3749 /**
3750  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3751  * @dev_priv: i915 device instance
3752  *
3753  * This function grabs a device-level runtime pm reference (mostly used for GEM
3754  * code to ensure the GTT or GT is on).
3755  *
3756  * It will _not_ power up the device but instead only check that it's powered
3757  * on.  Therefore it is only valid to call this functions from contexts where
3758  * the device is known to be powered up and where trying to power it up would
3759  * result in hilarity and deadlocks. That pretty much means only the system
3760  * suspend/resume code where this is used to grab runtime pm references for
3761  * delayed setup down in work items.
3762  *
3763  * Any runtime pm reference obtained by this function must have a symmetric
3764  * call to intel_runtime_pm_put() to release the reference again.
3765  */
3766 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3767 {
3768 #ifdef __linux__
3769 	struct pci_dev *pdev = dev_priv->drm.pdev;
3770 	struct device *kdev = &pdev->dev;
3771 
3772 	assert_rpm_wakelock_held(dev_priv);
3773 	pm_runtime_get_noresume(kdev);
3774 #else
3775 	assert_rpm_wakelock_held(dev_priv);
3776 #endif
3777 
3778 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3779 }
3780 
3781 /**
3782  * intel_runtime_pm_put - release a runtime pm reference
3783  * @dev_priv: i915 device instance
3784  *
3785  * This function drops the device-level runtime pm reference obtained by
3786  * intel_runtime_pm_get() and might power down the corresponding
3787  * hardware block right away if this is the last reference.
3788  */
3789 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3790 {
3791 #ifdef __linux__
3792 	struct pci_dev *pdev = dev_priv->drm.pdev;
3793 	struct device *kdev = &pdev->dev;
3794 
3795 	assert_rpm_wakelock_held(dev_priv);
3796 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3797 
3798 	pm_runtime_mark_last_busy(kdev);
3799 	pm_runtime_put_autosuspend(kdev);
3800 #else
3801 	assert_rpm_wakelock_held(dev_priv);
3802 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3803 #endif
3804 }
3805 
3806 /**
3807  * intel_runtime_pm_enable - enable runtime pm
3808  * @dev_priv: i915 device instance
3809  *
3810  * This function enables runtime pm at the end of the driver load sequence.
3811  *
3812  * Note that this function does currently not enable runtime pm for the
3813  * subordinate display power domains. That is only done on the first modeset
3814  * using intel_display_set_init_power().
3815  */
3816 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3817 {
3818 #ifdef __linux__
3819 	struct pci_dev *pdev = dev_priv->drm.pdev;
3820 	struct device *kdev = &pdev->dev;
3821 
3822 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3823 	pm_runtime_mark_last_busy(kdev);
3824 
3825 	/*
3826 	 * Take a permanent reference to disable the RPM functionality and drop
3827 	 * it only when unloading the driver. Use the low level get/put helpers,
3828 	 * so the driver's own RPM reference tracking asserts also work on
3829 	 * platforms without RPM support.
3830 	 */
3831 	if (!HAS_RUNTIME_PM(dev_priv)) {
3832 		int ret;
3833 
3834 		pm_runtime_dont_use_autosuspend(kdev);
3835 		ret = pm_runtime_get_sync(kdev);
3836 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3837 	} else {
3838 		pm_runtime_use_autosuspend(kdev);
3839 	}
3840 
3841 	/*
3842 	 * The core calls the driver load handler with an RPM reference held.
3843 	 * We drop that here and will reacquire it during unloading in
3844 	 * intel_power_domains_fini().
3845 	 */
3846 	pm_runtime_put_autosuspend(kdev);
3847 #endif
3848 }
3849