xref: /openbsd-src/sys/dev/pci/drm/i915/intel_runtime_pm.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *    Daniel Vetter <daniel.vetter@ffwll.ch>
26  *
27  */
28 
29 #include <linux/pm_runtime.h>
30 #include <linux/vgaarb.h>
31 
32 #include "i915_drv.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: runtime pm
37  *
38  * The i915 driver supports dynamic enabling and disabling of entire hardware
39  * blocks at runtime. This is especially important on the display side where
40  * software is supposed to control many power gates manually on recent hardware,
41  * since on the GT side a lot of the power management is done by the hardware.
42  * But even there some manual control at the device level is required.
43  *
44  * Since i915 supports a diverse set of platforms with a unified codebase and
45  * hardware engineers just love to shuffle functionality around between power
46  * domains there's a sizeable amount of indirection required. This file provides
47  * generic functions to the driver for grabbing and releasing references for
48  * abstract power domains. It then maps those to the actual power wells
49  * present for a given platform.
50  */
51 
52 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
53 					 enum i915_power_well_id power_well_id);
54 
55 static struct i915_power_well *
56 lookup_power_well(struct drm_i915_private *dev_priv,
57 		  enum i915_power_well_id power_well_id);
58 
/*
 * intel_display_power_domain_str - map a power domain enum to its name
 * @domain: the display power domain to describe
 *
 * Returns a static, human-readable string naming @domain, for use in
 * debug and diagnostic output. Unknown values trigger MISSING_CASE()
 * and yield "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_TBT1:
		return "AUX_TBT1";
	case POWER_DOMAIN_AUX_TBT2:
		return "AUX_TBT2";
	case POWER_DOMAIN_AUX_TBT3:
		return "AUX_TBT3";
	case POWER_DOMAIN_AUX_TBT4:
		return "AUX_TBT4";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
158 
/* Enable a power well via its vtable and record that in SW state. */
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	/* hw_enabled is set only after ->enable() has completed. */
	power_well->hw_enabled = true;
}
166 
/* Disable a power well via its vtable, clearing SW state first. */
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	/* Mirror image of enable: hw_enabled is cleared before ->disable(). */
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}
174 
175 static void intel_power_well_get(struct drm_i915_private *dev_priv,
176 				 struct i915_power_well *power_well)
177 {
178 	if (!power_well->count++)
179 		intel_power_well_enable(dev_priv, power_well);
180 }
181 
182 static void intel_power_well_put(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	WARN(!power_well->count, "Use count on power well %s is already zero",
186 	     power_well->name);
187 
188 	if (!--power_well->count)
189 		intel_power_well_disable(dev_priv, power_well);
190 }
191 
192 /**
193  * __intel_display_power_is_enabled - unlocked check for a power domain
194  * @dev_priv: i915 device instance
195  * @domain: power domain to check
196  *
197  * This is the unlocked version of intel_display_power_is_enabled() and should
198  * only be used from error capture and recovery code where deadlocks are
199  * possible.
200  *
201  * Returns:
202  * True when the power domain is enabled, false otherwise.
203  */
204 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
205 				      enum intel_display_power_domain domain)
206 {
207 	struct i915_power_well *power_well;
208 	bool is_enabled;
209 
210 	if (dev_priv->runtime_pm.suspended)
211 		return false;
212 
213 	is_enabled = true;
214 
215 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
216 		if (power_well->always_on)
217 			continue;
218 
219 		if (!power_well->hw_enabled) {
220 			is_enabled = false;
221 			break;
222 		}
223 	}
224 
225 	return is_enabled;
226 }
227 
228 /**
229  * intel_display_power_is_enabled - check for a power domain
230  * @dev_priv: i915 device instance
231  * @domain: power domain to check
232  *
233  * This function can be used to check the hw power domain state. It is mostly
234  * used in hardware state readout functions. Everywhere else code should rely
235  * upon explicit power domain reference counting to ensure that the hardware
236  * block is powered up before accessing it.
237  *
238  * Callers must hold the relevant modesetting locks to ensure that concurrent
239  * threads can't disable the power well while the caller tries to read a few
240  * registers.
241  *
242  * Returns:
243  * True when the power domain is enabled, false otherwise.
244  */
245 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
246 				    enum intel_display_power_domain domain)
247 {
248 	struct i915_power_domains *power_domains;
249 	bool ret;
250 
251 	power_domains = &dev_priv->power_domains;
252 
253 	mutex_lock(&power_domains->lock);
254 	ret = __intel_display_power_is_enabled(dev_priv, domain);
255 	mutex_unlock(&power_domains->lock);
256 
257 	return ret;
258 }
259 
260 /**
261  * intel_display_set_init_power - set the initial power domain state
262  * @dev_priv: i915 device instance
263  * @enable: whether to enable or disable the initial power domain state
264  *
265  * For simplicity our driver load/unload and system suspend/resume code assumes
266  * that all power domains are always enabled. This functions controls the state
267  * of this little hack. While the initial power domain state is enabled runtime
268  * pm is effectively disabled.
269  */
270 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
271 				  bool enable)
272 {
273 	if (dev_priv->power_domains.init_power_on == enable)
274 		return;
275 
276 	if (enable)
277 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
278 	else
279 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
280 
281 	dev_priv->power_domains.init_power_on = enable;
282 }
283 
284 /*
285  * Starting with Haswell, we have a "Power Down Well" that can be turned off
286  * when not needed anymore. We have 4 registers that can request the power well
287  * to be enabled, and it will only be disabled if none of the registers is
288  * requesting it to be enabled.
289  */
/* Post-enable fixups: poke VGA MSR and re-enable pipe interrupts. */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (has_vga) {
		vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(pdev, VGA_RSRC_LEGACY_IO);
	}

	/* Re-arm interrupts for the pipes backed by this well. */
	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
314 
315 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
316 				       u8 irq_pipe_mask)
317 {
318 	if (irq_pipe_mask)
319 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
320 }
321 
322 
/* Poll the driver control register until the well's state bit is set. */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	WARN_ON(intel_wait_for_register(dev_priv,
					HSW_PWR_WELL_CTL_DRIVER(id),
					HSW_PWR_WELL_CTL_STATE(id),
					HSW_PWR_WELL_CTL_STATE(id),
					1));
}
335 
336 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
337 				     enum i915_power_well_id id)
338 {
339 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
340 	u32 ret;
341 
342 	ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
343 	ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
344 	ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
345 	ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
346 
347 	return ret;
348 }
349 
/*
 * Poll for the well's state bit to clear, bailing out early (with a
 * diagnostic) if some other agent still requests the well.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* Both 'disabled' and 'reqs' are assigned inside the wait condition. */
	wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
			       HSW_PWR_WELL_CTL_STATE(id))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
	if (disabled)
		return;

	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      power_well->name,
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
376 
/* Wait for the fuse distribution status of power gate @pg to assert. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
					SKL_FUSE_PG_DIST_STATUS(pg),
					SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
385 
/*
 * Enable a HSW+ style power well: optionally wait for fuse pre-state,
 * set the request bit, wait for the state bit, apply workarounds, then
 * run the post-enable fixups (VGA / pipe IRQs).
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	bool wait_fuses = power_well->hsw.has_fuses;
	enum skl_power_gate uninitialized_var(pg);
	u32 val;

	if (wait_fuses) {
		/* Power well id -> power gate mapping differs on gen11+. */
		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
						 SKL_PW_TO_PG(id);
		/*
		 * For PW1 we have to wait both for the PW0/PG0 fuse state
		 * before enabling the power well and PW1/PG1's own fuse
		 * state after the enabling. For all other power wells with
		 * fuses we only have to wait for that PW/PG's fuse state
		 * after the enabling.
		 */
		if (pg == SKL_PG1)
			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
	}

	/* Request the well and wait for the HW to acknowledge it. */
	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C ||
	     id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) {
		val = I915_READ(CNL_AUX_ANAOVRD1(id));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		I915_WRITE(CNL_AUX_ANAOVRD1(id), val);
	}

	if (wait_fuses)
		gen9_wait_for_power_well_fuses(dev_priv, pg);

	hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
				   power_well->hsw.has_vga);
}
427 
/*
 * Disable a HSW+ style power well: quiesce pipe IRQs, clear the driver
 * request bit, then (best-effort) wait for the well to power down.
 */
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 val;

	hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));
	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
441 
442 #define ICL_AUX_PW_TO_PORT(pw)	((pw) - ICL_DISP_PW_AUX_A)
443 
/*
 * ICL combo PHY AUX wells: besides the regular request/ack handshake,
 * the AUX lanes must be enabled in the port's CL_DW12 register.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	/* Request the power well first ... */
	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));

	/* ... then enable the AUX lanes ... */
	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);

	/* ... and only then wait for the ack. */
	hsw_wait_for_power_well_enable(dev_priv, power_well);
}
460 
/* Reverse of icl_combo_phy_aux_power_well_enable(): lanes off first. */
static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	enum port port = ICL_AUX_PW_TO_PORT(id);
	u32 val;

	val = I915_READ(ICL_PORT_CL_DW12(port));
	I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);

	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
		   val & ~HSW_PWR_WELL_CTL_REQ(id));

	hsw_wait_for_power_well_disable(dev_priv, power_well);
}
478 
479 /*
480  * We should only use the power well if we explicitly asked the hardware to
481  * enable it, so check if it's enabled and also check if we've requested it to
482  * be enabled.
483  */
484 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
485 				   struct i915_power_well *power_well)
486 {
487 	enum i915_power_well_id id = power_well->id;
488 	u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
489 
490 	return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
491 }
492 
/* Sanity-check the preconditions for entering DC9; only warns. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id id = SKL_DISP_PW_2;

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		  "DC9 already programmed to be enabled.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled to enable DC9.\n");
	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
		  HSW_PWR_WELL_CTL_REQ(id),
		  "Power well 2 on.\n");
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
515 
/* Sanity-check the preconditions for exiting DC9; only warns. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(intel_irqs_enabled(dev_priv),
		  "Interrupts not disabled yet.\n");
	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		  "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
531 
/*
 * Write DC_STATE_EN, re-writing and re-reading until the value sticks.
 * The DMC firmware has been observed to revert the register, hence the
 * retry loop (up to 100 rewrites, 5 consecutive stable reads to accept).
 */
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and dmc keeps returning old value. Make sure
	 * the write really sticks enough times and also force rewrite until
	 * we are confident that state is exactly what we want.
	 */
	do  {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the times we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}
568 
569 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
570 {
571 	u32 mask;
572 
573 	mask = DC_STATE_EN_UPTO_DC5;
574 	if (IS_GEN9_LP(dev_priv))
575 		mask |= DC_STATE_EN_DC9;
576 	else
577 		mask |= DC_STATE_EN_UPTO_DC6;
578 
579 	return mask;
580 }
581 
582 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
583 {
584 	u32 val;
585 
586 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
587 
588 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
589 		      dev_priv->csr.dc_state, val);
590 	dev_priv->csr.dc_state = val;
591 }
592 
593 /**
594  * gen9_set_dc_state - set target display C power state
595  * @dev_priv: i915 device instance
596  * @state: target DC power state
597  * - DC_STATE_DISABLE
598  * - DC_STATE_EN_UPTO_DC5
599  * - DC_STATE_EN_UPTO_DC6
600  * - DC_STATE_EN_DC9
601  *
602  * Signal to DMC firmware/HW the target DC power state passed in @state.
603  * DMC/HW can turn off individual display clocks and power rails when entering
604  * a deeper DC power state (higher in number) and turns these back when exiting
605  * that state to a shallower power state (lower in number). The HW will decide
606  * when to actually enter a given state on an on-demand basis, for instance
607  * depending on the active state of display pipes. The state of display
608  * registers backed by affected power rails are saved/restored as needed.
609  *
610  * Based on the above enabling a deeper DC power state is asynchronous wrt.
611  * enabling it. Disabling a deeper power state is synchronous: for instance
612  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
613  * back on and register state is restored. This is guaranteed by the MMIO write
614  * to DC_STATE_EN blocking until the state is restored.
615  */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	/* Clamp the request to the states this platform/config allows. */
	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
		state &= dev_priv->csr.allowed_dc_mask;

	val = I915_READ(DC_STATE_EN);
	mask = gen9_dc_mask(dev_priv);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	/* Replace only the DC state bits, preserve the rest of the reg. */
	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	/* Track what we actually programmed, masked to the DC bits. */
	dev_priv->csr.dc_state = val & mask;
}
641 
/* Enter DC9: reset PPS tracking first, then request the state. */
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	/* PPS registers are lost in DC9; reset the SW-side tracking. */
	intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
651 
/* Exit DC9 and reapply the PPS register unlock workaround. */
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
662 
/* Warn if the CSR/DMC firmware does not appear to be loaded. */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}
670 
/* Sanity-check the preconditions for entering DC5; only warns. */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}
684 
/* Allow the DMC firmware to enter DC5 when conditions permit. */
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
698 
/* Sanity-check the preconditions for entering DC6; only warns. */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
708 
/* Allow the DMC firmware to enter DC6 when conditions permit. */
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
			   SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
722 
/*
 * Transfer ownership of the power well from the BIOS to the driver:
 * if BIOS left its request bit set, mirror it in the driver register
 * and clear the BIOS one so the well stays under driver control.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id id = power_well->id;
	u32 mask = HSW_PWR_WELL_CTL_REQ(id);
	u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));

		if (!(drv_req & mask))
			I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
		I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
	}
}
739 
/* DPIO common wells are backed by PHY init/uninit on BXT. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
745 
/* Counterpart of bxt_dpio_cmn_power_well_enable(). */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
751 
/* A DPIO common well counts as enabled when its PHY is enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
757 
758 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
759 {
760 	struct i915_power_well *power_well;
761 
762 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
763 	if (power_well->count > 0)
764 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
765 
766 	power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
767 	if (power_well->count > 0)
768 		bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
769 
770 	if (IS_GEMINILAKE(dev_priv)) {
771 		power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
772 		if (power_well->count > 0)
773 			bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
774 	}
775 }
776 
777 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
778 					   struct i915_power_well *power_well)
779 {
780 	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
781 }
782 
783 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
784 {
785 	u32 tmp = I915_READ(DBUF_CTL);
786 
787 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
788 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
789 	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
790 }
791 
/*
 * "Enable" the DC-off well by disallowing DC states, then sanity-check
 * that CDCLK, DBuf and (on LP) the DPIO PHYs survived the DC exit.
 */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	struct intel_cdclk_state cdclk_state = {};

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);
}
808 
809 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
810 					   struct i915_power_well *power_well)
811 {
812 	if (!dev_priv->csr.dmc_payload)
813 		return;
814 
815 	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
816 		skl_enable_dc6(dev_priv);
817 	else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
818 		gen9_enable_dc5(dev_priv);
819 }
820 
/* No HW state to sync on platforms using this stub. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
825 
/* Always-on wells cannot be toggled; enable/disable are no-ops. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
830 
/* Always-on wells report enabled unconditionally. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
836 
837 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
838 					 struct i915_power_well *power_well)
839 {
840 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
841 		i830_enable_pipe(dev_priv, PIPE_A);
842 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
843 		i830_enable_pipe(dev_priv, PIPE_B);
844 }
845 
/* Disable both pipes, B before A (reverse of the enable order). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
852 
853 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
854 					  struct i915_power_well *power_well)
855 {
856 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
857 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
858 }
859 
/* Make the pipe HW state follow the well's current reference count. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
868 
/*
 * Ask the Punit to power a well on or off and wait (up to 100 ms) for
 * its status register to reflect the request. Runs under pcu_lock.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum i915_power_well_id power_well_id = power_well->id;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

/* True once the Punit status matches the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
904 
/* Thin ops wrapper: power the well on via the Punit. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
910 
/* Thin ops wrapper: power the well off via the Punit. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
916 
/*
 * Read back the well's power state from the Punit, warning if either
 * the status is in a transient state or the control register disagrees
 * with it (which would mean someone else is poking the power controls).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	enum i915_power_well_id power_well_id = power_well->id;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
952 
/*
 * Program the display clock gating, arbiter and rawclk registers that
 * must be (re)initialized whenever the VLV/CHV display power well
 * comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = I915_READ(DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE; /* keep only this one bit */
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	I915_WRITE(CBR1_VLV, 0);

	/* rawclk must be known before programming RAWCLK_FREQ_VLV */
	WARN_ON(dev_priv->rawclk_freq == 0);

	I915_WRITE(RAWCLK_FREQ_VLV,
		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
979 
/*
 * Bring up the state that lives inside the VLV/CHV display power well:
 * CRI/ref clocks on all pipes, clock gating, and display interrupts.
 * Outside of driver init/resume it also restores hotplug detection,
 * CRT ADPA, VGA and PPS register state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	i915_redisable_vga_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1028 
/*
 * Counterpart of vlv_display_power_well_init(): quiesce display
 * interrupt handling and reset PPS state before the display power
 * well is turned off.
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->drm.irq);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
#ifdef __linux__
	if (!dev_priv->drm.dev->power.is_suspended)
#else
	/*
	 * OpenBSD: the kernel "cold" flag stands in for the Linux
	 * is_suspended check — presumably nonzero during boot/suspend;
	 * confirm against kernel usage.
	 */
	if (!cold)
#endif
		intel_hpd_poll_init(dev_priv);
}
1048 
/*
 * Power-well op for the VLV display well: power on first, then
 * re-initialize the display state that lives inside the well.
 */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1058 
/*
 * Power-well op for the VLV display well: quiesce display state
 * first, then gate the well.
 */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1068 
/*
 * Power-well op for the VLV DPIO common lane well: power the well on
 * and then de-assert the DPIO common lane reset, per the vendor
 * bring-up sequence quoted below.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1092 
/*
 * Power-well op for the VLV DPIO common lane well: assert the common
 * lane reset (all PLLs must already be off) and gate the well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1108 
1109 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1110 
1111 static struct i915_power_well *
1112 lookup_power_well(struct drm_i915_private *dev_priv,
1113 		  enum i915_power_well_id power_well_id)
1114 {
1115 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1116 	int i;
1117 
1118 	for (i = 0; i < power_domains->power_well_count; i++) {
1119 		struct i915_power_well *power_well;
1120 
1121 		power_well = &power_domains->power_wells[i];
1122 		if (power_well->id == power_well_id)
1123 			return power_well;
1124 	}
1125 
1126 	return NULL;
1127 }
1128 
1129 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1130 
/*
 * Sanity check for CHV: derive the DISPLAY_PHY_STATUS value we expect
 * from the cached chv_phy_control and the DPIO common lane power well
 * states, then wait for the hardware status register to match it.
 * Mismatches indicate the PHY power state got out of sync with our
 * bookkeeping.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	/* PHY0 (channels B/C) expectations, only if its well is on */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* spline LDOs: 0x3 = lanes 0/1, 0xc = lanes 2/3 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	/* PHY1 (channel D) expectations, only if its well is on */
	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    phy_status_mask,
				    phy_status,
				    10))
		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
			   phy_status, dev_priv->chv_phy_control);
}
1236 
1237 #undef BITS_SET
1238 
/*
 * Power-well op for the CHV DPIO common lane wells (PHY0 = ports B/C,
 * PHY1 = port D): power the well on, wait for the PHY powergood
 * signal, enable dynamic power down in the PHY, and de-assert the
 * common lane reset via DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	/* pipe here selects which sideband port the DPIO accesses use */
	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_wait_for_register(dev_priv,
				    DISPLAY_PHY_STATUS,
				    PHY_POWERGOOD(phy),
				    PHY_POWERGOOD(phy),
				    1))
		DRM_ERROR("Display PHY %d is not power up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/* Release the PHY from common lane reset */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1302 
/*
 * Power-well op for the CHV DPIO common lane wells: verify the PLLs
 * fed by this PHY are off, assert the common lane reset, and gate
 * the well. Once the PHY has been fully reset this way, the PHY
 * state asserts can be trusted again.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	/* Put the PHY back into common lane reset */
	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1333 
/*
 * Sanity check that the DPIO lane power down status bits of a PHY
 * channel agree with the override/mask the caller just programmed.
 * @override: whether the power down override is enabled for @ch
 * @mask: bitmask of lanes being kept powered (0xf = all four)
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* Some lanes kept up: only "any lane down" should be set */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* Extract the two power down status bits for this channel */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}
1395 
/*
 * Enable or disable the power down override for one PHY channel in
 * DISPLAY_PHY_CONTROL, under the power domains lock.
 *
 * Returns the previous override state so the caller can restore it
 * later via another call.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* No change requested — skip the register write */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1426 
/*
 * Program which lanes of an encoder's PHY channel stay powered
 * (@mask) and whether the power down override is in effect
 * (@override), then verify the resulting PHY state.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask for this channel with @mask */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1456 
/*
 * Power-well op: report whether the CHV pipe A power well is on,
 * according to the Punit PUNIT_REG_DSPFREQ register. Cross-checks
 * the control bits against the status bits like
 * vlv_power_well_enabled() does.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->pcu_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state); /* control bits sit 16 below status */

	mutex_unlock(&dev_priv->pcu_lock);

	return enabled;
}
1485 
/*
 * Set the CHV pipe A power well on or off via the Punit DSPFREQ
 * register, then poll the status bits until the hardware reports the
 * new state (or 100ms elapses). CHV analogue of vlv_set_power_well().
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->pcu_lock);

/* true once the Punit status bits reflect the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	/* Already in the requested state? Nothing to program. */
	if (COND)
		goto out;

	/* Update only this pipe's control bits */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->pcu_lock);
}
1519 
/*
 * Power-well op for the CHV pipe A well: power on first, then
 * re-initialize the display state that lives inside the well.
 */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1529 
/*
 * Power-well op for the CHV pipe A well: quiesce display state
 * first, then gate the well.
 */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1539 
/*
 * Take a reference on every power well serving @domain and bump the
 * domain's use count. Caller must hold power_domains->lock.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
1552 
1553 /**
1554  * intel_display_power_get - grab a power domain reference
1555  * @dev_priv: i915 device instance
1556  * @domain: power domain to reference
1557  *
1558  * This function grabs a power domain reference for @domain and ensures that the
1559  * power domain and all its parents are powered up. Therefore users should only
1560  * grab a reference to the innermost power domain they need.
1561  *
1562  * Any power domain reference obtained by this function must have a symmetric
1563  * call to intel_display_power_put() to release the reference again.
1564  */
1565 void intel_display_power_get(struct drm_i915_private *dev_priv,
1566 			     enum intel_display_power_domain domain)
1567 {
1568 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1569 
1570 	intel_runtime_pm_get(dev_priv);
1571 
1572 	mutex_lock(&power_domains->lock);
1573 
1574 	__intel_display_power_get_domain(dev_priv, domain);
1575 
1576 	mutex_unlock(&power_domains->lock);
1577 }
1578 
1579 /**
1580  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1581  * @dev_priv: i915 device instance
1582  * @domain: power domain to reference
1583  *
1584  * This function grabs a power domain reference for @domain and ensures that the
1585  * power domain and all its parents are powered up. Therefore users should only
1586  * grab a reference to the innermost power domain they need.
1587  *
1588  * Any power domain reference obtained by this function must have a symmetric
1589  * call to intel_display_power_put() to release the reference again.
1590  */
1591 bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1592 					enum intel_display_power_domain domain)
1593 {
1594 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1595 	bool is_enabled;
1596 
1597 	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1598 		return false;
1599 
1600 	mutex_lock(&power_domains->lock);
1601 
1602 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1603 		__intel_display_power_get_domain(dev_priv, domain);
1604 		is_enabled = true;
1605 	} else {
1606 		is_enabled = false;
1607 	}
1608 
1609 	mutex_unlock(&power_domains->lock);
1610 
1611 	if (!is_enabled)
1612 		intel_runtime_pm_put(dev_priv);
1613 
1614 	return is_enabled;
1615 }
1616 
1617 /**
1618  * intel_display_power_put - release a power domain reference
1619  * @dev_priv: i915 device instance
1620  * @domain: power domain to reference
1621  *
1622  * This function drops the power domain reference obtained by
1623  * intel_display_power_get() and might power down the corresponding hardware
1624  * block right away if this is the last reference.
1625  */
1626 void intel_display_power_put(struct drm_i915_private *dev_priv,
1627 			     enum intel_display_power_domain domain)
1628 {
1629 	struct i915_power_domains *power_domains;
1630 	struct i915_power_well *power_well;
1631 
1632 	power_domains = &dev_priv->power_domains;
1633 
1634 	mutex_lock(&power_domains->lock);
1635 
1636 	WARN(!power_domains->domain_use_count[domain],
1637 	     "Use count on domain %s is already zero\n",
1638 	     intel_display_power_domain_str(domain));
1639 	power_domains->domain_use_count[domain]--;
1640 
1641 	for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
1642 		intel_power_well_put(dev_priv, power_well);
1643 
1644 	mutex_unlock(&power_domains->lock);
1645 
1646 	intel_runtime_pm_put(dev_priv);
1647 }
1648 
/*
 * Per-platform power domain masks: each *_POWER_DOMAINS macro below
 * lists the set of display power domains served by one power well.
 */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
1657 
1658 #define VLV_DISPLAY_POWER_DOMAINS (		\
1659 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1660 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1661 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1662 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1663 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1664 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1665 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1666 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1667 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1668 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1669 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1670 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1671 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1672 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1673 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1674 	BIT_ULL(POWER_DOMAIN_INIT))
1675 
1676 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1677 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1678 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1679 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
1680 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1681 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1682 	BIT_ULL(POWER_DOMAIN_INIT))
1683 
1684 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1685 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1686 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1687 	BIT_ULL(POWER_DOMAIN_INIT))
1688 
1689 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1690 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1691 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1692 	BIT_ULL(POWER_DOMAIN_INIT))
1693 
1694 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1695 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1696 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1697 	BIT_ULL(POWER_DOMAIN_INIT))
1698 
1699 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1700 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1701 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1702 	BIT_ULL(POWER_DOMAIN_INIT))
1703 
1704 #define CHV_DISPLAY_POWER_DOMAINS (		\
1705 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
1706 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
1707 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
1708 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
1709 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
1710 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
1711 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
1712 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
1713 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
1714 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1715 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1716 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1717 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
1718 	BIT_ULL(POWER_DOMAIN_VGA) |			\
1719 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
1720 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1721 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1722 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1723 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
1724 	BIT_ULL(POWER_DOMAIN_INIT))
1725 
1726 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1727 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1728 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1729 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
1730 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
1731 	BIT_ULL(POWER_DOMAIN_INIT))
1732 
1733 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
1734 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
1735 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
1736 	BIT_ULL(POWER_DOMAIN_INIT))
1737 
1738 #define HSW_DISPLAY_POWER_DOMAINS (			\
1739 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1740 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1741 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
1742 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1743 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1744 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1745 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1746 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1747 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1748 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1749 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1750 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1751 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1752 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1753 	BIT_ULL(POWER_DOMAIN_INIT))
1754 
1755 #define BDW_DISPLAY_POWER_DOMAINS (			\
1756 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1757 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1758 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1759 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1760 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1761 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1762 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1763 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1764 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1765 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1766 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
1767 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1768 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1769 	BIT_ULL(POWER_DOMAIN_INIT))
1770 
1771 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
1772 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
1773 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
1774 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
1775 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
1776 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
1777 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
1778 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
1779 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1780 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1781 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1782 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
1783 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
1784 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
1785 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
1786 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
1787 	BIT_ULL(POWER_DOMAIN_VGA) |				\
1788 	BIT_ULL(POWER_DOMAIN_INIT))
1789 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
1790 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
1791 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
1792 	BIT_ULL(POWER_DOMAIN_INIT))
1793 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
1794 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
1795 	BIT_ULL(POWER_DOMAIN_INIT))
1796 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
1797 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
1798 	BIT_ULL(POWER_DOMAIN_INIT))
1799 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
1800 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
1801 	BIT_ULL(POWER_DOMAIN_INIT))
1802 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
1803 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
1804 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
1805 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
1806 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
1807 	BIT_ULL(POWER_DOMAIN_INIT))
1808 
/*
 * Broxton equivalents of the SKL masks. BXT has no DDI D/E; DPIO PHY
 * common-lane wells (phy 0 = BC, phy 1 = A) replace the SKL DDI IO wells.
 */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Unlike SKL, the BXT DC-off mask also covers GMBUS. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1841 
/*
 * Geminilake masks. GLK has three single-port DPIO PHYs (A/B/C) and
 * per-port DDI IO and AUX wells in addition to the power-well-2 group.
 */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX A additionally carries the AUX IO A domain. */
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1892 
/*
 * Cannonlake masks. CNL adds port F (DDI/AUX F) on some SKUs; see
 * intel_power_domains_init() which trims the F wells when absent.
 */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX A additionally carries the AUX IO A domain. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
1949 
/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
/* PW_3 is a superset of PW_4: everything above plus pipe B, all
 * transcoders, DDI B-F (lanes + IO), AUX B-F and TBT1-4, VGA and audio. */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3) |		\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (			\
	ICL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - eDP/DSI VDSC
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	ICL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* One IO domain per DDI port. */
#define ICL_DDI_IO_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

/* One IO domain per AUX channel, including the four Thunderbolt channels. */
#define ICL_AUX_A_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_TBT1_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT1))
#define ICL_AUX_TBT2_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT2))
#define ICL_AUX_TBT3_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT3))
#define ICL_AUX_TBT4_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_TBT4))
2048 
/*
 * Ops for the virtual "always-on" well: enable/disable/sync_hw are no-ops
 * and is_enabled always reports enabled.
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe (display) power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common-lane wells; state query shared with VLV. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2069 
/* Platforms without controllable wells get a single always-on well
 * covering every power domain. */
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
};
2079 
/* Ops for the i830 "pipes" well. */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};

/* i830: the always-on well plus a single well for the pipes. */
static struct i915_power_well i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = I830_DISP_PW_PIPES,
	},
};
2102 
/* Generic HSW-style well ops, reused by HSW/BDW/SKL/BXT/GLK/CNL/ICL wells. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

/* Ops for the virtual gen9+ "DC off" well controlling DC state entry. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

/* Ops for the BXT/GLK DPIO common-lane PHY wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
2123 
/* Haswell: one global display well besides the always-on well. */
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.has_vga = true,
		},
	},
};

/* Broadwell: like HSW, but the display well also gates pipe B/C IRQs. */
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
2162 
/* Ops for the VLV display (DISP2D) well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Ops for the VLV DPIO common-lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* Generic ops for the VLV DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2183 
/*
 * Valleyview power wells. Note each dpio-tx well lists the union of all
 * four TX lane domain masks, so any lane domain keeps all TX wells on.
 */
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
2241 
/* Cherryview power wells. */
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.id = CHV_DISP_PW_PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.id = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};
2274 
2275 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2276 					 enum i915_power_well_id power_well_id)
2277 {
2278 	struct i915_power_well *power_well;
2279 	bool ret;
2280 
2281 	power_well = lookup_power_well(dev_priv, power_well_id);
2282 	ret = power_well->ops->is_enabled(dev_priv, power_well);
2283 
2284 	return ret;
2285 }
2286 
/*
 * Skylake power wells. Array order is the enabling order (disabling is
 * reversed); PW1 and MISC IO carry no domains because the DMC firmware
 * manages them.
 */
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
};
2354 
/* Broxton power wells; PHY common-lane wells replace the SKL DDI IO wells. */
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
2408 
/* Geminilake power wells: three single-port PHYs plus per-port AUX and
 * DDI IO wells. */
static struct i915_power_well glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_AUX_C,
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = GLK_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
};
2508 
/*
 * Cannonlake power wells. The DDI F IO and AUX F entries are the last two
 * in the array so intel_power_domains_init() can trim them on SKUs
 * without port F.
 */
static struct i915_power_well cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_D,
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = CNL_DISP_PW_AUX_F,
	},
};
2605 
/* Ops for the ICL combo-PHY AUX wells; only enable/disable differ from
 * the generic HSW-style ops. */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
2612 
/*
 * Icelake power wells. AUX A/B use the combo-PHY ops; PW1 is managed by
 * the DMC firmware and therefore carries no domains.
 */
static struct i915_power_well icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = I915_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_1,
		.hsw.has_fuses = true,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_2,
		.hsw.has_fuses = true,
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		.hsw.irq_pipe_mask = BIT(PIPE_B),
		.hsw.has_vga = true,
		.hsw.has_fuses = true,
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_A,
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_D,
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_E,
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_DDI_F,
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_A,
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = ICL_DISP_PW_AUX_B,
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_C,
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_D,
	},
	{
		.name = "AUX E",
		.domains = ICL_AUX_E_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_E,
	},
	{
		.name = "AUX F",
		.domains = ICL_AUX_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_F,
	},
	{
		.name = "AUX TBT1",
		.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT1,
	},
	{
		.name = "AUX TBT2",
		.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT2,
	},
	{
		.name = "AUX TBT3",
		.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT3,
	},
	{
		.name = "AUX TBT4",
		.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_AUX_TBT4,
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_4,
		.hsw.has_fuses = true,
		.hsw.irq_pipe_mask = BIT(PIPE_C),
	},
};
2756 
/*
 * Normalize the disable_power_well module parameter to 0 or 1.
 * A negative value means "auto" and defaults to 1 (wells may be disabled);
 * anything else is reduced to a plain boolean.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
2766 
2767 static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
2768 				    int enable_dc)
2769 {
2770 	uint32_t mask;
2771 	int requested_dc;
2772 	int max_dc;
2773 
2774 	if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
2775 		max_dc = 2;
2776 		mask = 0;
2777 	} else if (IS_GEN9_LP(dev_priv)) {
2778 		max_dc = 1;
2779 		/*
2780 		 * DC9 has a separate HW flow from the rest of the DC states,
2781 		 * not depending on the DMC firmware. It's needed by system
2782 		 * suspend/resume, so allow it unconditionally.
2783 		 */
2784 		mask = DC_STATE_EN_DC9;
2785 	} else {
2786 		max_dc = 0;
2787 		mask = 0;
2788 	}
2789 
2790 	if (!i915_modparams.disable_power_well)
2791 		max_dc = 0;
2792 
2793 	if (enable_dc >= 0 && enable_dc <= max_dc) {
2794 		requested_dc = enable_dc;
2795 	} else if (enable_dc == -1) {
2796 		requested_dc = max_dc;
2797 	} else if (enable_dc > max_dc && enable_dc <= 2) {
2798 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
2799 			      enable_dc, max_dc);
2800 		requested_dc = max_dc;
2801 	} else {
2802 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
2803 		requested_dc = max_dc;
2804 	}
2805 
2806 	if (requested_dc > 1)
2807 		mask |= DC_STATE_EN_UPTO_DC6;
2808 	if (requested_dc > 0)
2809 		mask |= DC_STATE_EN_UPTO_DC5;
2810 
2811 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
2812 
2813 	return mask;
2814 }
2815 
2816 static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
2817 {
2818 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2819 	u64 power_well_ids;
2820 	int i;
2821 
2822 	power_well_ids = 0;
2823 	for (i = 0; i < power_domains->power_well_count; i++) {
2824 		enum i915_power_well_id id = power_domains->power_wells[i].id;
2825 
2826 		WARN_ON(id >= sizeof(power_well_ids) * 8);
2827 		WARN_ON(power_well_ids & BIT_ULL(id));
2828 		power_well_ids |= BIT_ULL(id);
2829 	}
2830 }
2831 
/*
 * Assign a platform's static power well table to @power_domains and record
 * its length. Kept as a macro so that ARRAY_SIZE() is evaluated on the
 * actual array argument instead of a decayed pointer.
 */
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

2836 
2837 /**
2838  * intel_power_domains_init - initializes the power domain structures
2839  * @dev_priv: i915 device instance
2840  *
2841  * Initializes the power domain structures for @dev_priv depending upon the
2842  * supported platform.
2843  */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	/* Sanitize the power-well/DC-state modparams once, up front. */
	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	/* Power domain masks are stored in 64-bit wide bitmasks. */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	rw_init(&power_domains->lock, "pdmlk");

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_ICELAKE(dev_priv)) {
		set_power_wells(power_domains, icl_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;

	} else if (IS_BROXTON(dev_priv)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEMINILAKE(dev_priv)) {
		set_power_wells(power_domains, glk_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Fallback: a single always-on dummy power well. */
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	assert_power_well_ids_unique(dev_priv);

	return 0;
}
2900 
2901 /**
2902  * intel_power_domains_fini - finalizes the power domain structures
2903  * @dev_priv: i915 device instance
2904  *
2905  * Finalizes the power domain structures for @dev_priv depending upon the
2906  * supported platform. This function also disables runtime pm and ensures that
2907  * the device stays powered up so that the driver can be reloaded.
2908  */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
#ifdef __linux__
	/* Runtime PM device handle; only used on the Linux build. */
	struct device *kdev = &dev_priv->drm.pdev->dev;
#endif

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
#ifdef __linux__
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(kdev);
#endif
}
2939 
2940 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2941 {
2942 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2943 	struct i915_power_well *power_well;
2944 
2945 	mutex_lock(&power_domains->lock);
2946 	for_each_power_well(dev_priv, power_well) {
2947 		power_well->ops->sync_hw(dev_priv, power_well);
2948 		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
2949 								     power_well);
2950 	}
2951 	mutex_unlock(&power_domains->lock);
2952 }
2953 
2954 static inline
2955 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
2956 			  i915_reg_t reg, bool enable)
2957 {
2958 	u32 val, status;
2959 
2960 	val = I915_READ(reg);
2961 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
2962 	I915_WRITE(reg, val);
2963 	POSTING_READ(reg);
2964 	udelay(10);
2965 
2966 	status = I915_READ(reg) & DBUF_POWER_STATE;
2967 	if ((enable && !status) || (!enable && status)) {
2968 		DRM_ERROR("DBus power %s timeout!\n",
2969 			  enable ? "enable" : "disable");
2970 		return false;
2971 	}
2972 	return true;
2973 }
2974 
/* Power up the single gen9 DBUF slice. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
2979 
/* Power down the single gen9 DBUF slice. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
}
2984 
2985 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
2986 {
2987 	if (INTEL_GEN(dev_priv) < 11)
2988 		return 1;
2989 	return 2;
2990 }
2991 
2992 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
2993 			    u8 req_slices)
2994 {
2995 	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
2996 	u32 val;
2997 	bool ret;
2998 
2999 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
3000 		DRM_ERROR("Invalid number of dbuf slices requested\n");
3001 		return;
3002 	}
3003 
3004 	if (req_slices == hw_enabled_slices || req_slices == 0)
3005 		return;
3006 
3007 	val = I915_READ(DBUF_CTL_S2);
3008 	if (req_slices > hw_enabled_slices)
3009 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
3010 	else
3011 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
3012 
3013 	if (ret)
3014 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
3015 }
3016 
/* Power up both gen11 DBUF slices and update the cached slice count. */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the hardware time to act on the power requests. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
3031 
/* Power down both gen11 DBUF slices and update the cached slice count. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL_S2);

	/* Give the hardware time to act on the power requests. */
	udelay(10);

	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power disable timeout!\n");
	else
		dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
3046 
3047 static void icl_mbus_init(struct drm_i915_private *dev_priv)
3048 {
3049 	uint32_t val;
3050 
3051 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
3052 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
3053 	      MBUS_ABOX_B_CREDIT(1) |
3054 	      MBUS_ABOX_BW_CREDIT(1);
3055 
3056 	I915_WRITE(MBUS_ABOX_CTL, val);
3057 }
3058 
/*
 * Bring up the SKL display core: disable DC states, enable the PCH reset
 * handshake, power up PG1 and Misc I/O, then CDCLK and DBUF, and finally
 * reload the DMC firmware when resuming.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	skl_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* On resume the DMC payload must be re-programmed into hardware. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3090 
/* Tear down the SKL display core: reverse of skl_display_core_init(). */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3120 
/*
 * Bring up the BXT display core: disable DC states, clear the PCH reset
 * handshake (no PCH on BXT), enable PG1, then CDCLK and DBUF, and reload
 * the DMC firmware when resuming.
 */
void bxt_display_core_init(struct drm_i915_private *dev_priv,
			   bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	bxt_init_cdclk(dev_priv);

	gen9_dbuf_enable(dev_priv);

	/* On resume the DMC payload must be re-programmed into hardware. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3155 
/* Tear down the BXT display core: reverse of bxt_display_core_init(). */
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	gen9_dbuf_disable(dev_priv);

	bxt_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
3183 
/* Indices into cnl_procmon_values[], one per voltage/process point. */
enum {
	PROCMON_0_85V_DOT_0,
	PROCMON_0_95V_DOT_0,
	PROCMON_0_95V_DOT_1,
	PROCMON_1_05V_DOT_0,
	PROCMON_1_05V_DOT_1,
};
3191 
/*
 * Combo PHY COMP register reference values, one entry per voltage/process
 * point; selected and programmed by cnl_set_procmon_ref_values().
 */
static const struct cnl_procmon {
	u32 dw1, dw9, dw10;
} cnl_procmon_values[] = {
	[PROCMON_0_85V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
	[PROCMON_0_95V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
	[PROCMON_0_95V_DOT_1] =
		{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
	[PROCMON_1_05V_DOT_0] =
		{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
	[PROCMON_1_05V_DOT_1] =
		{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
3206 
3207 /*
3208  * CNL has just one set of registers, while ICL has two sets: one for port A and
3209  * the other for port B. The CNL registers are equivalent to the ICL port A
3210  * registers, that's why we call the ICL macros even though the function has CNL
3211  * on its name.
3212  */
/*
 * Read the voltage/process info from COMP_DW3 and program the matching
 * procmon reference values into the port's COMP_DW1/DW9/DW10 registers.
 */
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
				       enum port port)
{
	const struct cnl_procmon *procmon;
	u32 val;

	val = I915_READ(ICL_PORT_COMP_DW3(port));
	switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
	default:
		MISSING_CASE(val);
		/* fall through */
	case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
		break;
	case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
		break;
	case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
		procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
		break;
	}

	/* Only the low and high bytes of DW1 are replaced; keep the rest. */
	val = I915_READ(ICL_PORT_COMP_DW1(port));
	val &= ~((0xff << 16) | 0xff);
	val |= procmon->dw1;
	I915_WRITE(ICL_PORT_COMP_DW1(port), val);

	I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
	I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
}
3249 
/*
 * Bring up the CNL display core. The numbered comments follow the
 * sequence used in the code below.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* 2. Enable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val &= ~CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);

	/* Dummy PORT_A to get the correct CNL register from the ICL macro */
	cnl_set_procmon_ref_values(dev_priv, PORT_A);

	val = I915_READ(CNL_PORT_COMP_DW0);
	val |= COMP_INIT;
	I915_WRITE(CNL_PORT_COMP_DW0, val);

	/* 3. Set power down enable on the common lanes. */
	val = I915_READ(CNL_PORT_CL1CM_DW5);
	val |= CL_POWER_DOWN_ENABLE;
	I915_WRITE(CNL_PORT_CL1CM_DW5, val);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	cnl_init_cdclk(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* On resume the DMC payload must be re-programmed into hardware. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
3298 
/* Tear down the CNL display core: reverse of cnl_display_core_init(). */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	cnl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. Disable Comp */
	val = I915_READ(CHICKEN_MISC_2);
	val |= CNL_COMP_PWR_DOWN;
	I915_WRITE(CHICKEN_MISC_2, val);
}
3332 
/*
 * Bring up the ICL display core. Combo PHY comp/power-down setup is done
 * for ports A and B; the numbered comments follow the sequence below.
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val |= RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	for (port = PORT_A; port <= PORT_B; port++) {
		/* 2. Enable DDI combo PHY comp. */
		val = I915_READ(ICL_PHY_MISC(port));
		val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);

		cnl_set_procmon_ref_values(dev_priv, port);

		val = I915_READ(ICL_PORT_COMP_DW0(port));
		val |= COMP_INIT;
		I915_WRITE(ICL_PORT_COMP_DW0(port), val);

		/* 3. Set power down enable. */
		val = I915_READ(ICL_PORT_CL_DW5(port));
		val |= CL_POWER_DOWN_ENABLE;
		I915_WRITE(ICL_PORT_CL_DW5(port), val);
	}

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CDCLK. */
	icl_init_cdclk(dev_priv);

	/* 6. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 7. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 8. CHICKEN_DCPR_1 */
	I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
					CNL_DDI_CLOCK_REG_ACCESS_ON);
}
3388 
/* Tear down the ICL display core: reverse of icl_display_core_init(). */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	enum port port;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	icl_uninit_cdclk(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Disable Comp on ports A and B. */
	for (port = PORT_A; port <= PORT_B; port++) {
		val = I915_READ(ICL_PHY_MISC(port));
		val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
		I915_WRITE(ICL_PHY_MISC(port), val);
	}
}
3423 
/*
 * Reconstruct the initial shadow value of DISPLAY_PHY_CONTROL from the
 * current power well and lane status, since the register itself must
 * never be read (see the workaround comment below).
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	/* Write (never read) the reconstructed shadow value to hardware. */
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
3510 
/*
 * VLV workaround: toggle the display PHY side reset by enabling the
 * DISP2D well and then gating the common lane well. The asymmetric
 * enable/disable here is intentional; see the comment below.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
3538 
3539 /**
3540  * intel_power_domains_init_hw - initialize hardware power domain state
3541  * @dev_priv: i915 device instance
3542  * @resume: Called from resume code paths or not
3543  *
3544  * This function initializes the hardware power domain state and enables all
3545  * power wells belonging to the INIT power domain. Power wells in other
3546  * domains (and not in the INIT domain) are referenced or disabled during the
3547  * modeset state HW readout. After that the reference count of each power well
3548  * must match its HW enabled state, see intel_power_domains_verify_state().
3549  */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	/* Run the platform-specific display core init / workaround. */
	if (IS_ICELAKE(dev_priv)) {
		icl_display_core_init(dev_priv, resume);
	} else if (IS_CANNONLAKE(dev_priv)) {
		cnl_display_core_init(dev_priv, resume);
	} else if (IS_GEN9_BC(dev_priv)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_GEN9_LP(dev_priv)) {
		bxt_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Disable power support if the user asked so. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
3582 
3583 /**
3584  * intel_power_domains_suspend - suspend power domain state
3585  * @dev_priv: i915 device instance
3586  *
3587  * This function prepares the hardware power domain state before entering
3588  * system suspend. It must be paired with intel_power_domains_init_hw().
3589  */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/* Tear down the platform-specific display core state, if any. */
	if (IS_ICELAKE(dev_priv))
		icl_display_core_uninit(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_display_core_uninit(dev_priv);
	else if (IS_GEN9_BC(dev_priv))
		skl_display_core_uninit(dev_priv);
	else if (IS_GEN9_LP(dev_priv))
		bxt_display_core_uninit(dev_priv);
}
3608 
3609 static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
3610 {
3611 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
3612 	struct i915_power_well *power_well;
3613 
3614 	for_each_power_well(dev_priv, power_well) {
3615 		enum intel_display_power_domain domain;
3616 
3617 		DRM_DEBUG_DRIVER("%-25s %d\n",
3618 				 power_well->name, power_well->count);
3619 
3620 		for_each_power_domain(domain, power_well->domains)
3621 			DRM_DEBUG_DRIVER("  %-23s %d\n",
3622 					 intel_display_power_domain_str(domain),
3623 					 power_domains->domain_use_count[domain]);
3624 	}
3625 }
3626 
3627 /**
3628  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
3629  * @dev_priv: i915 device instance
3630  *
3631  * Verify if the reference count of each power well matches its HW enabled
3632  * state and the total refcount of the domains it belongs to. This must be
3633  * called after modeset HW state sanitization, which is responsible for
3634  * acquiring reference counts for any power wells in use and disabling the
3635  * ones left on by BIOS but not required by any active output.
3636  */
void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	dump_domain_info = false;
	for_each_power_well(dev_priv, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/*
		 * Power wells not belonging to any domain (like the MISC_IO
		 * and PW1 power wells) are under FW control, so ignore them,
		 * since their state can change asynchronously.
		 */
		if (!power_well->domains)
			continue;

		/* HW enabled state must match refcount/always-on status. */
		enabled = power_well->ops->is_enabled(dev_priv, power_well);
		if ((power_well->count || power_well->always_on) != enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->name, power_well->count, enabled);

		/* The well's refcount must equal the sum over its domains. */
		domains_count = 0;
		for_each_power_domain(domain, power_well->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	/* Dump the full table at most once to avoid log spam. */
	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(dev_priv);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
3688 
3689 /**
3690  * intel_runtime_pm_get - grab a runtime pm reference
3691  * @dev_priv: i915 device instance
3692  *
3693  * This function grabs a device-level runtime pm reference (mostly used for GEM
3694  * code to ensure the GTT or GT is on) and ensures that it is powered up.
3695  *
3696  * Any runtime pm reference obtained by this function must have a symmetric
3697  * call to intel_runtime_pm_put() to release the reference again.
3698  */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
#ifdef __linux__
	/* On Linux, actually resume the device via the RPM core. */
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct device *kdev = &pdev->dev;
	int ret;

	ret = pm_runtime_get_sync(kdev);
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
#endif

	/* On both builds, track the wakeref in the driver's own counter. */
	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
3713 
3714 /**
3715  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
3716  * @dev_priv: i915 device instance
3717  *
3718  * This function grabs a device-level runtime pm reference if the device is
3719  * already in use and ensures that it is powered up. It is illegal to try
3720  * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
3721  *
3722  * Any runtime pm reference obtained by this function must have a symmetric
3723  * call to intel_runtime_pm_put() to release the reference again.
3724  *
3725  * Returns: True if the wakeref was acquired, or False otherwise.
3726  */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
#ifdef __linux__
	if (IS_ENABLED(CONFIG_PM)) {
		struct pci_dev *pdev = dev_priv->drm.pdev;
		struct device *kdev = &pdev->dev;

		/*
		 * In cases runtime PM is disabled by the RPM core and we get
		 * an -EINVAL return value we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_in_use(kdev) <= 0)
			return false;
	}
#endif

	/* Non-Linux builds always succeed and only track the refcount. */
	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
3750 
3751 /**
3752  * intel_runtime_pm_get_noresume - grab a runtime pm reference
3753  * @dev_priv: i915 device instance
3754  *
3755  * This function grabs a device-level runtime pm reference (mostly used for GEM
3756  * code to ensure the GTT or GT is on).
3757  *
3758  * It will _not_ power up the device but instead only check that it's powered
3759  * on.  Therefore it is only valid to call this functions from contexts where
3760  * the device is known to be powered up and where trying to power it up would
3761  * result in hilarity and deadlocks. That pretty much means only the system
3762  * suspend/resume code where this is used to grab runtime pm references for
3763  * delayed setup down in work items.
3764  *
3765  * Any runtime pm reference obtained by this function must have a symmetric
3766  * call to intel_runtime_pm_put() to release the reference again.
3767  */
3768 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
3769 {
3770 #ifdef __linux__
3771 	struct pci_dev *pdev = dev_priv->drm.pdev;
3772 	struct device *kdev = &pdev->dev;
3773 
3774 	assert_rpm_wakelock_held(dev_priv);
3775 	pm_runtime_get_noresume(kdev);
3776 #else
3777 	assert_rpm_wakelock_held(dev_priv);
3778 #endif
3779 
3780 	atomic_inc(&dev_priv->runtime_pm.wakeref_count);
3781 }
3782 
3783 /**
3784  * intel_runtime_pm_put - release a runtime pm reference
3785  * @dev_priv: i915 device instance
3786  *
3787  * This function drops the device-level runtime pm reference obtained by
3788  * intel_runtime_pm_get() and might power down the corresponding
3789  * hardware block right away if this is the last reference.
3790  */
3791 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
3792 {
3793 #ifdef __linux__
3794 	struct pci_dev *pdev = dev_priv->drm.pdev;
3795 	struct device *kdev = &pdev->dev;
3796 
3797 	assert_rpm_wakelock_held(dev_priv);
3798 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3799 
3800 	pm_runtime_mark_last_busy(kdev);
3801 	pm_runtime_put_autosuspend(kdev);
3802 #else
3803 	assert_rpm_wakelock_held(dev_priv);
3804 	atomic_dec(&dev_priv->runtime_pm.wakeref_count);
3805 #endif
3806 }
3807 
3808 /**
3809  * intel_runtime_pm_enable - enable runtime pm
3810  * @dev_priv: i915 device instance
3811  *
3812  * This function enables runtime pm at the end of the driver load sequence.
3813  *
3814  * Note that this function does currently not enable runtime pm for the
3815  * subordinate display power domains. That is only done on the first modeset
3816  * using intel_display_set_init_power().
3817  */
3818 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
3819 {
3820 #ifdef __linux__
3821 	struct pci_dev *pdev = dev_priv->drm.pdev;
3822 	struct device *kdev = &pdev->dev;
3823 
3824 	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
3825 	pm_runtime_mark_last_busy(kdev);
3826 
3827 	/*
3828 	 * Take a permanent reference to disable the RPM functionality and drop
3829 	 * it only when unloading the driver. Use the low level get/put helpers,
3830 	 * so the driver's own RPM reference tracking asserts also work on
3831 	 * platforms without RPM support.
3832 	 */
3833 	if (!HAS_RUNTIME_PM(dev_priv)) {
3834 		int ret;
3835 
3836 		pm_runtime_dont_use_autosuspend(kdev);
3837 		ret = pm_runtime_get_sync(kdev);
3838 		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
3839 	} else {
3840 		pm_runtime_use_autosuspend(kdev);
3841 	}
3842 
3843 	/*
3844 	 * The core calls the driver load handler with an RPM reference held.
3845 	 * We drop that here and will reacquire it during unloading in
3846 	 * intel_power_domains_fini().
3847 	 */
3848 	pm_runtime_put_autosuspend(kdev);
3849 #endif
3850 }
3851