xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/display/intel_display_power.c (revision 2a8c33eaff5adddac3ef2c5cb48ee67ef6d5d6dc)
1 /*	$NetBSD: intel_display_power.c,v 1.5 2021/12/19 12:32:15 riastradh Exp $	*/
2 
3 /* SPDX-License-Identifier: MIT */
4 /*
5  * Copyright © 2019 Intel Corporation
6  */
7 
8 #include <sys/cdefs.h>
9 __KERNEL_RCSID(0, "$NetBSD: intel_display_power.c,v 1.5 2021/12/19 12:32:15 riastradh Exp $");
10 
11 #include "display/intel_crt.h"
12 #include "display/intel_dp.h"
13 
14 #include "i915_drv.h"
15 #include "i915_irq.h"
16 #include "intel_cdclk.h"
17 #include "intel_combo_phy.h"
18 #include "intel_csr.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_dpio_phy.h"
22 #include "intel_hotplug.h"
23 #include "intel_sideband.h"
24 #include "intel_tc.h"
25 #include "intel_vga.h"
26 
27 #include <linux/nbsd-namespace.h>
28 
29 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
30 					 enum i915_power_well_id power_well_id);
31 
32 const char *
33 intel_display_power_domain_str(enum intel_display_power_domain domain)
34 {
35 	switch (domain) {
36 	case POWER_DOMAIN_DISPLAY_CORE:
37 		return "DISPLAY_CORE";
38 	case POWER_DOMAIN_PIPE_A:
39 		return "PIPE_A";
40 	case POWER_DOMAIN_PIPE_B:
41 		return "PIPE_B";
42 	case POWER_DOMAIN_PIPE_C:
43 		return "PIPE_C";
44 	case POWER_DOMAIN_PIPE_D:
45 		return "PIPE_D";
46 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
47 		return "PIPE_A_PANEL_FITTER";
48 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
49 		return "PIPE_B_PANEL_FITTER";
50 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
51 		return "PIPE_C_PANEL_FITTER";
52 	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
53 		return "PIPE_D_PANEL_FITTER";
54 	case POWER_DOMAIN_TRANSCODER_A:
55 		return "TRANSCODER_A";
56 	case POWER_DOMAIN_TRANSCODER_B:
57 		return "TRANSCODER_B";
58 	case POWER_DOMAIN_TRANSCODER_C:
59 		return "TRANSCODER_C";
60 	case POWER_DOMAIN_TRANSCODER_D:
61 		return "TRANSCODER_D";
62 	case POWER_DOMAIN_TRANSCODER_EDP:
63 		return "TRANSCODER_EDP";
64 	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
65 		return "TRANSCODER_VDSC_PW2";
66 	case POWER_DOMAIN_TRANSCODER_DSI_A:
67 		return "TRANSCODER_DSI_A";
68 	case POWER_DOMAIN_TRANSCODER_DSI_C:
69 		return "TRANSCODER_DSI_C";
70 	case POWER_DOMAIN_PORT_DDI_A_LANES:
71 		return "PORT_DDI_A_LANES";
72 	case POWER_DOMAIN_PORT_DDI_B_LANES:
73 		return "PORT_DDI_B_LANES";
74 	case POWER_DOMAIN_PORT_DDI_C_LANES:
75 		return "PORT_DDI_C_LANES";
76 	case POWER_DOMAIN_PORT_DDI_D_LANES:
77 		return "PORT_DDI_D_LANES";
78 	case POWER_DOMAIN_PORT_DDI_E_LANES:
79 		return "PORT_DDI_E_LANES";
80 	case POWER_DOMAIN_PORT_DDI_F_LANES:
81 		return "PORT_DDI_F_LANES";
82 	case POWER_DOMAIN_PORT_DDI_G_LANES:
83 		return "PORT_DDI_G_LANES";
84 	case POWER_DOMAIN_PORT_DDI_H_LANES:
85 		return "PORT_DDI_H_LANES";
86 	case POWER_DOMAIN_PORT_DDI_I_LANES:
87 		return "PORT_DDI_I_LANES";
88 	case POWER_DOMAIN_PORT_DDI_A_IO:
89 		return "PORT_DDI_A_IO";
90 	case POWER_DOMAIN_PORT_DDI_B_IO:
91 		return "PORT_DDI_B_IO";
92 	case POWER_DOMAIN_PORT_DDI_C_IO:
93 		return "PORT_DDI_C_IO";
94 	case POWER_DOMAIN_PORT_DDI_D_IO:
95 		return "PORT_DDI_D_IO";
96 	case POWER_DOMAIN_PORT_DDI_E_IO:
97 		return "PORT_DDI_E_IO";
98 	case POWER_DOMAIN_PORT_DDI_F_IO:
99 		return "PORT_DDI_F_IO";
100 	case POWER_DOMAIN_PORT_DDI_G_IO:
101 		return "PORT_DDI_G_IO";
102 	case POWER_DOMAIN_PORT_DDI_H_IO:
103 		return "PORT_DDI_H_IO";
104 	case POWER_DOMAIN_PORT_DDI_I_IO:
105 		return "PORT_DDI_I_IO";
106 	case POWER_DOMAIN_PORT_DSI:
107 		return "PORT_DSI";
108 	case POWER_DOMAIN_PORT_CRT:
109 		return "PORT_CRT";
110 	case POWER_DOMAIN_PORT_OTHER:
111 		return "PORT_OTHER";
112 	case POWER_DOMAIN_VGA:
113 		return "VGA";
114 	case POWER_DOMAIN_AUDIO:
115 		return "AUDIO";
116 	case POWER_DOMAIN_AUX_A:
117 		return "AUX_A";
118 	case POWER_DOMAIN_AUX_B:
119 		return "AUX_B";
120 	case POWER_DOMAIN_AUX_C:
121 		return "AUX_C";
122 	case POWER_DOMAIN_AUX_D:
123 		return "AUX_D";
124 	case POWER_DOMAIN_AUX_E:
125 		return "AUX_E";
126 	case POWER_DOMAIN_AUX_F:
127 		return "AUX_F";
128 	case POWER_DOMAIN_AUX_G:
129 		return "AUX_G";
130 	case POWER_DOMAIN_AUX_H:
131 		return "AUX_H";
132 	case POWER_DOMAIN_AUX_I:
133 		return "AUX_I";
134 	case POWER_DOMAIN_AUX_IO_A:
135 		return "AUX_IO_A";
136 	case POWER_DOMAIN_AUX_C_TBT:
137 		return "AUX_C_TBT";
138 	case POWER_DOMAIN_AUX_D_TBT:
139 		return "AUX_D_TBT";
140 	case POWER_DOMAIN_AUX_E_TBT:
141 		return "AUX_E_TBT";
142 	case POWER_DOMAIN_AUX_F_TBT:
143 		return "AUX_F_TBT";
144 	case POWER_DOMAIN_AUX_G_TBT:
145 		return "AUX_G_TBT";
146 	case POWER_DOMAIN_AUX_H_TBT:
147 		return "AUX_H_TBT";
148 	case POWER_DOMAIN_AUX_I_TBT:
149 		return "AUX_I_TBT";
150 	case POWER_DOMAIN_GMBUS:
151 		return "GMBUS";
152 	case POWER_DOMAIN_INIT:
153 		return "INIT";
154 	case POWER_DOMAIN_MODESET:
155 		return "MODESET";
156 	case POWER_DOMAIN_GT_IRQ:
157 		return "GT_IRQ";
158 	case POWER_DOMAIN_DPLL_DC_OFF:
159 		return "DPLL_DC_OFF";
160 	default:
161 		MISSING_CASE(domain);
162 		return "?";
163 	}
164 }
165 
166 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
167 				    struct i915_power_well *power_well)
168 {
169 	DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
170 	power_well->desc->ops->enable(dev_priv, power_well);
171 	power_well->hw_enabled = true;
172 }
173 
174 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
175 				     struct i915_power_well *power_well)
176 {
177 	DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
178 	power_well->hw_enabled = false;
179 	power_well->desc->ops->disable(dev_priv, power_well);
180 }
181 
182 static void intel_power_well_get(struct drm_i915_private *dev_priv,
183 				 struct i915_power_well *power_well)
184 {
185 	if (!power_well->count++)
186 		intel_power_well_enable(dev_priv, power_well);
187 }
188 
189 static void intel_power_well_put(struct drm_i915_private *dev_priv,
190 				 struct i915_power_well *power_well)
191 {
192 	WARN(!power_well->count, "Use count on power well %s is already zero",
193 	     power_well->desc->name);
194 
195 	if (!--power_well->count)
196 		intel_power_well_disable(dev_priv, power_well);
197 }
198 
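/*
 * A minimal sketch of the refcounting contract implemented by the get/put
 * helpers above: the first reference powers the well up, the last one
 * powers it down, so gets and puts must be strictly balanced by callers.
 *
 *	intel_power_well_get(dev_priv, power_well);	// 0 -> 1: ->enable()
 *	intel_power_well_get(dev_priv, power_well);	// 1 -> 2: no-op
 *	intel_power_well_put(dev_priv, power_well);	// 2 -> 1: no-op
 *	intel_power_well_put(dev_priv, power_well);	// 1 -> 0: ->disable()
 */
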
199 /**
200  * __intel_display_power_is_enabled - unlocked check for a power domain
201  * @dev_priv: i915 device instance
202  * @domain: power domain to check
203  *
204  * This is the unlocked version of intel_display_power_is_enabled() and should
205  * only be used from error capture and recovery code where deadlocks are
206  * possible.
207  *
208  * Returns:
209  * True when the power domain is enabled, false otherwise.
210  */
211 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
212 				      enum intel_display_power_domain domain)
213 {
214 	struct i915_power_well *power_well;
215 	bool is_enabled;
216 
217 	if (dev_priv->runtime_pm.suspended)
218 		return false;
219 
220 	is_enabled = true;
221 
222 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
223 		if (power_well->desc->always_on)
224 			continue;
225 
226 		if (!power_well->hw_enabled) {
227 			is_enabled = false;
228 			break;
229 		}
230 	}
231 
232 	return is_enabled;
233 }
234 
235 /**
236  * intel_display_power_is_enabled - check for a power domain
237  * @dev_priv: i915 device instance
238  * @domain: power domain to check
239  *
240  * This function can be used to check the hw power domain state. It is mostly
241  * used in hardware state readout functions. Everywhere else code should rely
242  * upon explicit power domain reference counting to ensure that the hardware
243  * block is powered up before accessing it.
244  *
245  * Callers must hold the relevant modesetting locks to ensure that concurrent
246  * threads can't disable the power well while the caller tries to read a few
247  * registers.
248  *
249  * Returns:
250  * True when the power domain is enabled, false otherwise.
251  */
252 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
253 				    enum intel_display_power_domain domain)
254 {
255 	struct i915_power_domains *power_domains;
256 	bool ret;
257 
258 	power_domains = &dev_priv->power_domains;
259 
260 	mutex_lock(&power_domains->lock);
261 	ret = __intel_display_power_is_enabled(dev_priv, domain);
262 	mutex_unlock(&power_domains->lock);
263 
264 	return ret;
265 }
266 
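/*
 * A minimal usage sketch for the check above: hardware state readout code
 * (holding the relevant modeset locks) can guard register access on the
 * domain state, e.g.
 *
 *	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
 *		val = I915_READ(PIPECONF(PIPE_A));
 *
 * All other code should take an explicit reference with
 * intel_display_power_get()/intel_display_power_put() instead, so the
 * domain is guaranteed to stay powered across the access.
 */
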
267 /*
268  * Starting with Haswell, we have a "Power Down Well" that can be turned off
269  * when not needed anymore. We have 4 registers (BIOS, DRIVER, KVMR and
270  * DEBUG) that can request the power well to be enabled; it will only be
271  * disabled if none of them is requesting it.
272  */
273 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
274 				       u8 irq_pipe_mask, bool has_vga)
275 {
276 #ifndef __NetBSD__ /* XXX We wait until intelfb is ready.  */
277 	if (has_vga)
278 		intel_vga_reset_io_mem(dev_priv);
279 #endif
280 
281 	if (irq_pipe_mask)
282 		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
283 }
284 
285 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
286 				       u8 irq_pipe_mask)
287 {
288 	if (irq_pipe_mask)
289 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
290 }
291 
292 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
293 					   struct i915_power_well *power_well)
294 {
295 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
296 	int pw_idx = power_well->desc->hsw.idx;
297 
298 	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
299 	if (intel_de_wait_for_set(dev_priv, regs->driver,
300 				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
301 		DRM_DEBUG_KMS("%s power well enable timeout\n",
302 			      power_well->desc->name);
303 
304 		/* An AUX timeout is expected if the TBT DP tunnel is down. */
305 		WARN_ON(!power_well->desc->hsw.is_tc_tbt);
306 	}
307 }
308 
309 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
310 				     const struct i915_power_well_regs *regs,
311 				     int pw_idx)
312 {
313 	u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
314 	u32 ret;
315 
316 	ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
317 	ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
318 	if (regs->kvmr.reg)
319 		ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
320 	ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
321 
322 	return ret;
323 }
324 
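/*
 * The value returned above packs one requester per bit; a sketch of how
 * the disable path below decodes it:
 *
 *	u32 reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx);
 *	bool bios   = reqs & 1;	// BIOS request register
 *	bool driver = reqs & 2;	// driver request register
 *	bool kvmr   = reqs & 4;	// KVMR request register, if present
 *	bool debug  = reqs & 8;	// DEBUG request register
 */
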
325 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
326 					    struct i915_power_well *power_well)
327 {
328 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
329 	int pw_idx = power_well->desc->hsw.idx;
330 	bool disabled;
331 	u32 reqs;
332 
333 	/*
334 	 * Bspec doesn't require waiting for PWs to get disabled, but still do
335 	 * this for paranoia. The known cases where a PW will be forced on:
336 	 * - a KVMR request on any power well via the KVMR request register
337 	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
338 	 *   DEBUG request registers
339 	 * Skip the wait in case any of the request bits are set and print a
340 	 * diagnostic message.
341 	 */
342 	wait_for((disabled = !(I915_READ(regs->driver) &
343 			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
344 		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
345 	if (disabled)
346 		return;
347 
348 	DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
349 		      power_well->desc->name,
350 		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
351 }
352 
353 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
354 					   enum skl_power_gate pg)
355 {
356 	/* Timeout 5us for PG#0, for other PGs 1us */
357 	WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
358 				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
359 }
360 
361 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
362 				  struct i915_power_well *power_well)
363 {
364 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
365 	int pw_idx = power_well->desc->hsw.idx;
366 	bool wait_fuses = power_well->desc->hsw.has_fuses;
367 	enum skl_power_gate uninitialized_var(pg);
368 	u32 val;
369 
370 	if (wait_fuses) {
371 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
372 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
373 		/*
374 		 * For PW1 we have to wait both for the PW0/PG0 fuse state
375 		 * before enabling the power well and PW1/PG1's own fuse
376 		 * state after the enabling. For all other power wells with
377 		 * fuses we only have to wait for that PW/PG's fuse state
378 		 * after the enabling.
379 		 */
380 		if (pg == SKL_PG1)
381 			gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
382 	}
383 
384 	val = I915_READ(regs->driver);
385 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
386 	hsw_wait_for_power_well_enable(dev_priv, power_well);
387 
388 	/* Display WA #1178: cnl */
389 	if (IS_CANNONLAKE(dev_priv) &&
390 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
391 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
392 		val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
393 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
394 		I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
395 	}
396 
397 	if (wait_fuses)
398 		gen9_wait_for_power_well_fuses(dev_priv, pg);
399 
400 	hsw_power_well_post_enable(dev_priv,
401 				   power_well->desc->hsw.irq_pipe_mask,
402 				   power_well->desc->hsw.has_vga);
403 }
404 
405 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
406 				   struct i915_power_well *power_well)
407 {
408 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
409 	int pw_idx = power_well->desc->hsw.idx;
410 	u32 val;
411 
412 	hsw_power_well_pre_disable(dev_priv,
413 				   power_well->desc->hsw.irq_pipe_mask);
414 
415 	val = I915_READ(regs->driver);
416 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
417 	hsw_wait_for_power_well_disable(dev_priv, power_well);
418 }
419 
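/*
 * Both HSW paths above follow the same request/ack protocol: toggle
 * HSW_PWR_WELL_CTL_REQ(pw_idx) in the driver request register, then poll
 * HSW_PWR_WELL_CTL_STATE(pw_idx) until the hardware acknowledges, e.g.
 * for enable:
 *
 *	val = I915_READ(regs->driver);
 *	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
 *	hsw_wait_for_power_well_enable(dev_priv, power_well);
 */
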
420 #define ICL_AUX_PW_TO_PHY(pw_idx)	((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
421 
422 static void
423 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
424 				    struct i915_power_well *power_well)
425 {
426 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
427 	int pw_idx = power_well->desc->hsw.idx;
428 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
429 	u32 val;
430 
431 	WARN_ON(!IS_ICELAKE(dev_priv));
432 
433 	val = I915_READ(regs->driver);
434 	I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
435 
436 	if (INTEL_GEN(dev_priv) < 12) {
437 		val = I915_READ(ICL_PORT_CL_DW12(phy));
438 		I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
439 	}
440 
441 	hsw_wait_for_power_well_enable(dev_priv, power_well);
442 
443 	/* Display WA #1178: icl */
444 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
445 	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
446 		val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
447 		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
448 		I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
449 	}
450 }
451 
452 static void
453 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
454 				     struct i915_power_well *power_well)
455 {
456 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
457 	int pw_idx = power_well->desc->hsw.idx;
458 	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
459 	u32 val;
460 
461 	WARN_ON(!IS_ICELAKE(dev_priv));
462 
463 	val = I915_READ(ICL_PORT_CL_DW12(phy));
464 	I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
465 
466 	val = I915_READ(regs->driver);
467 	I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
468 
469 	hsw_wait_for_power_well_disable(dev_priv, power_well);
470 }
471 
472 #define ICL_AUX_PW_TO_CH(pw_idx)	\
473 	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
474 
475 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
476 	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
477 
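/*
 * Example of the mappings above: ICL_PW_CTL_IDX_AUX_A maps to AUX_CH_A
 * (and so on alphabetically), while the first Thunderbolt AUX well,
 * ICL_PW_CTL_IDX_AUX_TBT1, maps to AUX_CH_C, since per these macros the
 * TBT-capable AUX channels start at channel C on ICL.
 */
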
478 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
479 				     struct i915_power_well *power_well)
480 {
481 	int pw_idx = power_well->desc->hsw.idx;
482 
483 	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
484 						 ICL_AUX_PW_TO_CH(pw_idx);
485 }
486 
487 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
488 
489 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
490 
491 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
492 				      struct i915_power_well *power_well)
493 {
494 	int refs = hweight64(power_well->desc->domains &
495 			     async_put_domains_mask(&dev_priv->power_domains));
496 
497 	WARN_ON(refs > power_well->count);
498 
499 	return refs;
500 }
501 
502 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
503 					struct i915_power_well *power_well)
504 {
505 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
506 	struct intel_digital_port *dig_port = NULL;
507 	struct intel_encoder *encoder;
508 
509 	/* Bypass the check if all references are released asynchronously */
510 	if (power_well_async_ref_count(dev_priv, power_well) ==
511 	    power_well->count)
512 		return;
513 
514 	aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
515 
516 	for_each_intel_encoder(&dev_priv->drm, encoder) {
517 		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
518 
519 		if (!intel_phy_is_tc(dev_priv, phy))
520 			continue;
521 
522 		/* We'll check the MST primary port */
523 		if (encoder->type == INTEL_OUTPUT_DP_MST)
524 			continue;
525 
526 		dig_port = enc_to_dig_port(encoder);
527 		if (WARN_ON(!dig_port))
528 			continue;
529 
530 		if (dig_port->aux_ch != aux_ch) {
531 			dig_port = NULL;
532 			continue;
533 		}
534 
535 		break;
536 	}
537 
538 	if (WARN_ON(!dig_port))
539 		return;
540 
541 	WARN_ON(!intel_tc_port_ref_held(dig_port));
542 }
543 
544 #else
545 
546 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
547 					struct i915_power_well *power_well)
548 {
549 }
550 
551 #endif
552 
553 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
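/* E.g. the well index for AUX TC1 maps to the first TC port, TC2 to the second, and so on. */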
554 
555 static void
556 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
557 				 struct i915_power_well *power_well)
558 {
559 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
560 	u32 val;
561 
562 	icl_tc_port_assert_ref_held(dev_priv, power_well);
563 
564 	val = I915_READ(DP_AUX_CH_CTL(aux_ch));
565 	val &= ~DP_AUX_CH_CTL_TBT_IO;
566 	if (power_well->desc->hsw.is_tc_tbt)
567 		val |= DP_AUX_CH_CTL_TBT_IO;
568 	I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
569 
570 	hsw_power_well_enable(dev_priv, power_well);
571 
572 	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
573 		enum tc_port tc_port;
574 
575 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
576 		I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
577 
578 		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
579 					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
580 			DRM_WARN("Timeout waiting for TC uC health\n");
581 	}
582 }
583 
584 static void
585 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
586 				  struct i915_power_well *power_well)
587 {
588 	icl_tc_port_assert_ref_held(dev_priv, power_well);
589 
590 	hsw_power_well_disable(dev_priv, power_well);
591 }
592 
593 /*
594  * We should only use the power well if we explicitly asked the hardware to
595  * enable it, so check if it's enabled and also check if we've requested it to
596  * be enabled.
597  */
598 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
599 				   struct i915_power_well *power_well)
600 {
601 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
602 	enum i915_power_well_id id = power_well->desc->id;
603 	int pw_idx = power_well->desc->hsw.idx;
604 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
605 		   HSW_PWR_WELL_CTL_STATE(pw_idx);
606 	u32 val;
607 
608 	val = I915_READ(regs->driver);
609 
610 	/*
611 	 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
612 	 * and the MISC_IO PW will not be restored, so check instead for the
613 	 * BIOS's own request bits, which are forced-on for these power wells
614 	 * when exiting DC5/6.
615 	 */
616 	if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
617 	    (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
618 		val |= I915_READ(regs->bios);
619 
620 	return (val & mask) == mask;
621 }
622 
623 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
624 {
625 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
626 		  "DC9 already programmed to be enabled.\n");
627 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
628 		  "DC5 still not disabled to enable DC9.\n");
629 	WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
630 		  HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
631 		  "Power well 2 on.\n");
632 	WARN_ONCE(intel_irqs_enabled(dev_priv),
633 		  "Interrupts not disabled yet.\n");
634 
635 	 /*
636 	  * TODO: check for the following to verify the conditions to enter DC9
637 	  * state are satisfied:
638 	  * 1] Check relevant display engine registers to verify if mode set
639 	  * disable sequence was followed.
640 	  * 2] Check if the display uninitialize sequence has been initiated.
641 	  */
642 }
643 
644 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
645 {
646 	WARN_ONCE(intel_irqs_enabled(dev_priv),
647 		  "Interrupts not disabled yet.\n");
648 	WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
649 		  "DC5 still not disabled.\n");
650 
651 	 /*
652 	  * TODO: check for the following to verify DC9 state was indeed
653 	  * entered before programming to disable it:
654 	  * 1] Check relevant display engine registers to verify if mode
655 	  *  set disable sequence was followed.
656 	  * 2] Check if the display uninitialize sequence has been initiated.
657 	  */
658 }
659 
660 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
661 				u32 state)
662 {
663 	int rewrites = 0;
664 	int rereads = 0;
665 	u32 v;
666 
667 	I915_WRITE(DC_STATE_EN, state);
668 
669 	/* It has been observed that disabling the dc6 state sometimes
670 	 * doesn't stick and the DMC keeps returning the old value. Make sure
671 	 * the write really sticks by re-reading, and force a rewrite until
672 	 * we are confident the state is exactly what we want.
673 	 */
674 	do  {
675 		v = I915_READ(DC_STATE_EN);
676 
677 		if (v != state) {
678 			I915_WRITE(DC_STATE_EN, state);
679 			rewrites++;
680 			rereads = 0;
681 		} else if (rereads++ > 5) {
682 			break;
683 		}
684 
685 	} while (rewrites < 100);
686 
687 	if (v != state)
688 		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
689 			  state, v);
690 
691 	/* Most of the time a single retry is enough; avoid log spam */
692 	if (rewrites > 1)
693 		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
694 			      state, rewrites);
695 }
696 
697 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
698 {
699 	u32 mask;
700 
701 	mask = DC_STATE_EN_UPTO_DC5;
702 
703 	if (INTEL_GEN(dev_priv) >= 12)
704 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
705 					  | DC_STATE_EN_DC9;
706 	else if (IS_GEN(dev_priv, 11))
707 		mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
708 	else if (IS_GEN9_LP(dev_priv))
709 		mask |= DC_STATE_EN_DC9;
710 	else
711 		mask |= DC_STATE_EN_UPTO_DC6;
712 
713 	return mask;
714 }
715 
716 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
717 {
718 	u32 val;
719 
720 	val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
721 
722 	DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
723 		      dev_priv->csr.dc_state, val);
724 	dev_priv->csr.dc_state = val;
725 }
726 
727 /**
728  * gen9_set_dc_state - set target display C power state
729  * @dev_priv: i915 device instance
730  * @state: target DC power state
731  * - DC_STATE_DISABLE
732  * - DC_STATE_EN_UPTO_DC5
733  * - DC_STATE_EN_UPTO_DC6
734  * - DC_STATE_EN_DC9
735  *
736  * Signal to DMC firmware/HW the target DC power state passed in @state.
737  * DMC/HW can turn off individual display clocks and power rails when entering
738  * a deeper DC power state (higher in number) and turns these back when exiting
739  * that state to a shallower power state (lower in number). The HW will decide
740  * when to actually enter a given state on an on-demand basis, for instance
741  * depending on the active state of display pipes. The state of display
742  * registers backed by affected power rails are saved/restored as needed.
743  *
744  * Based on the above, enabling a deeper DC power state is asynchronous wrt.
745  * the request. Disabling a deeper power state is synchronous: for instance
746  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
747  * back on and register state is restored. This is guaranteed by the MMIO write
748  * to DC_STATE_EN blocking until the state is restored.
749  */
750 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
751 {
752 	u32 val;
753 	u32 mask;
754 
755 	if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
756 		state &= dev_priv->csr.allowed_dc_mask;
757 
758 	val = I915_READ(DC_STATE_EN);
759 	mask = gen9_dc_mask(dev_priv);
760 	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
761 		      val & mask, state);
762 
763 	/* Check if DMC is ignoring our DC state requests */
764 	if ((val & mask) != dev_priv->csr.dc_state)
765 		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
766 			  dev_priv->csr.dc_state, val & mask);
767 
768 	val &= ~mask;
769 	val |= state;
770 
771 	gen9_write_dc_state(dev_priv, val);
772 
773 	dev_priv->csr.dc_state = val & mask;
774 }
775 
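/*
 * Illustrative call, following the kernel-doc above: on a gen9+ part,
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *
 * merely allows DMC/HW to enter DC6 on demand, while
 * gen9_set_dc_state(dev_priv, DC_STATE_DISABLE) does not complete until
 * the hardware is powered back up and register state is restored.
 */
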
776 static u32
777 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
778 			 u32 target_dc_state)
779 {
780 	u32 states[] = {
781 		DC_STATE_EN_UPTO_DC6,
782 		DC_STATE_EN_UPTO_DC5,
783 		DC_STATE_EN_DC3CO,
784 		DC_STATE_DISABLE,
785 	};
786 	int i;
787 
788 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
789 		if (target_dc_state != states[i])
790 			continue;
791 
792 		if (dev_priv->csr.allowed_dc_mask & target_dc_state)
793 			break;
794 
795 		target_dc_state = states[i + 1];
796 	}
797 
798 	return target_dc_state;
799 }
800 
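/*
 * Example of the fallback above: a request for DC_STATE_EN_UPTO_DC6 on a
 * platform whose allowed_dc_mask lacks DC6 is demoted to
 * DC_STATE_EN_UPTO_DC5, then to DC_STATE_EN_DC3CO, and finally to
 * DC_STATE_DISABLE, which is always accepted.
 */
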
801 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
802 {
803 	DRM_DEBUG_KMS("Enabling DC3CO\n");
804 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
805 }
806 
807 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
808 {
809 	u32 val;
810 
811 	DRM_DEBUG_KMS("Disabling DC3CO\n");
812 	val = I915_READ(DC_STATE_EN);
813 	val &= ~DC_STATE_DC3CO_STATUS;
814 	I915_WRITE(DC_STATE_EN, val);
815 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
816 	/*
817 	 * Delay of 200us for the DC3CO exit time, per Bspec 49196
818 	 */
819 	usleep_range(200, 210);
820 }
821 
822 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
823 {
824 	assert_can_enable_dc9(dev_priv);
825 
826 	DRM_DEBUG_KMS("Enabling DC9\n");
827 	/*
828 	 * Power sequencer reset is not needed on
829 	 * platforms with South Display Engine on PCH,
830 	 * because PPS registers are always on.
831 	 */
832 	if (!HAS_PCH_SPLIT(dev_priv))
833 		intel_power_sequencer_reset(dev_priv);
834 	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
835 }
836 
837 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
838 {
839 	assert_can_disable_dc9(dev_priv);
840 
841 	DRM_DEBUG_KMS("Disabling DC9\n");
842 
843 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
844 
845 	intel_pps_unlock_regs_wa(dev_priv);
846 }
847 
848 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
849 {
850 	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
851 		  "CSR program storage start is NULL\n");
852 	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
853 	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
854 }
855 
856 static struct i915_power_well *
857 lookup_power_well(struct drm_i915_private *dev_priv,
858 		  enum i915_power_well_id power_well_id)
859 {
860 	struct i915_power_well *power_well;
861 
862 	for_each_power_well(dev_priv, power_well)
863 		if (power_well->desc->id == power_well_id)
864 			return power_well;
865 
866 	/*
867 	 * It's not feasible to add error checking code to the callers since
868 	 * this condition really shouldn't happen and it doesn't even make sense
869 	 * to abort things like display initialization sequences. Just return
870 	 * the first power well and hope the WARN gets reported so we can fix
871 	 * our driver.
872 	 */
873 	WARN(1, "Power well %d not defined for this platform\n", power_well_id);
874 	return &dev_priv->power_domains.power_wells[0];
875 }
876 
877 /**
878  * intel_display_power_set_target_dc_state - Set target dc state.
879  * @dev_priv: i915 device
880  * @state: state which needs to be set as target_dc_state.
881  *
882  * This function sets the "DC off" power well's target_dc_state;
883  * based upon this target_dc_state, the "DC off" power well will
884  * enable the desired DC state.
885  */
886 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
887 					     u32 state)
888 {
889 	struct i915_power_well *power_well;
890 	bool dc_off_enabled;
891 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
892 
893 	mutex_lock(&power_domains->lock);
894 	power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
895 
896 	if (WARN_ON(!power_well))
897 		goto unlock;
898 
899 	state = sanitize_target_dc_state(dev_priv, state);
900 
901 	if (state == dev_priv->csr.target_dc_state)
902 		goto unlock;
903 
904 	dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
905 							   power_well);
906 	/*
907 	 * If the DC off power well is currently disabled, we need to enable
908 	 * and then disable it so that the new target DC state takes effect.
909 	 */
910 	if (!dc_off_enabled)
911 		power_well->desc->ops->enable(dev_priv, power_well);
912 
913 	dev_priv->csr.target_dc_state = state;
914 
915 	if (!dc_off_enabled)
916 		power_well->desc->ops->disable(dev_priv, power_well);
917 
918 unlock:
919 	mutex_unlock(&power_domains->lock);
920 }
921 
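/*
 * A hedged usage sketch: a caller that prefers DC3CO over DC5/DC6
 * (e.g. PSR2 code) could request
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *
 * and later restore DC_STATE_EN_UPTO_DC6; unsupported states are
 * sanitized against csr.allowed_dc_mask and quietly demoted.
 */
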
922 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
923 {
924 	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
925 					SKL_DISP_PW_2);
926 
927 	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
928 
929 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
930 		  "DC5 already programmed to be enabled.\n");
931 	assert_rpm_wakelock_held(&dev_priv->runtime_pm);
932 
933 	assert_csr_loaded(dev_priv);
934 }
935 
936 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
937 {
938 	assert_can_enable_dc5(dev_priv);
939 
940 	DRM_DEBUG_KMS("Enabling DC5\n");
941 
942 	/* Wa Display #1183: skl,kbl,cfl */
943 	if (IS_GEN9_BC(dev_priv))
944 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
945 			   SKL_SELECT_ALTERNATE_DC_EXIT);
946 
947 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
948 }
949 
950 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
951 {
952 	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
953 		  "Backlight is not disabled.\n");
954 	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
955 		  "DC6 already programmed to be enabled.\n");
956 
957 	assert_csr_loaded(dev_priv);
958 }
959 
960 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
961 {
962 	assert_can_enable_dc6(dev_priv);
963 
964 	DRM_DEBUG_KMS("Enabling DC6\n");
965 
966 	/* Wa Display #1183: skl,kbl,cfl */
967 	if (IS_GEN9_BC(dev_priv))
968 		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
969 			   SKL_SELECT_ALTERNATE_DC_EXIT);
970 
971 	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
972 }
973 
974 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
975 				   struct i915_power_well *power_well)
976 {
977 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
978 	int pw_idx = power_well->desc->hsw.idx;
979 	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
980 	u32 bios_req = I915_READ(regs->bios);
981 
982 	/* Take over the request bit if set by BIOS. */
983 	if (bios_req & mask) {
984 		u32 drv_req = I915_READ(regs->driver);
985 
986 		if (!(drv_req & mask))
987 			I915_WRITE(regs->driver, drv_req | mask);
988 		I915_WRITE(regs->bios, bios_req & ~mask);
989 	}
990 }
991 
992 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
993 					   struct i915_power_well *power_well)
994 {
995 	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
996 }
997 
998 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
999 					    struct i915_power_well *power_well)
1000 {
1001 	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1002 }
1003 
1004 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1005 					    struct i915_power_well *power_well)
1006 {
1007 	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1008 }
1009 
1010 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1011 {
1012 	struct i915_power_well *power_well;
1013 
1014 	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1015 	if (power_well->count > 0)
1016 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1017 
1018 	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1019 	if (power_well->count > 0)
1020 		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1021 
1022 	if (IS_GEMINILAKE(dev_priv)) {
1023 		power_well = lookup_power_well(dev_priv,
1024 					       GLK_DISP_PW_DPIO_CMN_C);
1025 		if (power_well->count > 0)
1026 			bxt_ddi_phy_verify_state(dev_priv,
1027 						 power_well->desc->bxt.phy);
1028 	}
1029 }
1030 
1031 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1032 					   struct i915_power_well *power_well)
1033 {
1034 	return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1035 		(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1036 }
1037 
1038 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1039 {
1040 	u32 tmp = I915_READ(DBUF_CTL);
1041 
1042 	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1043 	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1044 	     "Unexpected DBuf power state (0x%08x)\n", tmp);
1045 }
1046 
1047 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1048 {
1049 	struct intel_cdclk_state cdclk_state = {};
1050 
1051 	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1052 		tgl_disable_dc3co(dev_priv);
1053 		return;
1054 	}
1055 
1056 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1057 
1058 	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1059 	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
1060 	WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
1061 
1062 	gen9_assert_dbuf_enabled(dev_priv);
1063 
1064 	if (IS_GEN9_LP(dev_priv))
1065 		bxt_verify_ddi_phy_power_wells(dev_priv);
1066 
1067 	if (INTEL_GEN(dev_priv) >= 11)
1068 		/*
1069 		 * DMC retains HW context only for port A, the other combo
1070 		 * PHY's HW context for port B is lost after DC transitions,
1071 		 * so we need to restore it manually.
1072 		 */
1073 		intel_combo_phy_init(dev_priv);
1074 }
1075 
1076 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1077 					  struct i915_power_well *power_well)
1078 {
1079 	gen9_disable_dc_states(dev_priv);
1080 }
1081 
1082 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1083 					   struct i915_power_well *power_well)
1084 {
1085 	if (!dev_priv->csr.dmc_payload)
1086 		return;
1087 
1088 	switch (dev_priv->csr.target_dc_state) {
1089 	case DC_STATE_EN_DC3CO:
1090 		tgl_enable_dc3co(dev_priv);
1091 		break;
1092 	case DC_STATE_EN_UPTO_DC6:
1093 		skl_enable_dc6(dev_priv);
1094 		break;
1095 	case DC_STATE_EN_UPTO_DC5:
1096 		gen9_enable_dc5(dev_priv);
1097 		break;
1098 	}
1099 }
1100 
1101 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1102 					 struct i915_power_well *power_well)
1103 {
1104 }
1105 
1106 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1107 					   struct i915_power_well *power_well)
1108 {
1109 }
1110 
1111 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1112 					     struct i915_power_well *power_well)
1113 {
1114 	return true;
1115 }
1116 
1117 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1118 					 struct i915_power_well *power_well)
1119 {
1120 	if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1121 		i830_enable_pipe(dev_priv, PIPE_A);
1122 	if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1123 		i830_enable_pipe(dev_priv, PIPE_B);
1124 }
1125 
1126 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1127 					  struct i915_power_well *power_well)
1128 {
1129 	i830_disable_pipe(dev_priv, PIPE_B);
1130 	i830_disable_pipe(dev_priv, PIPE_A);
1131 }
1132 
1133 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1134 					  struct i915_power_well *power_well)
1135 {
1136 	return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1137 		I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1138 }
1139 
1140 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1141 					  struct i915_power_well *power_well)
1142 {
1143 	if (power_well->count > 0)
1144 		i830_pipes_power_well_enable(dev_priv, power_well);
1145 	else
1146 		i830_pipes_power_well_disable(dev_priv, power_well);
1147 }
1148 
1149 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1150 			       struct i915_power_well *power_well, bool enable)
1151 {
1152 	int pw_idx = power_well->desc->vlv.idx;
1153 	u32 mask;
1154 	u32 state;
1155 	u32 ctrl;
1156 
1157 	mask = PUNIT_PWRGT_MASK(pw_idx);
1158 	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1159 			 PUNIT_PWRGT_PWR_GATE(pw_idx);
1160 
1161 	vlv_punit_get(dev_priv);
1162 
1163 #define COND \
1164 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1165 
1166 	if (COND)
1167 		goto out;
1168 
1169 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1170 	ctrl &= ~mask;
1171 	ctrl |= state;
1172 	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1173 
1174 	if (wait_for(COND, 100))
1175 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1176 			  state,
1177 			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1178 
1179 #undef COND
1180 
1181 out:
1182 	vlv_punit_put(dev_priv);
1183 }
1184 
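/*
 * Sketch of the punit handshake above: PUNIT_PWRGT_CTRL carries the
 * requested state and PUNIT_PWRGT_STATUS the acknowledged one, both in
 * the same per-well field, so completion is simply
 *
 *	(vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state
 */
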
1185 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1186 				  struct i915_power_well *power_well)
1187 {
1188 	vlv_set_power_well(dev_priv, power_well, true);
1189 }
1190 
1191 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1192 				   struct i915_power_well *power_well)
1193 {
1194 	vlv_set_power_well(dev_priv, power_well, false);
1195 }
1196 
1197 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1198 				   struct i915_power_well *power_well)
1199 {
1200 	int pw_idx = power_well->desc->vlv.idx;
1201 	bool enabled = false;
1202 	u32 mask;
1203 	u32 state;
1204 	u32 ctrl;
1205 
1206 	mask = PUNIT_PWRGT_MASK(pw_idx);
1207 	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1208 
1209 	vlv_punit_get(dev_priv);
1210 
1211 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1212 	/*
1213 	 * We only ever set the power-on and power-gate states, anything
1214 	 * else is unexpected.
1215 	 */
1216 	WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1217 		state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1218 	if (state == ctrl)
1219 		enabled = true;
1220 
1221 	/*
1222 	 * A transient state at this point would mean some unexpected party
1223 	 * is poking at the power controls too.
1224 	 */
1225 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1226 	WARN_ON(ctrl != state);
1227 
1228 	vlv_punit_put(dev_priv);
1229 
1230 	return enabled;
1231 }
1232 
1233 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1234 {
1235 	u32 val;
1236 
1237 	/*
1238 	 * On driver load, a pipe may be active and driving a DSI display.
1239 	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1240 	 * (and never recovering) in this case. intel_dsi_post_disable() will
1241 	 * clear it when we turn off the display.
1242 	 */
1243 	val = I915_READ(DSPCLK_GATE_D);
1244 	val &= DPOUNIT_CLOCK_GATE_DISABLE;
1245 	val |= VRHUNIT_CLOCK_GATE_DISABLE;
1246 	I915_WRITE(DSPCLK_GATE_D, val);
1247 
1248 	/*
1249 	 * Disable trickle feed and enable pnd deadline calculation
1250 	 */
1251 	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1252 	I915_WRITE(CBR1_VLV, 0);
1253 
1254 	WARN_ON(dev_priv->rawclk_freq == 0);
1255 
1256 	I915_WRITE(RAWCLK_FREQ_VLV,
1257 		   DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1258 }
1259 
1260 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1261 {
1262 	struct intel_encoder *encoder;
1263 	enum pipe pipe;
1264 
1265 	/*
1266 	 * Enable the CRI clock source so we can get at the
1267 	 * display and the reference clock for VGA
1268 	 * hotplug / manual detection. Supposedly DSI also
1269 	 * needs the ref clock up and running.
1270 	 *
1271 	 * CHV DPLL B/C have some issues if VGA mode is enabled.
1272 	 */
1273 	for_each_pipe(dev_priv, pipe) {
1274 		u32 val = I915_READ(DPLL(pipe));
1275 
1276 		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1277 		if (pipe != PIPE_A)
1278 			val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1279 
1280 		I915_WRITE(DPLL(pipe), val);
1281 	}
1282 
1283 	vlv_init_display_clock_gating(dev_priv);
1284 
1285 	spin_lock_irq(&dev_priv->irq_lock);
1286 	valleyview_enable_display_irqs(dev_priv);
1287 	spin_unlock_irq(&dev_priv->irq_lock);
1288 
1289 	/*
1290 	 * During driver initialization/resume we can skip restoring the
1291 	 * part of the HW/SW state that will be initialized explicitly anyway.
1292 	 */
1293 	if (dev_priv->power_domains.initializing)
1294 		return;
1295 
1296 	intel_hpd_init(dev_priv);
1297 
1298 	/* Re-enable the ADPA, if we have one */
1299 	for_each_intel_encoder(&dev_priv->drm, encoder) {
1300 		if (encoder->type == INTEL_OUTPUT_ANALOG)
1301 			intel_crt_reset(&encoder->base);
1302 	}
1303 
1304 	intel_vga_redisable_power_on(dev_priv);
1305 
1306 	intel_pps_unlock_regs_wa(dev_priv);
1307 }
1308 
1309 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1310 {
1311 	spin_lock_irq(&dev_priv->irq_lock);
1312 	valleyview_disable_display_irqs(dev_priv);
1313 	spin_unlock_irq(&dev_priv->irq_lock);
1314 
1315 	/* make sure we're done processing display irqs */
1316 	intel_synchronize_irq(dev_priv);
1317 
1318 	intel_power_sequencer_reset(dev_priv);
1319 
1320 	/* Prevent us from re-enabling polling by accident in late suspend */
1321 #ifdef __NetBSD__
1322 	if (device_activation(dev_priv->drm.dev, DEVACT_LEVEL_FULL))
1323 #else
1324 	if (!dev_priv->drm.dev->power.is_suspended)
1325 #endif
1326 		intel_hpd_poll_init(dev_priv);
1327 }
1328 
1329 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1330 					  struct i915_power_well *power_well)
1331 {
1332 	vlv_set_power_well(dev_priv, power_well, true);
1333 
1334 	vlv_display_power_well_init(dev_priv);
1335 }
1336 
1337 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1338 					   struct i915_power_well *power_well)
1339 {
1340 	vlv_display_power_well_deinit(dev_priv);
1341 
1342 	vlv_set_power_well(dev_priv, power_well, false);
1343 }
1344 
1345 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1346 					   struct i915_power_well *power_well)
1347 {
1348 	/* since ref/cri clock was enabled */
1349 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1350 
1351 	vlv_set_power_well(dev_priv, power_well, true);
1352 
1353 	/*
1354 	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1355 	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
1356 	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
1357 	 *   b.	The other bits such as sfr settings / modesel may all
1358 	 *	be set to 0.
1359 	 *
1360 	 * This should only be done on init and resume from S3 with
1361 	 * both PLLs disabled, or we risk losing DPIO and PLL
1362 	 * synchronization.
1363 	 */
1364 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1365 }
1366 
1367 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1368 					    struct i915_power_well *power_well)
1369 {
1370 	enum pipe pipe;
1371 
1372 	for_each_pipe(dev_priv, pipe)
1373 		assert_pll_disabled(dev_priv, pipe);
1374 
1375 	/* Assert common reset */
1376 	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1377 
1378 	vlv_set_power_well(dev_priv, power_well, false);
1379 }
1380 
1381 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1382 
1383 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1384 
1385 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1386 {
1387 	struct i915_power_well *cmn_bc =
1388 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1389 	struct i915_power_well *cmn_d =
1390 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1391 	u32 phy_control = dev_priv->chv_phy_control;
1392 	u32 phy_status = 0;
1393 	u32 phy_status_mask = 0xffffffff;
1394 
1395 	/*
1396 	 * The BIOS can leave the PHY in some weird state
1397 	 * where it doesn't fully power down some parts.
1398 	 * Disable the asserts until the PHY has been fully
1399 	 * reset (ie. the power well has been disabled at
1400 	 * least once).
1401 	 */
1402 	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1403 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1404 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1405 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1406 				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1407 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1408 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1409 
1410 	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1411 		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1412 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1413 				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1414 
1415 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1416 		phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1417 
1418 		/* this assumes override is only used to enable lanes */
1419 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1420 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1421 
1422 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1423 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1424 
1425 		/* CL1 is on whenever anything is on in either channel */
1426 		if (BITS_SET(phy_control,
1427 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1428 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1429 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1430 
1431 		/*
1432 		 * The DPLLB check accounts for the pipe B + port A usage
1433 		 * with CL2 powered up but all the lanes in the second channel
1434 		 * powered down.
1435 		 */
1436 		if (BITS_SET(phy_control,
1437 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1438 		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1439 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1440 
1441 		if (BITS_SET(phy_control,
1442 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1443 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1444 		if (BITS_SET(phy_control,
1445 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1446 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1447 
1448 		if (BITS_SET(phy_control,
1449 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1450 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1451 		if (BITS_SET(phy_control,
1452 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1453 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1454 	}
1455 
1456 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1457 		phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1458 
1459 		/* this assumes override is only used to enable lanes */
1460 		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1461 			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1462 
1463 		if (BITS_SET(phy_control,
1464 			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1465 			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1466 
1467 		if (BITS_SET(phy_control,
1468 			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1469 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1470 		if (BITS_SET(phy_control,
1471 			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1472 			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1473 	}
1474 
1475 	phy_status &= phy_status_mask;
1476 
1477 	/*
1478 	 * The PHY may be busy with some initial calibration and whatnot,
1479 	 * so the power state can take a while to actually change.
1480 	 */
1481 	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1482 				       phy_status_mask, phy_status, 10))
1483 		DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1484 			  I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1485 			   phy_status, dev_priv->chv_phy_control);
1486 }
1487 
1488 #undef BITS_SET
1489 
1490 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1491 					   struct i915_power_well *power_well)
1492 {
1493 	enum dpio_phy phy;
1494 	enum pipe pipe;
1495 	u32 tmp;
1496 
1497 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1498 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1499 
1500 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1501 		pipe = PIPE_A;
1502 		phy = DPIO_PHY0;
1503 	} else {
1504 		pipe = PIPE_C;
1505 		phy = DPIO_PHY1;
1506 	}
1507 
1508 	/* since ref/cri clock was enabled */
1509 	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1510 	vlv_set_power_well(dev_priv, power_well, true);
1511 
1512 	/* Poll for phypwrgood signal */
1513 	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1514 				  PHY_POWERGOOD(phy), 1))
1515 		DRM_ERROR("Display PHY %d did not power up\n", phy);
1516 
1517 	vlv_dpio_get(dev_priv);
1518 
1519 	/* Enable dynamic power down */
1520 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1521 	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1522 		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1523 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1524 
1525 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1526 		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1527 		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1528 		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1529 	} else {
1530 		/*
1531 		 * Force the non-existent CL2 off. BXT does this
1532 		 * too, so maybe it saves some power even though
1533 		 * CL2 doesn't exist?
1534 		 */
1535 		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1536 		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1537 		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1538 	}
1539 
1540 	vlv_dpio_put(dev_priv);
1541 
1542 	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1543 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1544 
1545 	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1546 		      phy, dev_priv->chv_phy_control);
1547 
1548 	assert_chv_phy_status(dev_priv);
1549 }
1550 
1551 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1552 					    struct i915_power_well *power_well)
1553 {
1554 	enum dpio_phy phy;
1555 
1556 	WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1557 		     power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1558 
1559 	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1560 		phy = DPIO_PHY0;
1561 		assert_pll_disabled(dev_priv, PIPE_A);
1562 		assert_pll_disabled(dev_priv, PIPE_B);
1563 	} else {
1564 		phy = DPIO_PHY1;
1565 		assert_pll_disabled(dev_priv, PIPE_C);
1566 	}
1567 
1568 	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1569 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1570 
1571 	vlv_set_power_well(dev_priv, power_well, false);
1572 
1573 	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1574 		      phy, dev_priv->chv_phy_control);
1575 
1576 	/* PHY is fully reset now, so we can enable the PHY state asserts */
1577 	dev_priv->chv_phy_assert[phy] = true;
1578 
1579 	assert_chv_phy_status(dev_priv);
1580 }
1581 
1582 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1583 				     enum dpio_channel ch, bool override, unsigned int mask)
1584 {
1585 	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1586 	u32 reg, val, expected, actual;
1587 
1588 	/*
1589 	 * The BIOS can leave the PHY in some weird state
1590 	 * where it doesn't fully power down some parts.
1591 	 * Disable the asserts until the PHY has been fully
1592 	 * reset (i.e. the power well has been disabled at
1593 	 * least once).
1594 	 */
1595 	if (!dev_priv->chv_phy_assert[phy])
1596 		return;
1597 
1598 	if (ch == DPIO_CH0)
1599 		reg = _CHV_CMN_DW0_CH0;
1600 	else
1601 		reg = _CHV_CMN_DW6_CH1;
1602 
1603 	vlv_dpio_get(dev_priv);
1604 	val = vlv_dpio_read(dev_priv, pipe, reg);
1605 	vlv_dpio_put(dev_priv);
1606 
1607 	/*
1608 	 * This assumes !override is only used when the port is disabled.
1609 	 * All lanes should power down even without the override when
1610 	 * the port is disabled.
1611 	 */
1612 	if (!override || mask == 0xf) {
1613 		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1614 		/*
1615 		 * If CH1 common lane is not active anymore
1616 		 * (eg. for pipe B DPLL) the entire channel will
1617 		 * shut down, which causes the common lane registers
1618 		 * to read as 0. That means we can't actually check
1619 		 * the lane power down status bits, but as the entire
1620 		 * register reads as 0 it's a good indication that the
1621 		 * channel is indeed entirely powered down.
1622 		 */
1623 		if (ch == DPIO_CH1 && val == 0)
1624 			expected = 0;
1625 	} else if (mask != 0x0) {
1626 		expected = DPIO_ANYDL_POWERDOWN;
1627 	} else {
1628 		expected = 0;
1629 	}
1630 
1631 	if (ch == DPIO_CH0)
1632 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1633 	else
1634 		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1635 	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1636 
1637 	WARN(actual != expected,
1638 	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1639 	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1640 	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1641 	     reg, val);
1642 }
1643 
1644 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1645 			  enum dpio_channel ch, bool override)
1646 {
1647 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1648 	bool was_override;
1649 
1650 	mutex_lock(&power_domains->lock);
1651 
1652 	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1653 
1654 	if (override == was_override)
1655 		goto out;
1656 
1657 	if (override)
1658 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1659 	else
1660 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1661 
1662 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1663 
1664 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1665 		      phy, ch, dev_priv->chv_phy_control);
1666 
1667 	assert_chv_phy_status(dev_priv);
1668 
1669 out:
1670 	mutex_unlock(&power_domains->lock);
1671 
1672 	return was_override;
1673 }
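/*
 * Usage sketch (illustrative, not a caller in this file): the previous
 * override state is returned, so a caller could flip the override on
 * temporarily and restore it afterwards:
 *
 *	bool was_override;
 *
 *	was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
 *	...program the channel while the override keeps it powered...
 *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
 */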
1674 
1675 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1676 			     bool override, unsigned int mask)
1677 {
1678 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1679 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1680 	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1681 	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1682 
1683 	mutex_lock(&power_domains->lock);
1684 
1685 	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1686 	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1687 
1688 	if (override)
1689 		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1690 	else
1691 		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1692 
1693 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1694 
1695 	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1696 		      phy, ch, mask, dev_priv->chv_phy_control);
1697 
1698 	assert_chv_phy_status(dev_priv);
1699 
1700 	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1701 
1702 	mutex_unlock(&power_domains->lock);
1703 }
1704 
1705 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1706 					struct i915_power_well *power_well)
1707 {
1708 	enum pipe pipe = PIPE_A;
1709 	bool enabled;
1710 	u32 state, ctrl;
1711 
1712 	vlv_punit_get(dev_priv);
1713 
1714 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1715 	/*
1716 	 * We only ever set the power-on and power-gate states, anything
1717 	 * else is unexpected.
1718 	 */
1719 	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1720 	enabled = state == DP_SSS_PWR_ON(pipe);
1721 
1722 	/*
1723 	 * A transient state at this point would mean some unexpected party
1724 	 * is poking at the power controls too.
1725 	 */
1726 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1727 	WARN_ON(ctrl << 16 != state);
1728 
1729 	vlv_punit_put(dev_priv);
1730 
1731 	return enabled;
1732 }
1733 
1734 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1735 				    struct i915_power_well *power_well,
1736 				    bool enable)
1737 {
1738 	enum pipe pipe = PIPE_A;
1739 	u32 state;
1740 	u32 ctrl;
1741 
1742 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1743 
1744 	vlv_punit_get(dev_priv);
1745 
1746 #define COND \
1747 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1748 
1749 	if (COND)
1750 		goto out;
1751 
1752 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1753 	ctrl &= ~DP_SSC_MASK(pipe);
1754 	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1755 	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1756 
1757 	if (wait_for(COND, 100))
1758 		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1759 			  state,
1760 			  vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1761 
1762 #undef COND
1763 
1764 out:
1765 	vlv_punit_put(dev_priv);
1766 }
1767 
1768 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1769 				       struct i915_power_well *power_well)
1770 {
1771 	chv_set_pipe_power_well(dev_priv, power_well, true);
1772 
1773 	vlv_display_power_well_init(dev_priv);
1774 }
1775 
1776 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1777 					struct i915_power_well *power_well)
1778 {
1779 	vlv_display_power_well_deinit(dev_priv);
1780 
1781 	chv_set_pipe_power_well(dev_priv, power_well, false);
1782 }
1783 
1784 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1785 {
1786 	return power_domains->async_put_domains[0] |
1787 	       power_domains->async_put_domains[1];
1788 }
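/*
 * Note on the two buckets (summary of the scheme implemented below):
 * async_put_domains[0] holds the domains whose deferred power-off is
 * covered by the currently queued work item, while async_put_domains[1]
 * collects domains put asynchronously while that work was already
 * pending. The work handler releases bucket [0], then promotes bucket
 * [1] into [0] and requeues itself.
 */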
1789 
1790 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1791 
1792 static bool
1793 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1794 {
1795 	return !WARN_ON(power_domains->async_put_domains[0] &
1796 			power_domains->async_put_domains[1]);
1797 }
1798 
1799 static bool
1800 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1801 {
1802 	enum intel_display_power_domain domain;
1803 	bool err = false;
1804 
1805 	err |= !assert_async_put_domain_masks_disjoint(power_domains);
1806 	err |= WARN_ON(!!power_domains->async_put_wakeref !=
1807 		       !!__async_put_domains_mask(power_domains));
1808 
1809 	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1810 		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1811 
1812 	return !err;
1813 }
1814 
1815 static void print_power_domains(struct i915_power_domains *power_domains,
1816 				const char *prefix, u64 mask)
1817 {
1818 	enum intel_display_power_domain domain;
1819 
1820 	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1821 	for_each_power_domain(domain, mask)
1822 		DRM_DEBUG_DRIVER("%s use_count %d\n",
1823 				 intel_display_power_domain_str(domain),
1824 				 power_domains->domain_use_count[domain]);
1825 }
1826 
1827 static void
1828 print_async_put_domains_state(struct i915_power_domains *power_domains)
1829 {
1830 	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1831 			 power_domains->async_put_wakeref);
1832 
1833 	print_power_domains(power_domains, "async_put_domains[0]",
1834 			    power_domains->async_put_domains[0]);
1835 	print_power_domains(power_domains, "async_put_domains[1]",
1836 			    power_domains->async_put_domains[1]);
1837 }
1838 
1839 static void
1840 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1841 {
1842 	if (!__async_put_domains_state_ok(power_domains))
1843 		print_async_put_domains_state(power_domains);
1844 }
1845 
1846 #else
1847 
1848 static void
1849 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1850 {
1851 }
1852 
1853 static void
1854 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1855 {
1856 }
1857 
1858 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1859 
1860 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1861 {
1862 	assert_async_put_domain_masks_disjoint(power_domains);
1863 
1864 	return __async_put_domains_mask(power_domains);
1865 }
1866 
1867 static void
1868 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1869 			       enum intel_display_power_domain domain)
1870 {
1871 	assert_async_put_domain_masks_disjoint(power_domains);
1872 
1873 	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1874 	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1875 }
1876 
1877 static bool
1878 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1879 				       enum intel_display_power_domain domain)
1880 {
1881 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1882 	bool ret = false;
1883 
1884 	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1885 		goto out_verify;
1886 
1887 	async_put_domains_clear_domain(power_domains, domain);
1888 
1889 	ret = true;
1890 
1891 	if (async_put_domains_mask(power_domains))
1892 		goto out_verify;
1893 
1894 	cancel_delayed_work(&power_domains->async_put_work);
1895 	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1896 				 fetch_and_zero(&power_domains->async_put_wakeref));
1897 out_verify:
1898 	verify_async_put_domains_state(power_domains);
1899 
1900 	return ret;
1901 }
1902 
1903 static void
1904 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1905 				 enum intel_display_power_domain domain)
1906 {
1907 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1908 	struct i915_power_well *power_well;
1909 
1910 	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1911 		return;
1912 
1913 	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1914 		intel_power_well_get(dev_priv, power_well);
1915 
1916 	power_domains->domain_use_count[domain]++;
1917 }
1918 
1919 /**
1920  * intel_display_power_get - grab a power domain reference
1921  * @dev_priv: i915 device instance
1922  * @domain: power domain to reference
1923  *
1924  * This function grabs a power domain reference for @domain and ensures that the
1925  * power domain and all its parents are powered up. Therefore users should only
1926  * grab a reference to the innermost power domain they need.
1927  *
1928  * Any power domain reference obtained by this function must have a symmetric
1929  * call to intel_display_power_put() to release the reference again.
1930  */
1931 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1932 					enum intel_display_power_domain domain)
1933 {
1934 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1935 	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1936 
1937 	mutex_lock(&power_domains->lock);
1938 	__intel_display_power_get_domain(dev_priv, domain);
1939 	mutex_unlock(&power_domains->lock);
1940 
1941 	return wakeref;
1942 }
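/*
 * Usage sketch (illustrative): every get must be balanced by a put with
 * the same wakeref, e.g.
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
 *	...access the AUX hardware...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
 */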
1943 
1944 /**
1945  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1946  * @dev_priv: i915 device instance
1947  * @domain: power domain to reference
1948  *
1949  * This function grabs a power domain reference for @domain and ensures that the
1950  * power domain and all its parents are powered up. Therefore users should only
1951  * grab a reference to the innermost power domain they need.
1952  *
1953  * Any power domain reference obtained by this function must have a symmetric
1954  * call to intel_display_power_put() to release the reference again.
1955  */
1956 intel_wakeref_t
1957 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1958 				   enum intel_display_power_domain domain)
1959 {
1960 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1961 	intel_wakeref_t wakeref;
1962 	bool is_enabled;
1963 
1964 	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1965 	if (!wakeref)
1966 		return 0;
1967 
1968 	mutex_lock(&power_domains->lock);
1969 
1970 	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1971 		__intel_display_power_get_domain(dev_priv, domain);
1972 		is_enabled = true;
1973 	} else {
1974 		is_enabled = false;
1975 	}
1976 
1977 	mutex_unlock(&power_domains->lock);
1978 
1979 	if (!is_enabled) {
1980 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1981 		wakeref = 0;
1982 	}
1983 
1984 	return wakeref;
1985 }
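/*
 * Usage sketch (illustrative): unlike intel_display_power_get(), the
 * conditional variant can fail, so the returned wakeref must be checked
 * before touching the hardware:
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
 *	if (!wakeref)
 *		return;	...domain is powered down, skip the access...
 *	...read the hardware state...
 *	intel_display_power_put(dev_priv, domain, wakeref);
 */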
1986 
1987 static void
1988 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1989 				 enum intel_display_power_domain domain)
1990 {
1991 	struct i915_power_domains *power_domains;
1992 	struct i915_power_well *power_well;
1993 	const char *name = intel_display_power_domain_str(domain);
1994 
1995 	power_domains = &dev_priv->power_domains;
1996 
1997 	WARN(!power_domains->domain_use_count[domain],
1998 	     "Use count on domain %s is already zero\n",
1999 	     name);
2000 	WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
2001 	     "Async disabling of domain %s is pending\n",
2002 	     name);
2003 
2004 	power_domains->domain_use_count[domain]--;
2005 
2006 	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2007 		intel_power_well_put(dev_priv, power_well);
2008 }
2009 
2010 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2011 				      enum intel_display_power_domain domain)
2012 {
2013 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2014 
2015 	mutex_lock(&power_domains->lock);
2016 	__intel_display_power_put_domain(dev_priv, domain);
2017 	mutex_unlock(&power_domains->lock);
2018 }
2019 
2020 /**
2021  * intel_display_power_put_unchecked - release an unchecked power domain reference
2022  * @dev_priv: i915 device instance
2023  * @domain: power domain to reference
2024  *
2025  * This function drops the power domain reference obtained by
2026  * intel_display_power_get() and might power down the corresponding hardware
2027  * block right away if this is the last reference.
2028  *
2029  * This function exists only for historical reasons and should be avoided in
2030  * new code, as the correctness of its use cannot be checked. Always use
2031  * intel_display_power_put() instead.
2032  */
2033 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2034 				       enum intel_display_power_domain domain)
2035 {
2036 	__intel_display_power_put(dev_priv, domain);
2037 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2038 }
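/*
 * Usage sketch (illustrative): the unchecked variant pairs with a get
 * whose wakeref the caller never tracked; new code should keep the
 * wakeref and call intel_display_power_put() instead:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
 *	...
 *	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
 */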
2039 
2040 static void
2041 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2042 			     intel_wakeref_t wakeref)
2043 {
2044 	WARN_ON(power_domains->async_put_wakeref);
2045 	power_domains->async_put_wakeref = wakeref;
2046 	WARN_ON(!queue_delayed_work(system_unbound_wq,
2047 				    &power_domains->async_put_work,
2048 				    msecs_to_jiffies(100)));
2049 }
2050 
2051 static void
2052 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2053 {
2054 	struct drm_i915_private *dev_priv =
2055 		container_of(power_domains, struct drm_i915_private,
2056 			     power_domains);
2057 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2058 	enum intel_display_power_domain domain;
2059 	intel_wakeref_t wakeref;
2060 
2061 	/*
2062 	 * The caller must already hold a raw wakeref; upgrade that to a proper
2063 	 * wakeref to make the state checker happy about the HW access during
2064 	 * power well disabling.
2065 	 */
2066 	assert_rpm_raw_wakeref_held(rpm);
2067 	wakeref = intel_runtime_pm_get(rpm);
2068 
2069 	for_each_power_domain(domain, mask) {
2070 		/* Clear before put, so put's sanity check is happy. */
2071 		async_put_domains_clear_domain(power_domains, domain);
2072 		__intel_display_power_put_domain(dev_priv, domain);
2073 	}
2074 
2075 	intel_runtime_pm_put(rpm, wakeref);
2076 }
2077 
2078 static void
2079 intel_display_power_put_async_work(struct work_struct *work)
2080 {
2081 	struct drm_i915_private *dev_priv =
2082 		container_of(work, struct drm_i915_private,
2083 			     power_domains.async_put_work.work);
2084 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2085 	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2086 	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2087 	intel_wakeref_t old_work_wakeref = 0;
2088 
2089 	mutex_lock(&power_domains->lock);
2090 
2091 	/*
2092 	 * Bail out if all the domain refs pending to be released were grabbed
2093 	 * by subsequent gets or a flush_work.
2094 	 */
2095 	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2096 	if (!old_work_wakeref)
2097 		goto out_verify;
2098 
2099 	release_async_put_domains(power_domains,
2100 				  power_domains->async_put_domains[0]);
2101 
2102 	/* Requeue the work if more domains were put asynchronously meanwhile. */
2103 	if (power_domains->async_put_domains[1]) {
2104 		power_domains->async_put_domains[0] =
2105 			fetch_and_zero(&power_domains->async_put_domains[1]);
2106 		queue_async_put_domains_work(power_domains,
2107 					     fetch_and_zero(&new_work_wakeref));
2108 	}
2109 
2110 out_verify:
2111 	verify_async_put_domains_state(power_domains);
2112 
2113 	mutex_unlock(&power_domains->lock);
2114 
2115 	if (old_work_wakeref)
2116 		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2117 	if (new_work_wakeref)
2118 		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2119 }
2120 
2121 /**
2122  * intel_display_power_put_async - release a power domain reference asynchronously
2123  * @i915: i915 device instance
2124  * @domain: power domain to reference
2125  * @wakeref: wakeref acquired for the reference that is being released
2126  *
2127  * This function drops the power domain reference obtained by
2128  * intel_display_power_get*() and schedules a work to power down the
2129  * corresponding hardware block if this is the last reference.
2130  */
2131 void __intel_display_power_put_async(struct drm_i915_private *i915,
2132 				     enum intel_display_power_domain domain,
2133 				     intel_wakeref_t wakeref)
2134 {
2135 	struct i915_power_domains *power_domains = &i915->power_domains;
2136 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
2137 	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2138 
2139 	mutex_lock(&power_domains->lock);
2140 
2141 	if (power_domains->domain_use_count[domain] > 1) {
2142 		__intel_display_power_put_domain(i915, domain);
2143 
2144 		goto out_verify;
2145 	}
2146 
2147 	WARN_ON(power_domains->domain_use_count[domain] != 1);
2148 
2149 	/* Let a pending work requeue itself or queue a new one. */
2150 	if (power_domains->async_put_wakeref) {
2151 		power_domains->async_put_domains[1] |= BIT_ULL(domain);
2152 	} else {
2153 		power_domains->async_put_domains[0] |= BIT_ULL(domain);
2154 		queue_async_put_domains_work(power_domains,
2155 					     fetch_and_zero(&work_wakeref));
2156 	}
2157 
2158 out_verify:
2159 	verify_async_put_domains_state(power_domains);
2160 
2161 	mutex_unlock(&power_domains->lock);
2162 
2163 	if (work_wakeref)
2164 		intel_runtime_pm_put_raw(rpm, work_wakeref);
2165 
2166 	intel_runtime_pm_put(rpm, wakeref);
2167 }
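/*
 * Usage sketch (illustrative): callers normally go through the
 * intel_display_power_put_async() wrapper declared in
 * intel_display_power.h, which passes the wakeref through:
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	...
 *	intel_display_power_put_async(i915, domain, wakeref);
 *
 * The actual power-off is deferred by ~100ms (see
 * queue_async_put_domains_work() above), so a quick put/get cycle on the
 * same domain never toggles the hardware.
 */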
2168 
2169 /**
2170  * intel_display_power_flush_work - flushes the async display power disabling work
2171  * @i915: i915 device instance
2172  *
2173  * Flushes any pending work that was scheduled by a preceding
2174  * intel_display_power_put_async() call, completing the disabling of the
2175  * corresponding power domains.
2176  *
2177  * Note that the work handler function may still be running after this
2178  * function returns; to ensure that the work handler isn't running, use
2179  * intel_display_power_flush_work_sync() instead.
2180  */
2181 void intel_display_power_flush_work(struct drm_i915_private *i915)
2182 {
2183 	struct i915_power_domains *power_domains = &i915->power_domains;
2184 	intel_wakeref_t work_wakeref;
2185 
2186 	mutex_lock(&power_domains->lock);
2187 
2188 	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2189 	if (!work_wakeref)
2190 		goto out_verify;
2191 
2192 	release_async_put_domains(power_domains,
2193 				  async_put_domains_mask(power_domains));
2194 	cancel_delayed_work(&power_domains->async_put_work);
2195 
2196 out_verify:
2197 	verify_async_put_domains_state(power_domains);
2198 
2199 	mutex_unlock(&power_domains->lock);
2200 
2201 	if (work_wakeref)
2202 		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2203 }
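/*
 * Usage sketch (illustrative): a caller that needs the deferred
 * disabling to have completed, e.g. before inspecting the resulting
 * hardware state, would do:
 *
 *	intel_display_power_put_async(i915, domain, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 *	...all pending async puts have now been released...
 */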
2204 
2205 /**
2206  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2207  * @i915: i915 device instance
2208  *
2209  * Like intel_display_power_flush_work(), but also ensure that the work
2210  * handler function is not running any more when this function returns.
2211  */
2212 static void
2213 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2214 {
2215 	struct i915_power_domains *power_domains = &i915->power_domains;
2216 
2217 	intel_display_power_flush_work(i915);
2218 	cancel_delayed_work_sync(&power_domains->async_put_work);
2219 
2220 	verify_async_put_domains_state(power_domains);
2221 
2222 	WARN_ON(power_domains->async_put_wakeref);
2223 }
2224 
2225 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2226 /**
2227  * intel_display_power_put - release a power domain reference
2228  * @dev_priv: i915 device instance
2229  * @domain: power domain to reference
2230  * @wakeref: wakeref acquired for the reference that is being released
2231  *
2232  * This function drops the power domain reference obtained by
2233  * intel_display_power_get() and might power down the corresponding hardware
2234  * block right away if this is the last reference.
2235  */
2236 void intel_display_power_put(struct drm_i915_private *dev_priv,
2237 			     enum intel_display_power_domain domain,
2238 			     intel_wakeref_t wakeref)
2239 {
2240 	__intel_display_power_put(dev_priv, domain);
2241 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2242 }
2243 #endif
2244 
2245 #define I830_PIPES_POWER_DOMAINS (		\
2246 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2247 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2248 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2249 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2250 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2251 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2252 	BIT_ULL(POWER_DOMAIN_INIT))
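/*
 * Note (summary): each *_POWER_DOMAINS macro below is a u64 bitmask of
 * BIT_ULL(POWER_DOMAIN_*) values, used as the .domains field of an
 * i915_power_well_desc. A power well backs a domain iff the
 * corresponding bit is set, e.g.:
 *
 *	if (power_well->desc->domains & BIT_ULL(POWER_DOMAIN_PIPE_A))
 *		...this well must be on for pipe A to work...
 */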
2253 
2254 #define VLV_DISPLAY_POWER_DOMAINS (		\
2255 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2256 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2257 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2258 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2259 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2260 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2261 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2262 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2263 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2264 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2265 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2266 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2267 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2268 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2269 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2270 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2271 	BIT_ULL(POWER_DOMAIN_INIT))
2272 
2273 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
2274 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2275 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2276 	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
2277 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2278 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2279 	BIT_ULL(POWER_DOMAIN_INIT))
2280 
2281 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
2282 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2283 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2284 	BIT_ULL(POWER_DOMAIN_INIT))
2285 
2286 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
2287 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2288 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2289 	BIT_ULL(POWER_DOMAIN_INIT))
2290 
2291 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
2292 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2293 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2294 	BIT_ULL(POWER_DOMAIN_INIT))
2295 
2296 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
2297 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2298 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2299 	BIT_ULL(POWER_DOMAIN_INIT))
2300 
2301 #define CHV_DISPLAY_POWER_DOMAINS (		\
2302 	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
2303 	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
2304 	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
2305 	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
2306 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
2307 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2308 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2309 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
2310 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
2311 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
2312 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2313 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2314 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2315 	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
2316 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2317 	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
2318 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2319 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2320 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2321 	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
2322 	BIT_ULL(POWER_DOMAIN_INIT))
2323 
2324 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
2325 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2326 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2327 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2328 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2329 	BIT_ULL(POWER_DOMAIN_INIT))
2330 
2331 #define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
2332 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2333 	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
2334 	BIT_ULL(POWER_DOMAIN_INIT))
2335 
2336 #define HSW_DISPLAY_POWER_DOMAINS (			\
2337 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2338 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2339 	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
2340 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2341 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2342 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2343 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2344 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2345 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2346 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2347 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2348 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2349 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2350 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2351 	BIT_ULL(POWER_DOMAIN_INIT))
2352 
2353 #define BDW_DISPLAY_POWER_DOMAINS (			\
2354 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2355 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2356 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2357 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2358 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2359 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2360 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2361 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2362 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2363 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2364 	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
2365 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2366 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2367 	BIT_ULL(POWER_DOMAIN_INIT))
2368 
2369 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2370 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2371 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2372 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2373 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2374 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2375 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2376 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2377 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2378 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2379 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2380 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
2381 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2382 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2383 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2384 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2385 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2386 	BIT_ULL(POWER_DOMAIN_INIT))
2387 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
2388 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2389 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
2390 	BIT_ULL(POWER_DOMAIN_INIT))
2391 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2392 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2393 	BIT_ULL(POWER_DOMAIN_INIT))
2394 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2395 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2396 	BIT_ULL(POWER_DOMAIN_INIT))
2397 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
2398 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2399 	BIT_ULL(POWER_DOMAIN_INIT))
2400 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2401 	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2402 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2403 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2404 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2405 	BIT_ULL(POWER_DOMAIN_INIT))
2406 
2407 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2408 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2409 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2410 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2411 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2412 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2413 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2414 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2415 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2416 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2417 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2418 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2419 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2420 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2421 	BIT_ULL(POWER_DOMAIN_INIT))
2422 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2423 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2424 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2425 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2426 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2427 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2428 	BIT_ULL(POWER_DOMAIN_INIT))
2429 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
2430 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2431 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2432 	BIT_ULL(POWER_DOMAIN_INIT))
2433 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
2434 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2435 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2436 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2437 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2438 	BIT_ULL(POWER_DOMAIN_INIT))
2439 
2440 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2441 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2442 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2443 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2444 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2445 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2446 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2447 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2448 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2449 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2450 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2451 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2452 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2453 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2454 	BIT_ULL(POWER_DOMAIN_INIT))
2455 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
2456 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2457 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
2458 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2459 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
2460 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2461 #define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
2462 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
2463 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2464 	BIT_ULL(POWER_DOMAIN_INIT))
2465 #define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
2466 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2467 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2468 	BIT_ULL(POWER_DOMAIN_INIT))
2469 #define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
2470 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2471 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2472 	BIT_ULL(POWER_DOMAIN_INIT))
2473 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
2474 	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
2475 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2476 	BIT_ULL(POWER_DOMAIN_INIT))
2477 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
2478 	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
2479 	BIT_ULL(POWER_DOMAIN_INIT))
2480 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
2481 	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
2482 	BIT_ULL(POWER_DOMAIN_INIT))
2483 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2484 	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2485 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2486 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2487 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2488 	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
2489 	BIT_ULL(POWER_DOMAIN_INIT))
2490 
2491 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
2492 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2493 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2494 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2495 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2496 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2497 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
2498 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
2499 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
2500 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
2501 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
2502 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
2503 	BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2504 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2505 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2506 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2507 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2508 	BIT_ULL(POWER_DOMAIN_VGA) |				\
2509 	BIT_ULL(POWER_DOMAIN_INIT))
2510 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
2511 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
2512 	BIT_ULL(POWER_DOMAIN_INIT))
2513 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
2514 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
2515 	BIT_ULL(POWER_DOMAIN_INIT))
2516 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
2517 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
2518 	BIT_ULL(POWER_DOMAIN_INIT))
2519 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
2520 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
2521 	BIT_ULL(POWER_DOMAIN_INIT))
2522 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
2523 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2524 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2525 	BIT_ULL(POWER_DOMAIN_INIT))
2526 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
2527 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2528 	BIT_ULL(POWER_DOMAIN_INIT))
2529 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
2530 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2531 	BIT_ULL(POWER_DOMAIN_INIT))
2532 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
2533 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2534 	BIT_ULL(POWER_DOMAIN_INIT))
2535 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
2536 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2537 	BIT_ULL(POWER_DOMAIN_INIT))
2538 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
2539 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
2540 	BIT_ULL(POWER_DOMAIN_INIT))
2541 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2542 	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
2543 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
2544 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2545 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2546 	BIT_ULL(POWER_DOMAIN_INIT))
2547 
2548 /*
2549  * ICL PW_0/PG_0 domains (HW/DMC control):
2550  * - PCI
2551  * - clocks except port PLL
2552  * - central power except FBC
2553  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2554  * ICL PW_1/PG_1 domains (HW/DMC control):
2555  * - DBUF function
2556  * - PIPE_A and its planes, except VGA
2557  * - transcoder EDP + PSR
2558  * - transcoder DSI
2559  * - DDI_A
2560  * - FBC
2561  */
2562 #define ICL_PW_4_POWER_DOMAINS (			\
2563 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2564 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2565 	BIT_ULL(POWER_DOMAIN_INIT))
2566 	/* VDSC/joining */
2567 #define ICL_PW_3_POWER_DOMAINS (			\
2568 	ICL_PW_4_POWER_DOMAINS |			\
2569 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2570 	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
2571 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2572 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2573 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2574 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
2575 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
2576 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2577 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2578 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2579 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2580 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2581 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2582 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2583 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2584 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
2585 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2586 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2587 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2588 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2589 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2590 	BIT_ULL(POWER_DOMAIN_INIT))
2591 	/*
2592 	 * - transcoder WD
2593 	 * - KVMR (HW control)
2594 	 */
2595 #define ICL_PW_2_POWER_DOMAINS (			\
2596 	ICL_PW_3_POWER_DOMAINS |			\
2597 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |		\
2598 	BIT_ULL(POWER_DOMAIN_INIT))
2599 	/*
2600 	 * - KVMR (HW control)
2601 	 */
2602 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2603 	ICL_PW_2_POWER_DOMAINS |			\
2604 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2605 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2606 	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |			\
2607 	BIT_ULL(POWER_DOMAIN_INIT))
2608 
2609 #define ICL_DDI_IO_A_POWER_DOMAINS (			\
2610 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2611 #define ICL_DDI_IO_B_POWER_DOMAINS (			\
2612 	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2613 #define ICL_DDI_IO_C_POWER_DOMAINS (			\
2614 	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2615 #define ICL_DDI_IO_D_POWER_DOMAINS (			\
2616 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2617 #define ICL_DDI_IO_E_POWER_DOMAINS (			\
2618 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2619 #define ICL_DDI_IO_F_POWER_DOMAINS (			\
2620 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2621 
2622 #define ICL_AUX_A_IO_POWER_DOMAINS (			\
2623 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
2624 	BIT_ULL(POWER_DOMAIN_AUX_A))
2625 #define ICL_AUX_B_IO_POWER_DOMAINS (			\
2626 	BIT_ULL(POWER_DOMAIN_AUX_B))
2627 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
2628 	BIT_ULL(POWER_DOMAIN_AUX_C))
2629 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
2630 	BIT_ULL(POWER_DOMAIN_AUX_D))
2631 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
2632 	BIT_ULL(POWER_DOMAIN_AUX_E))
2633 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
2634 	BIT_ULL(POWER_DOMAIN_AUX_F))
2635 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
2636 	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2637 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
2638 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2639 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
2640 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2641 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
2642 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2643 
2644 #define TGL_PW_5_POWER_DOMAINS (			\
2645 	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
2646 	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
2647 	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2648 	BIT_ULL(POWER_DOMAIN_INIT))
2649 
2650 #define TGL_PW_4_POWER_DOMAINS (			\
2651 	TGL_PW_5_POWER_DOMAINS |			\
2652 	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
2653 	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
2654 	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
2655 	BIT_ULL(POWER_DOMAIN_INIT))
2656 
2657 #define TGL_PW_3_POWER_DOMAINS (			\
2658 	TGL_PW_4_POWER_DOMAINS |			\
2659 	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
2660 	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
2661 	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
2662 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
2663 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
2664 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
2665 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
2666 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
2667 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
2668 	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
2669 	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
2670 	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
2671 	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
2672 	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
2673 	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
2674 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
2675 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
2676 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
2677 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
2678 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
2679 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
2680 	BIT_ULL(POWER_DOMAIN_VGA) |			\
2681 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
2682 	BIT_ULL(POWER_DOMAIN_INIT))
2683 
2684 #define TGL_PW_2_POWER_DOMAINS (			\
2685 	TGL_PW_3_POWER_DOMAINS |			\
2686 	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
2687 	BIT_ULL(POWER_DOMAIN_INIT))
2688 
2689 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
2690 	TGL_PW_2_POWER_DOMAINS |			\
2691 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
2692 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
2693 	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
2694 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
2695 	BIT_ULL(POWER_DOMAIN_INIT))
2696 
2697 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
2698 	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2699 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
2700 	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2701 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
2702 	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2703 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
2704 	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2705 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
2706 	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2707 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
2708 	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2709 
2710 #define TGL_AUX_A_IO_POWER_DOMAINS (		\
2711 	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
2712 	BIT_ULL(POWER_DOMAIN_AUX_A))
2713 #define TGL_AUX_B_IO_POWER_DOMAINS (		\
2714 	BIT_ULL(POWER_DOMAIN_AUX_B))
2715 #define TGL_AUX_C_IO_POWER_DOMAINS (		\
2716 	BIT_ULL(POWER_DOMAIN_AUX_C))
2717 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
2718 	BIT_ULL(POWER_DOMAIN_AUX_D))
2719 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
2720 	BIT_ULL(POWER_DOMAIN_AUX_E))
2721 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
2722 	BIT_ULL(POWER_DOMAIN_AUX_F))
2723 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
2724 	BIT_ULL(POWER_DOMAIN_AUX_G))
2725 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
2726 	BIT_ULL(POWER_DOMAIN_AUX_H))
2727 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
2728 	BIT_ULL(POWER_DOMAIN_AUX_I))
2729 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
2730 	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2731 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
2732 	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2733 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
2734 	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2735 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
2736 	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2737 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
2738 	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2739 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
2740 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2741 
2742 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2743 	.sync_hw = i9xx_power_well_sync_hw_noop,
2744 	.enable = i9xx_always_on_power_well_noop,
2745 	.disable = i9xx_always_on_power_well_noop,
2746 	.is_enabled = i9xx_always_on_power_well_enabled,
2747 };
2748 
2749 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2750 	.sync_hw = i9xx_power_well_sync_hw_noop,
2751 	.enable = chv_pipe_power_well_enable,
2752 	.disable = chv_pipe_power_well_disable,
2753 	.is_enabled = chv_pipe_power_well_enabled,
2754 };
2755 
2756 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2757 	.sync_hw = i9xx_power_well_sync_hw_noop,
2758 	.enable = chv_dpio_cmn_power_well_enable,
2759 	.disable = chv_dpio_cmn_power_well_disable,
2760 	.is_enabled = vlv_power_well_enabled,
2761 };
2762 
2763 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2764 	{
2765 		.name = "always-on",
2766 		.always_on = true,
2767 		.domains = POWER_DOMAIN_MASK,
2768 		.ops = &i9xx_always_on_power_well_ops,
2769 		.id = DISP_PW_ID_NONE,
2770 	},
2771 };
2772 
2773 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2774 	.sync_hw = i830_pipes_power_well_sync_hw,
2775 	.enable = i830_pipes_power_well_enable,
2776 	.disable = i830_pipes_power_well_disable,
2777 	.is_enabled = i830_pipes_power_well_enabled,
2778 };
2779 
2780 static const struct i915_power_well_desc i830_power_wells[] = {
2781 	{
2782 		.name = "always-on",
2783 		.always_on = true,
2784 		.domains = POWER_DOMAIN_MASK,
2785 		.ops = &i9xx_always_on_power_well_ops,
2786 		.id = DISP_PW_ID_NONE,
2787 	},
2788 	{
2789 		.name = "pipes",
2790 		.domains = I830_PIPES_POWER_DOMAINS,
2791 		.ops = &i830_pipes_power_well_ops,
2792 		.id = DISP_PW_ID_NONE,
2793 	},
2794 };
2795 
2796 static const struct i915_power_well_ops hsw_power_well_ops = {
2797 	.sync_hw = hsw_power_well_sync_hw,
2798 	.enable = hsw_power_well_enable,
2799 	.disable = hsw_power_well_disable,
2800 	.is_enabled = hsw_power_well_enabled,
2801 };
2802 
2803 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2804 	.sync_hw = i9xx_power_well_sync_hw_noop,
2805 	.enable = gen9_dc_off_power_well_enable,
2806 	.disable = gen9_dc_off_power_well_disable,
2807 	.is_enabled = gen9_dc_off_power_well_enabled,
2808 };
2809 
2810 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2811 	.sync_hw = i9xx_power_well_sync_hw_noop,
2812 	.enable = bxt_dpio_cmn_power_well_enable,
2813 	.disable = bxt_dpio_cmn_power_well_disable,
2814 	.is_enabled = bxt_dpio_cmn_power_well_enabled,
2815 };
2816 
2817 static const struct i915_power_well_regs hsw_power_well_regs = {
2818 	.bios	= HSW_PWR_WELL_CTL1,
2819 	.driver	= HSW_PWR_WELL_CTL2,
2820 	.kvmr	= HSW_PWR_WELL_CTL3,
2821 	.debug	= HSW_PWR_WELL_CTL4,
2822 };
2823 
2824 static const struct i915_power_well_desc hsw_power_wells[] = {
2825 	{
2826 		.name = "always-on",
2827 		.always_on = true,
2828 		.domains = POWER_DOMAIN_MASK,
2829 		.ops = &i9xx_always_on_power_well_ops,
2830 		.id = DISP_PW_ID_NONE,
2831 	},
2832 	{
2833 		.name = "display",
2834 		.domains = HSW_DISPLAY_POWER_DOMAINS,
2835 		.ops = &hsw_power_well_ops,
2836 		.id = HSW_DISP_PW_GLOBAL,
2837 		{
2838 			.hsw.regs = &hsw_power_well_regs,
2839 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2840 			.hsw.has_vga = true,
2841 		},
2842 	},
2843 };
2844 
2845 static const struct i915_power_well_desc bdw_power_wells[] = {
2846 	{
2847 		.name = "always-on",
2848 		.always_on = true,
2849 		.domains = POWER_DOMAIN_MASK,
2850 		.ops = &i9xx_always_on_power_well_ops,
2851 		.id = DISP_PW_ID_NONE,
2852 	},
2853 	{
2854 		.name = "display",
2855 		.domains = BDW_DISPLAY_POWER_DOMAINS,
2856 		.ops = &hsw_power_well_ops,
2857 		.id = HSW_DISP_PW_GLOBAL,
2858 		{
2859 			.hsw.regs = &hsw_power_well_regs,
2860 			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2861 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2862 			.hsw.has_vga = true,
2863 		},
2864 	},
2865 };
2866 
2867 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2868 	.sync_hw = i9xx_power_well_sync_hw_noop,
2869 	.enable = vlv_display_power_well_enable,
2870 	.disable = vlv_display_power_well_disable,
2871 	.is_enabled = vlv_power_well_enabled,
2872 };
2873 
2874 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2875 	.sync_hw = i9xx_power_well_sync_hw_noop,
2876 	.enable = vlv_dpio_cmn_power_well_enable,
2877 	.disable = vlv_dpio_cmn_power_well_disable,
2878 	.is_enabled = vlv_power_well_enabled,
2879 };
2880 
2881 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2882 	.sync_hw = i9xx_power_well_sync_hw_noop,
2883 	.enable = vlv_power_well_enable,
2884 	.disable = vlv_power_well_disable,
2885 	.is_enabled = vlv_power_well_enabled,
2886 };
2887 
2888 static const struct i915_power_well_desc vlv_power_wells[] = {
2889 	{
2890 		.name = "always-on",
2891 		.always_on = true,
2892 		.domains = POWER_DOMAIN_MASK,
2893 		.ops = &i9xx_always_on_power_well_ops,
2894 		.id = DISP_PW_ID_NONE,
2895 	},
2896 	{
2897 		.name = "display",
2898 		.domains = VLV_DISPLAY_POWER_DOMAINS,
2899 		.ops = &vlv_display_power_well_ops,
2900 		.id = VLV_DISP_PW_DISP2D,
2901 		{
2902 			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2903 		},
2904 	},
2905 	{
2906 		.name = "dpio-tx-b-01",
2907 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2908 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2909 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2910 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2911 		.ops = &vlv_dpio_power_well_ops,
2912 		.id = DISP_PW_ID_NONE,
2913 		{
2914 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2915 		},
2916 	},
2917 	{
2918 		.name = "dpio-tx-b-23",
2919 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2920 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2921 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2922 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2923 		.ops = &vlv_dpio_power_well_ops,
2924 		.id = DISP_PW_ID_NONE,
2925 		{
2926 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2927 		},
2928 	},
2929 	{
2930 		.name = "dpio-tx-c-01",
2931 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2932 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2933 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2934 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2935 		.ops = &vlv_dpio_power_well_ops,
2936 		.id = DISP_PW_ID_NONE,
2937 		{
2938 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2939 		},
2940 	},
2941 	{
2942 		.name = "dpio-tx-c-23",
2943 		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2944 			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2945 			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2946 			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2947 		.ops = &vlv_dpio_power_well_ops,
2948 		.id = DISP_PW_ID_NONE,
2949 		{
2950 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2951 		},
2952 	},
2953 	{
2954 		.name = "dpio-common",
2955 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2956 		.ops = &vlv_dpio_cmn_power_well_ops,
2957 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2958 		{
2959 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2960 		},
2961 	},
2962 };
2963 
2964 static const struct i915_power_well_desc chv_power_wells[] = {
2965 	{
2966 		.name = "always-on",
2967 		.always_on = true,
2968 		.domains = POWER_DOMAIN_MASK,
2969 		.ops = &i9xx_always_on_power_well_ops,
2970 		.id = DISP_PW_ID_NONE,
2971 	},
2972 	{
2973 		.name = "display",
2974 		/*
2975 		 * Pipe A power well is the new disp2d well. Pipe B and C
2976 		 * power wells don't actually exist. Pipe A power well is
2977 		 * required for any pipe to work.
2978 		 */
2979 		.domains = CHV_DISPLAY_POWER_DOMAINS,
2980 		.ops = &chv_pipe_power_well_ops,
2981 		.id = DISP_PW_ID_NONE,
2982 	},
2983 	{
2984 		.name = "dpio-common-bc",
2985 		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2986 		.ops = &chv_dpio_cmn_power_well_ops,
2987 		.id = VLV_DISP_PW_DPIO_CMN_BC,
2988 		{
2989 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2990 		},
2991 	},
2992 	{
2993 		.name = "dpio-common-d",
2994 		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2995 		.ops = &chv_dpio_cmn_power_well_ops,
2996 		.id = CHV_DISP_PW_DPIO_CMN_D,
2997 		{
2998 			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2999 		},
3000 	},
3001 };
3002 
3003 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3004 					 enum i915_power_well_id power_well_id)
3005 {
3006 	struct i915_power_well *power_well;
3007 	bool ret;
3008 
3009 	power_well = lookup_power_well(dev_priv, power_well_id);
3010 	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3011 
3012 	return ret;
3013 }
3014 
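/*
 * A minimal usage sketch (not taken from this file): a caller that wants
 * to know whether power well 2 is up on a SKL-class part could do
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		DRM_DEBUG_KMS("power well 2 is enabled\n");
 *
 * The lookup itself is not serialized here; a caller that needs a stable
 * answer across concurrent get/put traffic should hold power_domains->lock.
 */
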
3015 static const struct i915_power_well_desc skl_power_wells[] = {
3016 	{
3017 		.name = "always-on",
3018 		.always_on = true,
3019 		.domains = POWER_DOMAIN_MASK,
3020 		.ops = &i9xx_always_on_power_well_ops,
3021 		.id = DISP_PW_ID_NONE,
3022 	},
3023 	{
3024 		.name = "power well 1",
3025 		/* Handled by the DMC firmware */
3026 		.always_on = true,
3027 		.domains = 0,
3028 		.ops = &hsw_power_well_ops,
3029 		.id = SKL_DISP_PW_1,
3030 		{
3031 			.hsw.regs = &hsw_power_well_regs,
3032 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3033 			.hsw.has_fuses = true,
3034 		},
3035 	},
3036 	{
3037 		.name = "MISC IO power well",
3038 		/* Handled by the DMC firmware */
3039 		.always_on = true,
3040 		.domains = 0,
3041 		.ops = &hsw_power_well_ops,
3042 		.id = SKL_DISP_PW_MISC_IO,
3043 		{
3044 			.hsw.regs = &hsw_power_well_regs,
3045 			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3046 		},
3047 	},
3048 	{
3049 		.name = "DC off",
3050 		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3051 		.ops = &gen9_dc_off_power_well_ops,
3052 		.id = SKL_DISP_DC_OFF,
3053 	},
3054 	{
3055 		.name = "power well 2",
3056 		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3057 		.ops = &hsw_power_well_ops,
3058 		.id = SKL_DISP_PW_2,
3059 		{
3060 			.hsw.regs = &hsw_power_well_regs,
3061 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3062 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3063 			.hsw.has_vga = true,
3064 			.hsw.has_fuses = true,
3065 		},
3066 	},
3067 	{
3068 		.name = "DDI A/E IO power well",
3069 		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3070 		.ops = &hsw_power_well_ops,
3071 		.id = DISP_PW_ID_NONE,
3072 		{
3073 			.hsw.regs = &hsw_power_well_regs,
3074 			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3075 		},
3076 	},
3077 	{
3078 		.name = "DDI B IO power well",
3079 		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3080 		.ops = &hsw_power_well_ops,
3081 		.id = DISP_PW_ID_NONE,
3082 		{
3083 			.hsw.regs = &hsw_power_well_regs,
3084 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3085 		},
3086 	},
3087 	{
3088 		.name = "DDI C IO power well",
3089 		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3090 		.ops = &hsw_power_well_ops,
3091 		.id = DISP_PW_ID_NONE,
3092 		{
3093 			.hsw.regs = &hsw_power_well_regs,
3094 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3095 		},
3096 	},
3097 	{
3098 		.name = "DDI D IO power well",
3099 		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3100 		.ops = &hsw_power_well_ops,
3101 		.id = DISP_PW_ID_NONE,
3102 		{
3103 			.hsw.regs = &hsw_power_well_regs,
3104 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3105 		},
3106 	},
3107 };
3108 
3109 static const struct i915_power_well_desc bxt_power_wells[] = {
3110 	{
3111 		.name = "always-on",
3112 		.always_on = true,
3113 		.domains = POWER_DOMAIN_MASK,
3114 		.ops = &i9xx_always_on_power_well_ops,
3115 		.id = DISP_PW_ID_NONE,
3116 	},
3117 	{
3118 		.name = "power well 1",
3119 		/* Handled by the DMC firmware */
3120 		.always_on = true,
3121 		.domains = 0,
3122 		.ops = &hsw_power_well_ops,
3123 		.id = SKL_DISP_PW_1,
3124 		{
3125 			.hsw.regs = &hsw_power_well_regs,
3126 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3127 			.hsw.has_fuses = true,
3128 		},
3129 	},
3130 	{
3131 		.name = "DC off",
3132 		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3133 		.ops = &gen9_dc_off_power_well_ops,
3134 		.id = SKL_DISP_DC_OFF,
3135 	},
3136 	{
3137 		.name = "power well 2",
3138 		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3139 		.ops = &hsw_power_well_ops,
3140 		.id = SKL_DISP_PW_2,
3141 		{
3142 			.hsw.regs = &hsw_power_well_regs,
3143 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3144 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3145 			.hsw.has_vga = true,
3146 			.hsw.has_fuses = true,
3147 		},
3148 	},
3149 	{
3150 		.name = "dpio-common-a",
3151 		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3152 		.ops = &bxt_dpio_cmn_power_well_ops,
3153 		.id = BXT_DISP_PW_DPIO_CMN_A,
3154 		{
3155 			.bxt.phy = DPIO_PHY1,
3156 		},
3157 	},
3158 	{
3159 		.name = "dpio-common-bc",
3160 		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3161 		.ops = &bxt_dpio_cmn_power_well_ops,
3162 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3163 		{
3164 			.bxt.phy = DPIO_PHY0,
3165 		},
3166 	},
3167 };
3168 
3169 static const struct i915_power_well_desc glk_power_wells[] = {
3170 	{
3171 		.name = "always-on",
3172 		.always_on = true,
3173 		.domains = POWER_DOMAIN_MASK,
3174 		.ops = &i9xx_always_on_power_well_ops,
3175 		.id = DISP_PW_ID_NONE,
3176 	},
3177 	{
3178 		.name = "power well 1",
3179 		/* Handled by the DMC firmware */
3180 		.always_on = true,
3181 		.domains = 0,
3182 		.ops = &hsw_power_well_ops,
3183 		.id = SKL_DISP_PW_1,
3184 		{
3185 			.hsw.regs = &hsw_power_well_regs,
3186 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3187 			.hsw.has_fuses = true,
3188 		},
3189 	},
3190 	{
3191 		.name = "DC off",
3192 		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3193 		.ops = &gen9_dc_off_power_well_ops,
3194 		.id = SKL_DISP_DC_OFF,
3195 	},
3196 	{
3197 		.name = "power well 2",
3198 		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3199 		.ops = &hsw_power_well_ops,
3200 		.id = SKL_DISP_PW_2,
3201 		{
3202 			.hsw.regs = &hsw_power_well_regs,
3203 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3204 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3205 			.hsw.has_vga = true,
3206 			.hsw.has_fuses = true,
3207 		},
3208 	},
3209 	{
3210 		.name = "dpio-common-a",
3211 		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3212 		.ops = &bxt_dpio_cmn_power_well_ops,
3213 		.id = BXT_DISP_PW_DPIO_CMN_A,
3214 		{
3215 			.bxt.phy = DPIO_PHY1,
3216 		},
3217 	},
3218 	{
3219 		.name = "dpio-common-b",
3220 		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3221 		.ops = &bxt_dpio_cmn_power_well_ops,
3222 		.id = VLV_DISP_PW_DPIO_CMN_BC,
3223 		{
3224 			.bxt.phy = DPIO_PHY0,
3225 		},
3226 	},
3227 	{
3228 		.name = "dpio-common-c",
3229 		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3230 		.ops = &bxt_dpio_cmn_power_well_ops,
3231 		.id = GLK_DISP_PW_DPIO_CMN_C,
3232 		{
3233 			.bxt.phy = DPIO_PHY2,
3234 		},
3235 	},
3236 	{
3237 		.name = "AUX A",
3238 		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3239 		.ops = &hsw_power_well_ops,
3240 		.id = DISP_PW_ID_NONE,
3241 		{
3242 			.hsw.regs = &hsw_power_well_regs,
3243 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3244 		},
3245 	},
3246 	{
3247 		.name = "AUX B",
3248 		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3249 		.ops = &hsw_power_well_ops,
3250 		.id = DISP_PW_ID_NONE,
3251 		{
3252 			.hsw.regs = &hsw_power_well_regs,
3253 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3254 		},
3255 	},
3256 	{
3257 		.name = "AUX C",
3258 		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3259 		.ops = &hsw_power_well_ops,
3260 		.id = DISP_PW_ID_NONE,
3261 		{
3262 			.hsw.regs = &hsw_power_well_regs,
3263 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3264 		},
3265 	},
3266 	{
3267 		.name = "DDI A IO power well",
3268 		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3269 		.ops = &hsw_power_well_ops,
3270 		.id = DISP_PW_ID_NONE,
3271 		{
3272 			.hsw.regs = &hsw_power_well_regs,
3273 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3274 		},
3275 	},
3276 	{
3277 		.name = "DDI B IO power well",
3278 		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3279 		.ops = &hsw_power_well_ops,
3280 		.id = DISP_PW_ID_NONE,
3281 		{
3282 			.hsw.regs = &hsw_power_well_regs,
3283 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3284 		},
3285 	},
3286 	{
3287 		.name = "DDI C IO power well",
3288 		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3289 		.ops = &hsw_power_well_ops,
3290 		.id = DISP_PW_ID_NONE,
3291 		{
3292 			.hsw.regs = &hsw_power_well_regs,
3293 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3294 		},
3295 	},
3296 };
3297 
3298 static const struct i915_power_well_desc cnl_power_wells[] = {
3299 	{
3300 		.name = "always-on",
3301 		.always_on = true,
3302 		.domains = POWER_DOMAIN_MASK,
3303 		.ops = &i9xx_always_on_power_well_ops,
3304 		.id = DISP_PW_ID_NONE,
3305 	},
3306 	{
3307 		.name = "power well 1",
3308 		/* Handled by the DMC firmware */
3309 		.always_on = true,
3310 		.domains = 0,
3311 		.ops = &hsw_power_well_ops,
3312 		.id = SKL_DISP_PW_1,
3313 		{
3314 			.hsw.regs = &hsw_power_well_regs,
3315 			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
3316 			.hsw.has_fuses = true,
3317 		},
3318 	},
3319 	{
3320 		.name = "AUX A",
3321 		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3322 		.ops = &hsw_power_well_ops,
3323 		.id = DISP_PW_ID_NONE,
3324 		{
3325 			.hsw.regs = &hsw_power_well_regs,
3326 			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3327 		},
3328 	},
3329 	{
3330 		.name = "AUX B",
3331 		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3332 		.ops = &hsw_power_well_ops,
3333 		.id = DISP_PW_ID_NONE,
3334 		{
3335 			.hsw.regs = &hsw_power_well_regs,
3336 			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3337 		},
3338 	},
3339 	{
3340 		.name = "AUX C",
3341 		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3342 		.ops = &hsw_power_well_ops,
3343 		.id = DISP_PW_ID_NONE,
3344 		{
3345 			.hsw.regs = &hsw_power_well_regs,
3346 			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3347 		},
3348 	},
3349 	{
3350 		.name = "AUX D",
3351 		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3352 		.ops = &hsw_power_well_ops,
3353 		.id = DISP_PW_ID_NONE,
3354 		{
3355 			.hsw.regs = &hsw_power_well_regs,
3356 			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3357 		},
3358 	},
3359 	{
3360 		.name = "DC off",
3361 		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3362 		.ops = &gen9_dc_off_power_well_ops,
3363 		.id = SKL_DISP_DC_OFF,
3364 	},
3365 	{
3366 		.name = "power well 2",
3367 		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3368 		.ops = &hsw_power_well_ops,
3369 		.id = SKL_DISP_PW_2,
3370 		{
3371 			.hsw.regs = &hsw_power_well_regs,
3372 			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
3373 			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3374 			.hsw.has_vga = true,
3375 			.hsw.has_fuses = true,
3376 		},
3377 	},
3378 	{
3379 		.name = "DDI A IO power well",
3380 		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3381 		.ops = &hsw_power_well_ops,
3382 		.id = DISP_PW_ID_NONE,
3383 		{
3384 			.hsw.regs = &hsw_power_well_regs,
3385 			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3386 		},
3387 	},
3388 	{
3389 		.name = "DDI B IO power well",
3390 		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3391 		.ops = &hsw_power_well_ops,
3392 		.id = DISP_PW_ID_NONE,
3393 		{
3394 			.hsw.regs = &hsw_power_well_regs,
3395 			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3396 		},
3397 	},
3398 	{
3399 		.name = "DDI C IO power well",
3400 		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3401 		.ops = &hsw_power_well_ops,
3402 		.id = DISP_PW_ID_NONE,
3403 		{
3404 			.hsw.regs = &hsw_power_well_regs,
3405 			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3406 		},
3407 	},
3408 	{
3409 		.name = "DDI D IO power well",
3410 		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3411 		.ops = &hsw_power_well_ops,
3412 		.id = DISP_PW_ID_NONE,
3413 		{
3414 			.hsw.regs = &hsw_power_well_regs,
3415 			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3416 		},
3417 	},
3418 	{
3419 		.name = "DDI F IO power well",
3420 		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3421 		.ops = &hsw_power_well_ops,
3422 		.id = DISP_PW_ID_NONE,
3423 		{
3424 			.hsw.regs = &hsw_power_well_regs,
3425 			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3426 		},
3427 	},
3428 	{
3429 		.name = "AUX F",
3430 		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3431 		.ops = &hsw_power_well_ops,
3432 		.id = DISP_PW_ID_NONE,
3433 		{
3434 			.hsw.regs = &hsw_power_well_regs,
3435 			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3436 		},
3437 	},
3438 };
3439 
3440 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3441 	.sync_hw = hsw_power_well_sync_hw,
3442 	.enable = icl_combo_phy_aux_power_well_enable,
3443 	.disable = icl_combo_phy_aux_power_well_disable,
3444 	.is_enabled = hsw_power_well_enabled,
3445 };
3446 
3447 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3448 	.sync_hw = hsw_power_well_sync_hw,
3449 	.enable = icl_tc_phy_aux_power_well_enable,
3450 	.disable = icl_tc_phy_aux_power_well_disable,
3451 	.is_enabled = hsw_power_well_enabled,
3452 };
3453 
3454 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3455 	.bios	= ICL_PWR_WELL_CTL_AUX1,
3456 	.driver	= ICL_PWR_WELL_CTL_AUX2,
3457 	.debug	= ICL_PWR_WELL_CTL_AUX4,
3458 };
3459 
3460 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3461 	.bios	= ICL_PWR_WELL_CTL_DDI1,
3462 	.driver	= ICL_PWR_WELL_CTL_DDI2,
3463 	.debug	= ICL_PWR_WELL_CTL_DDI4,
3464 };
3465 
3466 static const struct i915_power_well_desc icl_power_wells[] = {
3467 	{
3468 		.name = "always-on",
3469 		.always_on = true,
3470 		.domains = POWER_DOMAIN_MASK,
3471 		.ops = &i9xx_always_on_power_well_ops,
3472 		.id = DISP_PW_ID_NONE,
3473 	},
3474 	{
3475 		.name = "power well 1",
3476 		/* Handled by the DMC firmware */
3477 		.always_on = true,
3478 		.domains = 0,
3479 		.ops = &hsw_power_well_ops,
3480 		.id = SKL_DISP_PW_1,
3481 		{
3482 			.hsw.regs = &hsw_power_well_regs,
3483 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3484 			.hsw.has_fuses = true,
3485 		},
3486 	},
3487 	{
3488 		.name = "DC off",
3489 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3490 		.ops = &gen9_dc_off_power_well_ops,
3491 		.id = SKL_DISP_DC_OFF,
3492 	},
3493 	{
3494 		.name = "power well 2",
3495 		.domains = ICL_PW_2_POWER_DOMAINS,
3496 		.ops = &hsw_power_well_ops,
3497 		.id = SKL_DISP_PW_2,
3498 		{
3499 			.hsw.regs = &hsw_power_well_regs,
3500 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3501 			.hsw.has_fuses = true,
3502 		},
3503 	},
3504 	{
3505 		.name = "power well 3",
3506 		.domains = ICL_PW_3_POWER_DOMAINS,
3507 		.ops = &hsw_power_well_ops,
3508 		.id = DISP_PW_ID_NONE,
3509 		{
3510 			.hsw.regs = &hsw_power_well_regs,
3511 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3512 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3513 			.hsw.has_vga = true,
3514 			.hsw.has_fuses = true,
3515 		},
3516 	},
3517 	{
3518 		.name = "DDI A IO",
3519 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3520 		.ops = &hsw_power_well_ops,
3521 		.id = DISP_PW_ID_NONE,
3522 		{
3523 			.hsw.regs = &icl_ddi_power_well_regs,
3524 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3525 		},
3526 	},
3527 	{
3528 		.name = "DDI B IO",
3529 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3530 		.ops = &hsw_power_well_ops,
3531 		.id = DISP_PW_ID_NONE,
3532 		{
3533 			.hsw.regs = &icl_ddi_power_well_regs,
3534 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3535 		},
3536 	},
3537 	{
3538 		.name = "DDI C IO",
3539 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3540 		.ops = &hsw_power_well_ops,
3541 		.id = DISP_PW_ID_NONE,
3542 		{
3543 			.hsw.regs = &icl_ddi_power_well_regs,
3544 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3545 		},
3546 	},
3547 	{
3548 		.name = "DDI D IO",
3549 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3550 		.ops = &hsw_power_well_ops,
3551 		.id = DISP_PW_ID_NONE,
3552 		{
3553 			.hsw.regs = &icl_ddi_power_well_regs,
3554 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3555 		},
3556 	},
3557 	{
3558 		.name = "DDI E IO",
3559 		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
3560 		.ops = &hsw_power_well_ops,
3561 		.id = DISP_PW_ID_NONE,
3562 		{
3563 			.hsw.regs = &icl_ddi_power_well_regs,
3564 			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3565 		},
3566 	},
3567 	{
3568 		.name = "DDI F IO",
3569 		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
3570 		.ops = &hsw_power_well_ops,
3571 		.id = DISP_PW_ID_NONE,
3572 		{
3573 			.hsw.regs = &icl_ddi_power_well_regs,
3574 			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3575 		},
3576 	},
3577 	{
3578 		.name = "AUX A",
3579 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3580 		.ops = &icl_combo_phy_aux_power_well_ops,
3581 		.id = DISP_PW_ID_NONE,
3582 		{
3583 			.hsw.regs = &icl_aux_power_well_regs,
3584 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3585 		},
3586 	},
3587 	{
3588 		.name = "AUX B",
3589 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3590 		.ops = &icl_combo_phy_aux_power_well_ops,
3591 		.id = DISP_PW_ID_NONE,
3592 		{
3593 			.hsw.regs = &icl_aux_power_well_regs,
3594 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3595 		},
3596 	},
3597 	{
3598 		.name = "AUX C TC1",
3599 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3600 		.ops = &icl_tc_phy_aux_power_well_ops,
3601 		.id = DISP_PW_ID_NONE,
3602 		{
3603 			.hsw.regs = &icl_aux_power_well_regs,
3604 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3605 			.hsw.is_tc_tbt = false,
3606 		},
3607 	},
3608 	{
3609 		.name = "AUX D TC2",
3610 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3611 		.ops = &icl_tc_phy_aux_power_well_ops,
3612 		.id = DISP_PW_ID_NONE,
3613 		{
3614 			.hsw.regs = &icl_aux_power_well_regs,
3615 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3616 			.hsw.is_tc_tbt = false,
3617 		},
3618 	},
3619 	{
3620 		.name = "AUX E TC3",
3621 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3622 		.ops = &icl_tc_phy_aux_power_well_ops,
3623 		.id = DISP_PW_ID_NONE,
3624 		{
3625 			.hsw.regs = &icl_aux_power_well_regs,
3626 			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3627 			.hsw.is_tc_tbt = false,
3628 		},
3629 	},
3630 	{
3631 		.name = "AUX F TC4",
3632 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3633 		.ops = &icl_tc_phy_aux_power_well_ops,
3634 		.id = DISP_PW_ID_NONE,
3635 		{
3636 			.hsw.regs = &icl_aux_power_well_regs,
3637 			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3638 			.hsw.is_tc_tbt = false,
3639 		},
3640 	},
3641 	{
3642 		.name = "AUX C TBT1",
3643 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3644 		.ops = &icl_tc_phy_aux_power_well_ops,
3645 		.id = DISP_PW_ID_NONE,
3646 		{
3647 			.hsw.regs = &icl_aux_power_well_regs,
3648 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3649 			.hsw.is_tc_tbt = true,
3650 		},
3651 	},
3652 	{
3653 		.name = "AUX D TBT2",
3654 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3655 		.ops = &icl_tc_phy_aux_power_well_ops,
3656 		.id = DISP_PW_ID_NONE,
3657 		{
3658 			.hsw.regs = &icl_aux_power_well_regs,
3659 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3660 			.hsw.is_tc_tbt = true,
3661 		},
3662 	},
3663 	{
3664 		.name = "AUX E TBT3",
3665 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3666 		.ops = &icl_tc_phy_aux_power_well_ops,
3667 		.id = DISP_PW_ID_NONE,
3668 		{
3669 			.hsw.regs = &icl_aux_power_well_regs,
3670 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3671 			.hsw.is_tc_tbt = true,
3672 		},
3673 	},
3674 	{
3675 		.name = "AUX F TBT4",
3676 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3677 		.ops = &icl_tc_phy_aux_power_well_ops,
3678 		.id = DISP_PW_ID_NONE,
3679 		{
3680 			.hsw.regs = &icl_aux_power_well_regs,
3681 			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3682 			.hsw.is_tc_tbt = true,
3683 		},
3684 	},
3685 	{
3686 		.name = "power well 4",
3687 		.domains = ICL_PW_4_POWER_DOMAINS,
3688 		.ops = &hsw_power_well_ops,
3689 		.id = DISP_PW_ID_NONE,
3690 		{
3691 			.hsw.regs = &hsw_power_well_regs,
3692 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3693 			.hsw.has_fuses = true,
3694 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3695 		},
3696 	},
3697 };
3698 
3699 static const struct i915_power_well_desc ehl_power_wells[] = {
3700 	{
3701 		.name = "always-on",
3702 		.always_on = true,
3703 		.domains = POWER_DOMAIN_MASK,
3704 		.ops = &i9xx_always_on_power_well_ops,
3705 		.id = DISP_PW_ID_NONE,
3706 	},
3707 	{
3708 		.name = "power well 1",
3709 		/* Handled by the DMC firmware */
3710 		.always_on = true,
3711 		.domains = 0,
3712 		.ops = &hsw_power_well_ops,
3713 		.id = SKL_DISP_PW_1,
3714 		{
3715 			.hsw.regs = &hsw_power_well_regs,
3716 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3717 			.hsw.has_fuses = true,
3718 		},
3719 	},
3720 	{
3721 		.name = "DC off",
3722 		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3723 		.ops = &gen9_dc_off_power_well_ops,
3724 		.id = SKL_DISP_DC_OFF,
3725 	},
3726 	{
3727 		.name = "power well 2",
3728 		.domains = ICL_PW_2_POWER_DOMAINS,
3729 		.ops = &hsw_power_well_ops,
3730 		.id = SKL_DISP_PW_2,
3731 		{
3732 			.hsw.regs = &hsw_power_well_regs,
3733 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3734 			.hsw.has_fuses = true,
3735 		},
3736 	},
3737 	{
3738 		.name = "power well 3",
3739 		.domains = ICL_PW_3_POWER_DOMAINS,
3740 		.ops = &hsw_power_well_ops,
3741 		.id = DISP_PW_ID_NONE,
3742 		{
3743 			.hsw.regs = &hsw_power_well_regs,
3744 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3745 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3746 			.hsw.has_vga = true,
3747 			.hsw.has_fuses = true,
3748 		},
3749 	},
3750 	{
3751 		.name = "DDI A IO",
3752 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3753 		.ops = &hsw_power_well_ops,
3754 		.id = DISP_PW_ID_NONE,
3755 		{
3756 			.hsw.regs = &icl_ddi_power_well_regs,
3757 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3758 		},
3759 	},
3760 	{
3761 		.name = "DDI B IO",
3762 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3763 		.ops = &hsw_power_well_ops,
3764 		.id = DISP_PW_ID_NONE,
3765 		{
3766 			.hsw.regs = &icl_ddi_power_well_regs,
3767 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3768 		},
3769 	},
3770 	{
3771 		.name = "DDI C IO",
3772 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3773 		.ops = &hsw_power_well_ops,
3774 		.id = DISP_PW_ID_NONE,
3775 		{
3776 			.hsw.regs = &icl_ddi_power_well_regs,
3777 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3778 		},
3779 	},
3780 	{
3781 		.name = "DDI D IO",
3782 		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
3783 		.ops = &hsw_power_well_ops,
3784 		.id = DISP_PW_ID_NONE,
3785 		{
3786 			.hsw.regs = &icl_ddi_power_well_regs,
3787 			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3788 		},
3789 	},
3790 	{
3791 		.name = "AUX A",
3792 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
3793 		.ops = &hsw_power_well_ops,
3794 		.id = DISP_PW_ID_NONE,
3795 		{
3796 			.hsw.regs = &icl_aux_power_well_regs,
3797 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3798 		},
3799 	},
3800 	{
3801 		.name = "AUX B",
3802 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
3803 		.ops = &hsw_power_well_ops,
3804 		.id = DISP_PW_ID_NONE,
3805 		{
3806 			.hsw.regs = &icl_aux_power_well_regs,
3807 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3808 		},
3809 	},
3810 	{
3811 		.name = "AUX C",
3812 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3813 		.ops = &hsw_power_well_ops,
3814 		.id = DISP_PW_ID_NONE,
3815 		{
3816 			.hsw.regs = &icl_aux_power_well_regs,
3817 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3818 		},
3819 	},
3820 	{
3821 		.name = "AUX D",
3822 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3823 		.ops = &hsw_power_well_ops,
3824 		.id = DISP_PW_ID_NONE,
3825 		{
3826 			.hsw.regs = &icl_aux_power_well_regs,
3827 			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3828 		},
3829 	},
3830 	{
3831 		.name = "power well 4",
3832 		.domains = ICL_PW_4_POWER_DOMAINS,
3833 		.ops = &hsw_power_well_ops,
3834 		.id = DISP_PW_ID_NONE,
3835 		{
3836 			.hsw.regs = &hsw_power_well_regs,
3837 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
3838 			.hsw.has_fuses = true,
3839 			.hsw.irq_pipe_mask = BIT(PIPE_C),
3840 		},
3841 	},
3842 };
3843 
3844 static const struct i915_power_well_desc tgl_power_wells[] = {
3845 	{
3846 		.name = "always-on",
3847 		.always_on = true,
3848 		.domains = POWER_DOMAIN_MASK,
3849 		.ops = &i9xx_always_on_power_well_ops,
3850 		.id = DISP_PW_ID_NONE,
3851 	},
3852 	{
3853 		.name = "power well 1",
3854 		/* Handled by the DMC firmware */
3855 		.always_on = true,
3856 		.domains = 0,
3857 		.ops = &hsw_power_well_ops,
3858 		.id = SKL_DISP_PW_1,
3859 		{
3860 			.hsw.regs = &hsw_power_well_regs,
3861 			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
3862 			.hsw.has_fuses = true,
3863 		},
3864 	},
3865 	{
3866 		.name = "DC off",
3867 		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3868 		.ops = &gen9_dc_off_power_well_ops,
3869 		.id = SKL_DISP_DC_OFF,
3870 	},
3871 	{
3872 		.name = "power well 2",
3873 		.domains = TGL_PW_2_POWER_DOMAINS,
3874 		.ops = &hsw_power_well_ops,
3875 		.id = SKL_DISP_PW_2,
3876 		{
3877 			.hsw.regs = &hsw_power_well_regs,
3878 			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
3879 			.hsw.has_fuses = true,
3880 		},
3881 	},
3882 	{
3883 		.name = "power well 3",
3884 		.domains = TGL_PW_3_POWER_DOMAINS,
3885 		.ops = &hsw_power_well_ops,
3886 		.id = DISP_PW_ID_NONE,
3887 		{
3888 			.hsw.regs = &hsw_power_well_regs,
3889 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
3890 			.hsw.irq_pipe_mask = BIT(PIPE_B),
3891 			.hsw.has_vga = true,
3892 			.hsw.has_fuses = true,
3893 		},
3894 	},
3895 	{
3896 		.name = "DDI A IO",
3897 		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
3898 		.ops = &hsw_power_well_ops,
3899 		.id = DISP_PW_ID_NONE,
3900 		{
3901 			.hsw.regs = &icl_ddi_power_well_regs,
3902 			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3903 		}
3904 	},
3905 	{
3906 		.name = "DDI B IO",
3907 		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
3908 		.ops = &hsw_power_well_ops,
3909 		.id = DISP_PW_ID_NONE,
3910 		{
3911 			.hsw.regs = &icl_ddi_power_well_regs,
3912 			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3913 		}
3914 	},
3915 	{
3916 		.name = "DDI C IO",
3917 		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
3918 		.ops = &hsw_power_well_ops,
3919 		.id = DISP_PW_ID_NONE,
3920 		{
3921 			.hsw.regs = &icl_ddi_power_well_regs,
3922 			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3923 		}
3924 	},
3925 	{
3926 		.name = "DDI D TC1 IO",
3927 		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3928 		.ops = &hsw_power_well_ops,
3929 		.id = DISP_PW_ID_NONE,
3930 		{
3931 			.hsw.regs = &icl_ddi_power_well_regs,
3932 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3933 		},
3934 	},
3935 	{
3936 		.name = "DDI E TC2 IO",
3937 		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3938 		.ops = &hsw_power_well_ops,
3939 		.id = DISP_PW_ID_NONE,
3940 		{
3941 			.hsw.regs = &icl_ddi_power_well_regs,
3942 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3943 		},
3944 	},
3945 	{
3946 		.name = "DDI F TC3 IO",
3947 		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
3948 		.ops = &hsw_power_well_ops,
3949 		.id = DISP_PW_ID_NONE,
3950 		{
3951 			.hsw.regs = &icl_ddi_power_well_regs,
3952 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3953 		},
3954 	},
3955 	{
3956 		.name = "DDI G TC4 IO",
3957 		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
3958 		.ops = &hsw_power_well_ops,
3959 		.id = DISP_PW_ID_NONE,
3960 		{
3961 			.hsw.regs = &icl_ddi_power_well_regs,
3962 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3963 		},
3964 	},
3965 	{
3966 		.name = "DDI H TC5 IO",
3967 		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
3968 		.ops = &hsw_power_well_ops,
3969 		.id = DISP_PW_ID_NONE,
3970 		{
3971 			.hsw.regs = &icl_ddi_power_well_regs,
3972 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
3973 		},
3974 	},
3975 	{
3976 		.name = "DDI I TC6 IO",
3977 		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
3978 		.ops = &hsw_power_well_ops,
3979 		.id = DISP_PW_ID_NONE,
3980 		{
3981 			.hsw.regs = &icl_ddi_power_well_regs,
3982 			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
3983 		},
3984 	},
3985 	{
3986 		.name = "AUX A",
3987 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
3988 		.ops = &hsw_power_well_ops,
3989 		.id = DISP_PW_ID_NONE,
3990 		{
3991 			.hsw.regs = &icl_aux_power_well_regs,
3992 			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3993 		},
3994 	},
3995 	{
3996 		.name = "AUX B",
3997 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
3998 		.ops = &hsw_power_well_ops,
3999 		.id = DISP_PW_ID_NONE,
4000 		{
4001 			.hsw.regs = &icl_aux_power_well_regs,
4002 			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4003 		},
4004 	},
4005 	{
4006 		.name = "AUX C",
4007 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
4008 		.ops = &hsw_power_well_ops,
4009 		.id = DISP_PW_ID_NONE,
4010 		{
4011 			.hsw.regs = &icl_aux_power_well_regs,
4012 			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4013 		},
4014 	},
4015 	{
4016 		.name = "AUX D TC1",
4017 		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4018 		.ops = &icl_tc_phy_aux_power_well_ops,
4019 		.id = DISP_PW_ID_NONE,
4020 		{
4021 			.hsw.regs = &icl_aux_power_well_regs,
4022 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4023 			.hsw.is_tc_tbt = false,
4024 		},
4025 	},
4026 	{
4027 		.name = "AUX E TC2",
4028 		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4029 		.ops = &icl_tc_phy_aux_power_well_ops,
4030 		.id = DISP_PW_ID_NONE,
4031 		{
4032 			.hsw.regs = &icl_aux_power_well_regs,
4033 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4034 			.hsw.is_tc_tbt = false,
4035 		},
4036 	},
4037 	{
4038 		.name = "AUX F TC3",
4039 		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4040 		.ops = &icl_tc_phy_aux_power_well_ops,
4041 		.id = DISP_PW_ID_NONE,
4042 		{
4043 			.hsw.regs = &icl_aux_power_well_regs,
4044 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4045 			.hsw.is_tc_tbt = false,
4046 		},
4047 	},
4048 	{
4049 		.name = "AUX G TC4",
4050 		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4051 		.ops = &icl_tc_phy_aux_power_well_ops,
4052 		.id = DISP_PW_ID_NONE,
4053 		{
4054 			.hsw.regs = &icl_aux_power_well_regs,
4055 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4056 			.hsw.is_tc_tbt = false,
4057 		},
4058 	},
4059 	{
4060 		.name = "AUX H TC5",
4061 		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4062 		.ops = &icl_tc_phy_aux_power_well_ops,
4063 		.id = DISP_PW_ID_NONE,
4064 		{
4065 			.hsw.regs = &icl_aux_power_well_regs,
4066 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4067 			.hsw.is_tc_tbt = false,
4068 		},
4069 	},
4070 	{
4071 		.name = "AUX I TC6",
4072 		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4073 		.ops = &icl_tc_phy_aux_power_well_ops,
4074 		.id = DISP_PW_ID_NONE,
4075 		{
4076 			.hsw.regs = &icl_aux_power_well_regs,
4077 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4078 			.hsw.is_tc_tbt = false,
4079 		},
4080 	},
4081 	{
4082 		.name = "AUX D TBT1",
4083 		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4084 		.ops = &hsw_power_well_ops,
4085 		.id = DISP_PW_ID_NONE,
4086 		{
4087 			.hsw.regs = &icl_aux_power_well_regs,
4088 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4089 			.hsw.is_tc_tbt = true,
4090 		},
4091 	},
4092 	{
4093 		.name = "AUX E TBT2",
4094 		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4095 		.ops = &hsw_power_well_ops,
4096 		.id = DISP_PW_ID_NONE,
4097 		{
4098 			.hsw.regs = &icl_aux_power_well_regs,
4099 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4100 			.hsw.is_tc_tbt = true,
4101 		},
4102 	},
4103 	{
4104 		.name = "AUX F TBT3",
4105 		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4106 		.ops = &hsw_power_well_ops,
4107 		.id = DISP_PW_ID_NONE,
4108 		{
4109 			.hsw.regs = &icl_aux_power_well_regs,
4110 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4111 			.hsw.is_tc_tbt = true,
4112 		},
4113 	},
4114 	{
4115 		.name = "AUX G TBT4",
4116 		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4117 		.ops = &hsw_power_well_ops,
4118 		.id = DISP_PW_ID_NONE,
4119 		{
4120 			.hsw.regs = &icl_aux_power_well_regs,
4121 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4122 			.hsw.is_tc_tbt = true,
4123 		},
4124 	},
4125 	{
4126 		.name = "AUX H TBT5",
4127 		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4128 		.ops = &hsw_power_well_ops,
4129 		.id = DISP_PW_ID_NONE,
4130 		{
4131 			.hsw.regs = &icl_aux_power_well_regs,
4132 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4133 			.hsw.is_tc_tbt = true,
4134 		},
4135 	},
4136 	{
4137 		.name = "AUX I TBT6",
4138 		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4139 		.ops = &hsw_power_well_ops,
4140 		.id = DISP_PW_ID_NONE,
4141 		{
4142 			.hsw.regs = &icl_aux_power_well_regs,
4143 			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4144 			.hsw.is_tc_tbt = true,
4145 		},
4146 	},
4147 	{
4148 		.name = "power well 4",
4149 		.domains = TGL_PW_4_POWER_DOMAINS,
4150 		.ops = &hsw_power_well_ops,
4151 		.id = DISP_PW_ID_NONE,
4152 		{
4153 			.hsw.regs = &hsw_power_well_regs,
4154 			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
4155 			.hsw.has_fuses = true,
4156 			.hsw.irq_pipe_mask = BIT(PIPE_C),
4157 		}
4158 	},
4159 	{
4160 		.name = "power well 5",
4161 		.domains = TGL_PW_5_POWER_DOMAINS,
4162 		.ops = &hsw_power_well_ops,
4163 		.id = DISP_PW_ID_NONE,
4164 		{
4165 			.hsw.regs = &hsw_power_well_regs,
4166 			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
4167 			.hsw.has_fuses = true,
4168 			.hsw.irq_pipe_mask = BIT(PIPE_D),
4169 		},
4170 	},
4171 };
4172 
4173 static int
4174 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4175 				   int disable_power_well)
4176 {
4177 	if (disable_power_well >= 0)
4178 		return !!disable_power_well;
4179 
4180 	return 1;
4181 }
4182 
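/*
 * Worked example: the i915.disable_power_well modparam is tri-state, and
 * the logic above collapses it to a boolean with -1 meaning "auto":
 *
 *	sanitize_disable_power_well_option(dev_priv, -1) == 1
 *	sanitize_disable_power_well_option(dev_priv,  0) == 0
 *	sanitize_disable_power_well_option(dev_priv,  5) == 1
 */
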
4183 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4184 			       int enable_dc)
4185 {
4186 	u32 mask;
4187 	int requested_dc;
4188 	int max_dc;
4189 
4190 	if (INTEL_GEN(dev_priv) >= 12) {
4191 		max_dc = 4;
4192 		/*
4193 		 * DC9 has a separate HW flow from the rest of the DC states,
4194 		 * not depending on the DMC firmware. It's needed by system
4195 		 * suspend/resume, so allow it unconditionally.
4196 		 */
4197 		mask = DC_STATE_EN_DC9;
4198 	} else if (IS_GEN(dev_priv, 11)) {
4199 		max_dc = 2;
4200 		mask = DC_STATE_EN_DC9;
4201 	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4202 		max_dc = 2;
4203 		mask = 0;
4204 	} else if (IS_GEN9_LP(dev_priv)) {
4205 		max_dc = 1;
4206 		mask = DC_STATE_EN_DC9;
4207 	} else {
4208 		max_dc = 0;
4209 		mask = 0;
4210 	}
4211 
4212 	if (!i915_modparams.disable_power_well)
4213 		max_dc = 0;
4214 
4215 	if (enable_dc >= 0 && enable_dc <= max_dc) {
4216 		requested_dc = enable_dc;
4217 	} else if (enable_dc == -1) {
4218 		requested_dc = max_dc;
4219 	} else if (enable_dc > max_dc && enable_dc <= 4) {
4220 		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
4221 			      enable_dc, max_dc);
4222 		requested_dc = max_dc;
4223 	} else {
4224 		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
4225 		requested_dc = max_dc;
4226 	}
4227 
4228 	switch (requested_dc) {
4229 	case 4:
4230 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4231 		break;
4232 	case 3:
4233 		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4234 		break;
4235 	case 2:
4236 		mask |= DC_STATE_EN_UPTO_DC6;
4237 		break;
4238 	case 1:
4239 		mask |= DC_STATE_EN_UPTO_DC5;
4240 		break;
4241 	}
4242 
4243 	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
4244 
4245 	return mask;
4246 }
4247 
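/*
 * Worked example: on a gen12 part (max_dc = 4, DC9 always allowed) with
 * i915.enable_dc=2 and power well disabling left enabled, the function
 * returns
 *
 *	DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC6
 *
 * whereas i915.enable_dc=3 trades DC6 for DC3CO:
 *
 *	DC_STATE_EN_DC9 | DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5
 */
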
4248 static int
4249 __set_power_wells(struct i915_power_domains *power_domains,
4250 		  const struct i915_power_well_desc *power_well_descs,
4251 		  int power_well_count)
4252 {
4253 	u64 power_well_ids = 0;
4254 	int i;
4255 
4256 	power_domains->power_well_count = power_well_count;
4257 	power_domains->power_wells =
4258 				kcalloc(power_well_count,
4259 					sizeof(*power_domains->power_wells),
4260 					GFP_KERNEL);
4261 	if (!power_domains->power_wells)
4262 		return -ENOMEM;
4263 
4264 	for (i = 0; i < power_well_count; i++) {
4265 		enum i915_power_well_id id = power_well_descs[i].id;
4266 
4267 		power_domains->power_wells[i].desc = &power_well_descs[i];
4268 
4269 		if (id == DISP_PW_ID_NONE)
4270 			continue;
4271 
4272 		WARN_ON(id >= sizeof(power_well_ids) * 8);
4273 		WARN_ON(power_well_ids & BIT_ULL(id));
4274 		power_well_ids |= BIT_ULL(id);
4275 	}
4276 
4277 	return 0;
4278 }
4279 
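/*
 * The BIT_ULL() bookkeeping above only enforces an invariant: every well
 * with a real id must carry a unique one so lookup_power_well() can
 * resolve it. In sketch form:
 *
 *	u64 seen = 0;
 *	for each well whose id != DISP_PW_ID_NONE:
 *		WARN_ON(seen & BIT_ULL(id));	(no duplicate ids)
 *		seen |= BIT_ULL(id);
 */
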
4280 #define set_power_wells(power_domains, __power_well_descs) \
4281 	__set_power_wells(power_domains, __power_well_descs, \
4282 			  ARRAY_SIZE(__power_well_descs))
4283 
4284 /**
4285  * intel_power_domains_init - initializes the power domain structures
4286  * @dev_priv: i915 device instance
4287  *
4288  * Initializes the power domain structures for @dev_priv depending upon the
4289  * supported platform.
4290  */
4291 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4292 {
4293 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4294 	int err;
4295 
4296 	i915_modparams.disable_power_well =
4297 		sanitize_disable_power_well_option(dev_priv,
4298 						   i915_modparams.disable_power_well);
4299 	dev_priv->csr.allowed_dc_mask =
4300 		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4301 
4302 	dev_priv->csr.target_dc_state =
4303 		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4304 
4305 	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4306 
4307 	mutex_init(&power_domains->lock);
4308 
4309 	INIT_DELAYED_WORK(&power_domains->async_put_work,
4310 			  intel_display_power_put_async_work);
4311 
4312 	/*
4313 	 * The enabling order will be from lower to higher indexed wells,
4314 	 * the disabling order is reversed.
4315 	 */
4316 	if (IS_GEN(dev_priv, 12)) {
4317 		err = set_power_wells(power_domains, tgl_power_wells);
4318 	} else if (IS_ELKHARTLAKE(dev_priv)) {
4319 		err = set_power_wells(power_domains, ehl_power_wells);
4320 	} else if (IS_GEN(dev_priv, 11)) {
4321 		err = set_power_wells(power_domains, icl_power_wells);
4322 	} else if (IS_CANNONLAKE(dev_priv)) {
4323 		err = set_power_wells(power_domains, cnl_power_wells);
4324 
4325 		/*
4326 		 * DDI and AUX IO are getting enabled for all ports
4327 		 * regardless of their presence or use. So, in order to
4328 		 * avoid timeouts, let's remove the last two wells above
4329 		 * (DDI F IO and AUX F) from the list for SKUs without port F.
4330 		 */
4331 		if (!IS_CNL_WITH_PORT_F(dev_priv))
4332 			power_domains->power_well_count -= 2;
4333 	} else if (IS_GEMINILAKE(dev_priv)) {
4334 		err = set_power_wells(power_domains, glk_power_wells);
4335 	} else if (IS_BROXTON(dev_priv)) {
4336 		err = set_power_wells(power_domains, bxt_power_wells);
4337 	} else if (IS_GEN9_BC(dev_priv)) {
4338 		err = set_power_wells(power_domains, skl_power_wells);
4339 	} else if (IS_CHERRYVIEW(dev_priv)) {
4340 		err = set_power_wells(power_domains, chv_power_wells);
4341 	} else if (IS_BROADWELL(dev_priv)) {
4342 		err = set_power_wells(power_domains, bdw_power_wells);
4343 	} else if (IS_HASWELL(dev_priv)) {
4344 		err = set_power_wells(power_domains, hsw_power_wells);
4345 	} else if (IS_VALLEYVIEW(dev_priv)) {
4346 		err = set_power_wells(power_domains, vlv_power_wells);
4347 	} else if (IS_I830(dev_priv)) {
4348 		err = set_power_wells(power_domains, i830_power_wells);
4349 	} else {
4350 		err = set_power_wells(power_domains, i9xx_always_on_power_well);
4351 	}
4352 
4353 	return err;
4354 }
4355 
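/*
 * A sketch of the expected pairing in the caller, assuming the usual
 * probe-time unwinding (the actual call sites live outside this file):
 *
 *	ret = intel_power_domains_init(dev_priv);
 *	if (ret)
 *		goto err;
 *	...
 *	intel_power_domains_cleanup(dev_priv);
 */
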
4356 /**
4357  * intel_power_domains_cleanup - clean up power domains resources
4358  * @dev_priv: i915 device instance
4359  *
4360  * Release any resources acquired by intel_power_domains_init()
4361  */
4362 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4363 {
4364 	kfree(dev_priv->power_domains.power_wells);
4365 	mutex_destroy(&dev_priv->power_domains.lock);
4366 }
4367 
4368 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4369 {
4370 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4371 	struct i915_power_well *power_well;
4372 
4373 	mutex_lock(&power_domains->lock);
4374 	for_each_power_well(dev_priv, power_well) {
4375 		power_well->desc->ops->sync_hw(dev_priv, power_well);
4376 		power_well->hw_enabled =
4377 			power_well->desc->ops->is_enabled(dev_priv, power_well);
4378 	}
4379 	mutex_unlock(&power_domains->lock);
4380 }
4381 
4382 static inline
4383 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4384 			  i915_reg_t reg, bool enable)
4385 {
4386 	u32 val, status;
4387 
4388 	val = I915_READ(reg);
4389 	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4390 	I915_WRITE(reg, val);
4391 	POSTING_READ(reg);
4392 	udelay(10);
4393 
4394 	status = I915_READ(reg) & DBUF_POWER_STATE;
4395 	if ((enable && !status) || (!enable && status)) {
4396 		DRM_ERROR("DBus power %s timeout!\n",
4397 			  enable ? "enable" : "disable");
4398 		return false;
4399 	}
4400 	return true;
4401 }
4402 
4403 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4404 {
4405 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
4406 }
4407 
4408 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4409 {
4410 	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
4411 }
4412 
4413 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
4414 {
4415 	if (INTEL_GEN(dev_priv) < 11)
4416 		return 1;
4417 	return 2;
4418 }
4419 
4420 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4421 			    u8 req_slices)
4422 {
4423 	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
4424 	bool ret;
4425 
4426 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
4427 		DRM_ERROR("Invalid number of dbuf slices requested\n");
4428 		return;
4429 	}
4430 
4431 	if (req_slices == hw_enabled_slices || req_slices == 0)
4432 		return;
4433 
4434 	if (req_slices > hw_enabled_slices)
4435 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
4436 	else
4437 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
4438 
4439 	if (ret)
4440 		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
4441 }
4442 
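/*
 * Usage sketch: a caller that has decided the current plane configuration
 * needs both DBuf slices on gen11 would request
 *
 *	icl_dbuf_slices_update(dev_priv, 2);
 *
 * and the second slice is toggled through the DBUF_CTL_S2 request/state
 * handshake in intel_dbuf_slice_set() above; requesting 1 powers it back
 * down while slice 1 stays untouched.
 */
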
4443 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4444 {
4445 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
4446 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
4447 	POSTING_READ(DBUF_CTL_S2);
4448 
4449 	udelay(10);
4450 
4451 	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4452 	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4453 		DRM_ERROR("DBuf power enable timeout\n");
4454 	else
4455 		/*
4456 		 * FIXME: for now pretend that we only have 1 slice, see
4457 		 * intel_enabled_dbuf_slices_num().
4458 		 */
4459 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4460 }
4461 
4462 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4463 {
4464 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
4465 	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
4466 	POSTING_READ(DBUF_CTL_S2);
4467 
4468 	udelay(10);
4469 
4470 	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4471 	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4472 		DRM_ERROR("DBuf power disable timeout!\n");
4473 	else
4474 		/*
4475 		 * FIXME: for now pretend that the first slice is always
4476 		 * enabled, see intel_enabled_dbuf_slices_num().
4477 		 */
4478 		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4479 }
4480 
4481 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4482 {
4483 	u32 val;
4484 
4485 	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4486 	      MBUS_ABOX_BT_CREDIT_POOL2(16) |
4487 	      MBUS_ABOX_B_CREDIT(1) |
4488 	      MBUS_ABOX_BW_CREDIT(1);
4489 
4490 	I915_WRITE(MBUS_ABOX_CTL, val);
4491 }
4492 
4493 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4494 {
4495 	u32 val = I915_READ(LCPLL_CTL);
4496 
4497 	/*
4498 	 * The LCPLL register should be turned on by the BIOS. For now
4499 	 * let's just check its state and print errors in case
4500 	 * something is wrong.  Don't even try to turn it on.
4501 	 */
4502 
4503 	if (val & LCPLL_CD_SOURCE_FCLK)
4504 		DRM_ERROR("CDCLK source is not LCPLL\n");
4505 
4506 	if (val & LCPLL_PLL_DISABLE)
4507 		DRM_ERROR("LCPLL is disabled\n");
4508 
4509 	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4510 		DRM_ERROR("LCPLL not using non-SSC reference\n");
4511 }
4512 
4513 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4514 {
4515 	struct drm_device *dev = &dev_priv->drm;
4516 	struct intel_crtc *crtc;
4517 
4518 	for_each_intel_crtc(dev, crtc)
4519 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4520 				pipe_name(crtc->pipe));
4521 
4522 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
4523 			"Display power well on\n");
4524 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
4525 			"SPLL enabled\n");
4526 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4527 			"WRPLL1 enabled\n");
4528 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4529 			"WRPLL2 enabled\n");
4530 	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
4531 			"Panel power on\n");
4532 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4533 			"CPU PWM1 enabled\n");
4534 	if (IS_HASWELL(dev_priv))
4535 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4536 				"CPU PWM2 enabled\n");
4537 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4538 			"PCH PWM1 enabled\n");
4539 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4540 			"Utility pin enabled\n");
4541 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
4542 			"PCH GTC enabled\n");
4543 
4544 	/*
4545 	 * In theory we can still leave IRQs enabled, as long as only the HPD
4546 	 * interrupts remain enabled. We used to check for that, but since it's
4547 	 * gen-specific and since we only disable LCPLL after we fully disable
4548 	 * the interrupts, the check below should be enough.
4549 	 */
4550 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4551 }
4552 
4553 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4554 {
4555 	if (IS_HASWELL(dev_priv))
4556 		return I915_READ(D_COMP_HSW);
4557 	else
4558 		return I915_READ(D_COMP_BDW);
4559 }
4560 
4561 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4562 {
4563 	if (IS_HASWELL(dev_priv)) {
4564 		if (sandybridge_pcode_write(dev_priv,
4565 					    GEN6_PCODE_WRITE_D_COMP, val))
4566 			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4567 	} else {
4568 		I915_WRITE(D_COMP_BDW, val);
4569 		POSTING_READ(D_COMP_BDW);
4570 	}
4571 }
4572 
4573 /*
4574  * This function implements pieces of two sequences from BSpec:
4575  * - Sequence for display software to disable LCPLL
4576  * - Sequence for display software to allow package C8+
4577  * The steps implemented here are just the steps that actually touch the LCPLL
4578  * register. Callers should take care of disabling all the display engine
4579  * functions, doing the mode unset, fixing interrupts, etc.
4580  */
4581 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4582 			      bool switch_to_fclk, bool allow_power_down)
4583 {
4584 	u32 val;
4585 
4586 	assert_can_disable_lcpll(dev_priv);
4587 
4588 	val = I915_READ(LCPLL_CTL);
4589 
4590 	if (switch_to_fclk) {
4591 		val |= LCPLL_CD_SOURCE_FCLK;
4592 		I915_WRITE(LCPLL_CTL, val);
4593 
4594 		if (wait_for_us(I915_READ(LCPLL_CTL) &
4595 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
4596 			DRM_ERROR("Switching to FCLK failed\n");
4597 
4598 		val = I915_READ(LCPLL_CTL);
4599 	}
4600 
4601 	val |= LCPLL_PLL_DISABLE;
4602 	I915_WRITE(LCPLL_CTL, val);
4603 	POSTING_READ(LCPLL_CTL);
4604 
4605 	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4606 		DRM_ERROR("LCPLL still locked\n");
4607 
4608 	val = hsw_read_dcomp(dev_priv);
4609 	val |= D_COMP_COMP_DISABLE;
4610 	hsw_write_dcomp(dev_priv, val);
4611 	ndelay(100);
4612 
4613 	if (wait_for((hsw_read_dcomp(dev_priv) &
4614 		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4615 		DRM_ERROR("D_COMP RCOMP still in progress\n");
4616 
4617 	if (allow_power_down) {
4618 		val = I915_READ(LCPLL_CTL);
4619 		val |= LCPLL_POWER_DOWN_ALLOW;
4620 		I915_WRITE(LCPLL_CTL, val);
4621 		POSTING_READ(LCPLL_CTL);
4622 	}
4623 }
4624 
4625 /*
4626  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4627  * source.
4628  */
4629 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4630 {
4631 	u32 val;
4632 
4633 	val = I915_READ(LCPLL_CTL);
4634 
4635 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4636 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4637 		return;
4638 
4639 	/*
4640 	 * Make sure we're not in the PC8 state before disabling PC8, otherwise
4641 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4642 	 */
4643 	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4644 
4645 	if (val & LCPLL_POWER_DOWN_ALLOW) {
4646 		val &= ~LCPLL_POWER_DOWN_ALLOW;
4647 		I915_WRITE(LCPLL_CTL, val);
4648 		POSTING_READ(LCPLL_CTL);
4649 	}
4650 
4651 	val = hsw_read_dcomp(dev_priv);
4652 	val |= D_COMP_COMP_FORCE;
4653 	val &= ~D_COMP_COMP_DISABLE;
4654 	hsw_write_dcomp(dev_priv, val);
4655 
4656 	val = I915_READ(LCPLL_CTL);
4657 	val &= ~LCPLL_PLL_DISABLE;
4658 	I915_WRITE(LCPLL_CTL, val);
4659 
4660 	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4661 		DRM_ERROR("LCPLL not locked yet\n");
4662 
4663 	if (val & LCPLL_CD_SOURCE_FCLK) {
4664 		val = I915_READ(LCPLL_CTL);
4665 		val &= ~LCPLL_CD_SOURCE_FCLK;
4666 		I915_WRITE(LCPLL_CTL, val);
4667 
4668 		if (wait_for_us((I915_READ(LCPLL_CTL) &
4669 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4670 			DRM_ERROR("Switching back to LCPLL failed\n");
4671 	}
4672 
4673 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4674 
4675 	intel_update_cdclk(dev_priv);
4676 	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4677 }
4678 
4679 /*
4680  * Package states C8 and deeper are really deep PC states that can only be
4681  * reached when all the devices on the system allow it, so even if the graphics
4682  * device allows PC8+, it doesn't mean the system will actually get to these
4683  * states. Our driver only allows PC8+ when going into runtime PM.
4684  *
4685  * The requirements for PC8+ are that all the outputs are disabled, the power
4686  * well is disabled and most interrupts are disabled, and these are also
4687  * requirements for runtime PM. When these conditions are met, we manually do
4688  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4689  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
4690  * hang the machine.
4691  *
4692  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4693  * the state of some registers, so when we come back from PC8+ we need to
4694  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4695  * need to take care of the registers kept by RC6. Notice that this happens even
4696  * if we don't put the device in PCI D3 state (which is what currently happens
4697  * because of the runtime PM support).
4698  *
4699  * For more, read "Display Sequences for Package C8" on the hardware
4700  * documentation.
4701  */
4702 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4703 {
4704 	u32 val;
4705 
4706 	DRM_DEBUG_KMS("Enabling package C8+\n");
4707 
4708 	if (HAS_PCH_LPT_LP(dev_priv)) {
4709 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
4710 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4711 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4712 	}
4713 
4714 	lpt_disable_clkout_dp(dev_priv);
4715 	hsw_disable_lcpll(dev_priv, true, true);
4716 }
4717 
4718 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4719 {
4720 	u32 val;
4721 
4722 	DRM_DEBUG_KMS("Disabling package C8+\n");
4723 
4724 	hsw_restore_lcpll(dev_priv);
4725 	intel_init_pch_refclk(dev_priv);
4726 
4727 	if (HAS_PCH_LPT_LP(dev_priv)) {
4728 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
4729 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4730 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4731 	}
4732 }
4733 
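/*
 * The two functions above are a matched pair on the runtime-PM path
 * described in the comment before hsw_enable_pc8(); a sketch of the
 * round trip, with the call sites living elsewhere in the driver, is
 *
 *	hsw_enable_pc8(dev_priv);	(going into runtime suspend)
 *	...
 *	hsw_disable_pc8(dev_priv);	(coming back out)
 */
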
4734 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4735 				      bool enable)
4736 {
4737 	i915_reg_t reg;
4738 	u32 reset_bits, val;
4739 
4740 	if (IS_IVYBRIDGE(dev_priv)) {
4741 		reg = GEN7_MSG_CTL;
4742 		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4743 	} else {
4744 		reg = HSW_NDE_RSTWRN_OPT;
4745 		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4746 	}
4747 
4748 	val = I915_READ(reg);
4749 
4750 	if (enable)
4751 		val |= reset_bits;
4752 	else
4753 		val &= ~reset_bits;
4754 
4755 	I915_WRITE(reg, val);
4756 }
4757 
4758 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4759 				  bool resume)
4760 {
4761 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4762 	struct i915_power_well *well;
4763 
4764 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4765 
4766 	/* enable PCH reset handshake */
4767 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4768 
4769 	/* enable PG1 and Misc I/O */
4770 	mutex_lock(&power_domains->lock);
4771 
4772 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4773 	intel_power_well_enable(dev_priv, well);
4774 
4775 	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4776 	intel_power_well_enable(dev_priv, well);
4777 
4778 	mutex_unlock(&power_domains->lock);
4779 
4780 	intel_cdclk_init(dev_priv);
4781 
4782 	gen9_dbuf_enable(dev_priv);
4783 
4784 	if (resume && dev_priv->csr.dmc_payload)
4785 		intel_csr_load_program(dev_priv);
4786 }
4787 
4788 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4789 {
4790 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4791 	struct i915_power_well *well;
4792 
4793 	gen9_disable_dc_states(dev_priv);
4794 
4795 	gen9_dbuf_disable(dev_priv);
4796 
4797 	intel_cdclk_uninit(dev_priv);
4798 
4799 	/* The spec doesn't call for removing the reset handshake flag */
4800 	/* disable PG1 and Misc I/O */
4801 
4802 	mutex_lock(&power_domains->lock);
4803 
4804 	/*
4805 	 * BSpec says to keep the MISC IO power well enabled here, only
4806 	 * remove our request for power well 1.
4807 	 * Note that even though the driver's request is removed, power well 1
4808 	 * may stay enabled after this due to DMC's own request on it.
4809 	 */
4810 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4811 	intel_power_well_disable(dev_priv, well);
4812 
4813 	mutex_unlock(&power_domains->lock);
4814 
4815 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4816 }
4817 
4818 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4819 {
4820 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4821 	struct i915_power_well *well;
4822 
4823 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4824 
4825 	/*
4826 	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4827 	 * or else the reset will hang because there is no PCH to respond.
4828 	 * Move the handshake programming to the initialization sequence;
4829 	 * previously it was left up to the BIOS.
4830 	 */
4831 	intel_pch_reset_handshake(dev_priv, false);
4832 
4833 	/* Enable PG1 */
4834 	mutex_lock(&power_domains->lock);
4835 
4836 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4837 	intel_power_well_enable(dev_priv, well);
4838 
4839 	mutex_unlock(&power_domains->lock);
4840 
4841 	intel_cdclk_init(dev_priv);
4842 
4843 	gen9_dbuf_enable(dev_priv);
4844 
4845 	if (resume && dev_priv->csr.dmc_payload)
4846 		intel_csr_load_program(dev_priv);
4847 }
4848 
4849 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4850 {
4851 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4852 	struct i915_power_well *well;
4853 
4854 	gen9_disable_dc_states(dev_priv);
4855 
4856 	gen9_dbuf_disable(dev_priv);
4857 
4858 	intel_cdclk_uninit(dev_priv);
4859 
4860 	/* The spec doesn't call for removing the reset handshake flag */
4861 
4862 	/*
4863 	 * Disable PW1 (PG1).
4864 	 * Note that even though the driver's request is removed, power well 1
4865 	 * may stay enabled after this due to DMC's own request on it.
4866 	 */
4867 	mutex_lock(&power_domains->lock);
4868 
4869 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4870 	intel_power_well_disable(dev_priv, well);
4871 
4872 	mutex_unlock(&power_domains->lock);
4873 
4874 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4875 }
4876 
4877 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4878 {
4879 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4880 	struct i915_power_well *well;
4881 
4882 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4883 
4884 	/* 1. Enable PCH Reset Handshake */
4885 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4886 
4887 	/* 2-3. */
4888 	intel_combo_phy_init(dev_priv);
4889 
4890 	/*
4891 	 * 4. Enable Power Well 1 (PG1).
4892 	 *    The AUX IO power wells will be enabled on demand.
4893 	 */
4894 	mutex_lock(&power_domains->lock);
4895 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4896 	intel_power_well_enable(dev_priv, well);
4897 	mutex_unlock(&power_domains->lock);
4898 
4899 	/* 5. Enable CD clock */
4900 	intel_cdclk_init(dev_priv);
4901 
4902 	/* 6. Enable DBUF */
4903 	gen9_dbuf_enable(dev_priv);
4904 
4905 	if (resume && dev_priv->csr.dmc_payload)
4906 		intel_csr_load_program(dev_priv);
4907 }
4908 
4909 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4910 {
4911 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4912 	struct i915_power_well *well;
4913 
4914 	gen9_disable_dc_states(dev_priv);
4915 
4916 	/* 1. Disable all display engine functions -> already done */
4917 
4918 	/* 2. Disable DBUF */
4919 	gen9_dbuf_disable(dev_priv);
4920 
4921 	/* 3. Disable CD clock */
4922 	intel_cdclk_uninit(dev_priv);
4923 
4924 	/*
4925 	 * 4. Disable Power Well 1 (PG1).
4926 	 *    The AUX IO power wells are toggled on demand, so they are already
4927 	 *    disabled at this point.
4928 	 */
4929 	mutex_lock(&power_domains->lock);
4930 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4931 	intel_power_well_disable(dev_priv, well);
4932 	mutex_unlock(&power_domains->lock);
4933 
4934 	usleep_range(10, 30);		/* 10 us delay per Bspec */
4935 
4936 	/* 5. */
4937 	intel_combo_phy_uninit(dev_priv);
4938 }
4939 
4940 struct buddy_page_mask {
4941 	u32 page_mask;
4942 	u8 type;
4943 	u8 num_channels;
4944 };
4945 
4946 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
4947 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
4948 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
4949 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
4950 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
4951 	{}
4952 };
4953 
4954 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
4955 	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
4956 	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
4957 	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
4958 	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
4959 	{}
4960 };
4961 
4962 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
4963 {
4964 	enum intel_dram_type type = dev_priv->dram_info.type;
4965 	u8 num_channels = dev_priv->dram_info.num_channels;
4966 	const struct buddy_page_mask *table;
4967 	int i;
4968 
4969 	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
4970 		/* Wa_1409767108: tgl */
4971 		table = wa_1409767108_buddy_page_masks;
4972 	else
4973 		table = tgl_buddy_page_masks;
4974 
4975 	for (i = 0; table[i].page_mask != 0; i++)
4976 		if (table[i].num_channels == num_channels &&
4977 		    table[i].type == type)
4978 			break;
4979 
4980 	if (table[i].page_mask == 0) {
4981 		DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
4982 		I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
4983 		I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
4984 	} else {
4985 		I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
4986 		I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
4987 	}
4988 }
4989 
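/*
 * A minimal sketch (compiled out) of the sentinel-terminated table scan
 * performed by tgl_bw_buddy_init() above; "example_find_page_mask" is a
 * hypothetical helper.  The all-zero terminator entry doubles as the
 * "unknown configuration" result, since its page_mask is 0.
 */
#if 0
static const struct buddy_page_mask *
example_find_page_mask(const struct buddy_page_mask *table,
		       enum intel_dram_type type, u8 num_channels)
{
	int i;

	/* stop at a matching entry or at the {0} terminator */
	for (i = 0; table[i].page_mask != 0; i++)
		if (table[i].num_channels == num_channels &&
		    table[i].type == type)
			break;

	return &table[i];	/* page_mask == 0 means no match */
}
#endif
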
4990 static void icl_display_core_init(struct drm_i915_private *dev_priv,
4991 				  bool resume)
4992 {
4993 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
4994 	struct i915_power_well *well;
4995 
4996 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4997 
4998 	/* 1. Enable PCH reset handshake. */
4999 	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5000 
5001 	/* 2. Initialize all combo phys */
5002 	intel_combo_phy_init(dev_priv);
5003 
5004 	/*
5005 	 * 3. Enable Power Well 1 (PG1).
5006 	 *    The AUX IO power wells will be enabled on demand.
5007 	 */
5008 	mutex_lock(&power_domains->lock);
5009 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5010 	intel_power_well_enable(dev_priv, well);
5011 	mutex_unlock(&power_domains->lock);
5012 
5013 	/* 4. Enable CDCLK. */
5014 	intel_cdclk_init(dev_priv);
5015 
5016 	/* 5. Enable DBUF. */
5017 	icl_dbuf_enable(dev_priv);
5018 
5019 	/* 6. Setup MBUS. */
5020 	icl_mbus_init(dev_priv);
5021 
5022 	/* 7. Program arbiter BW_BUDDY registers */
5023 	if (INTEL_GEN(dev_priv) >= 12)
5024 		tgl_bw_buddy_init(dev_priv);
5025 
5026 	if (resume && dev_priv->csr.dmc_payload)
5027 		intel_csr_load_program(dev_priv);
5028 }
5029 
5030 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5031 {
5032 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
5033 	struct i915_power_well *well;
5034 
5035 	gen9_disable_dc_states(dev_priv);
5036 
5037 	/* 1. Disable all display engine functions -> already done */
5038 
5039 	/* 2. Disable DBUF */
5040 	icl_dbuf_disable(dev_priv);
5041 
5042 	/* 3. Disable CD clock */
5043 	intel_cdclk_uninit(dev_priv);
5044 
5045 	/*
5046 	 * 4. Disable Power Well 1 (PG1).
5047 	 *    The AUX IO power wells are toggled on demand, so they are already
5048 	 *    disabled at this point.
5049 	 */
5050 	mutex_lock(&power_domains->lock);
5051 	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5052 	intel_power_well_disable(dev_priv, well);
5053 	mutex_unlock(&power_domains->lock);
5054 
5055 	/* 5. */
5056 	intel_combo_phy_uninit(dev_priv);
5057 }
5058 
5059 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5060 {
5061 	struct i915_power_well *cmn_bc =
5062 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5063 	struct i915_power_well *cmn_d =
5064 		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5065 
5066 	/*
5067 	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5068 	 * workaround never ever read DISPLAY_PHY_CONTROL, and
5069 	 * instead maintain a shadow copy ourselves. Use the actual
5070 	 * power well state and lane status to reconstruct the
5071 	 * expected initial value.
5072 	 */
5073 	dev_priv->chv_phy_control =
5074 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5075 		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5076 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5077 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5078 		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5079 
5080 	/*
5081 	 * If all lanes are disabled we leave the override disabled
5082 	 * with all power down bits cleared to match the state we
5083 	 * would use after disabling the port. Otherwise enable the
5084 	 * override and set the lane powerdown bits according to the
5085 	 * current lane status.
5086 	 */
5087 	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5088 		u32 status = I915_READ(DPLL(PIPE_A));
5089 		unsigned int mask;
5090 
5091 		mask = status & DPLL_PORTB_READY_MASK;
5092 		if (mask == 0xf)
5093 			mask = 0x0;
5094 		else
5095 			dev_priv->chv_phy_control |=
5096 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5097 
5098 		dev_priv->chv_phy_control |=
5099 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5100 
5101 		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5102 		if (mask == 0xf)
5103 			mask = 0x0;
5104 		else
5105 			dev_priv->chv_phy_control |=
5106 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5107 
5108 		dev_priv->chv_phy_control |=
5109 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5110 
5111 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5112 
5113 		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5114 	} else {
5115 		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5116 	}
5117 
5118 	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5119 		u32 status = I915_READ(DPIO_PHY_STATUS);
5120 		unsigned int mask;
5121 
5122 		mask = status & DPLL_PORTD_READY_MASK;
5123 
5124 		if (mask == 0xf)
5125 			mask = 0x0;
5126 		else
5127 			dev_priv->chv_phy_control |=
5128 				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5129 
5130 		dev_priv->chv_phy_control |=
5131 			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5132 
5133 		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5134 
5135 		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5136 	} else {
5137 		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5138 	}
5139 
5140 	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
5141 
5142 	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
5143 		      dev_priv->chv_phy_control);
5144 }
5145 
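/*
 * A minimal sketch (compiled out) of the shadow-register pattern used
 * for DISPLAY_PHY_CONTROL above: mutate the cached SW copy, then write
 * it out, never reading the register back.  "example_set_phy_control_bits"
 * is a hypothetical helper.
 */
#if 0
static void example_set_phy_control_bits(struct drm_i915_private *dev_priv,
					 u32 set_bits)
{
	dev_priv->chv_phy_control |= set_bits;	/* update the SW shadow */
	/* write-only: the register may corrupt if it is ever read */
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}
#endif
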
5146 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5147 {
5148 	struct i915_power_well *cmn =
5149 		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5150 	struct i915_power_well *disp2d =
5151 		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5152 
5153 	/* If the display might already be active, skip this */
5154 	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5155 	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5156 	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
5157 		return;
5158 
5159 	DRM_DEBUG_KMS("toggling display PHY side reset\n");
5160 
5161 	/* cmnlane needs DPLL registers */
5162 	disp2d->desc->ops->enable(dev_priv, disp2d);
5163 
5164 	/*
5165 	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5166 	 * Need to assert and de-assert PHY SB reset by gating the
5167 	 * common lane power, then un-gating it.
5168 	 * Simply ungating isn't enough to reset the PHY sufficiently to get
5169 	 * ports and lanes running.
5170 	 */
5171 	cmn->desc->ops->disable(dev_priv, cmn);
5172 }
5173 
5174 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5175 {
5176 	bool ret;
5177 
5178 	vlv_punit_get(dev_priv);
5179 	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5180 	vlv_punit_put(dev_priv);
5181 
5182 	return ret;
5183 }
5184 
5185 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5186 {
5187 	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5188 	     "VED not power gated\n");
5189 }
5190 
5191 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5192 {
5193 	static const struct pci_device_id isp_ids[] = {
5194 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5195 		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5196 		{}
5197 	};
5198 
5199 	WARN(!pci_dev_present(isp_ids) &&
5200 	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5201 	     "ISP not power gated\n");
5202 }
5203 
5204 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5205 
5206 /**
5207  * intel_power_domains_init_hw - initialize hardware power domain state
5208  * @i915: i915 device instance
5209  * @resume: Called from resume code paths or not
5210  *
5211  * This function initializes the hardware power domain state and enables all
5212  * power wells belonging to the INIT power domain. Power wells in other
5213  * domains (and not in the INIT domain) are referenced or disabled by
5214  * intel_modeset_readout_hw_state(). After that the reference count of each
5215  * power well must match its HW enabled state, see
5216  * intel_power_domains_verify_state().
5217  *
5218  * It will return with power domains disabled (to be enabled later by
5219  * intel_power_domains_enable()) and must be paired with
5220  * intel_power_domains_driver_remove().
5221  */
5222 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5223 {
5224 	struct i915_power_domains *power_domains = &i915->power_domains;
5225 
5226 	power_domains->initializing = true;
5227 
5228 	/* Must happen before power domain init on VLV/CHV */
5229 	intel_update_rawclk(i915);
5230 
5231 	if (INTEL_GEN(i915) >= 11) {
5232 		icl_display_core_init(i915, resume);
5233 	} else if (IS_CANNONLAKE(i915)) {
5234 		cnl_display_core_init(i915, resume);
5235 	} else if (IS_GEN9_BC(i915)) {
5236 		skl_display_core_init(i915, resume);
5237 	} else if (IS_GEN9_LP(i915)) {
5238 		bxt_display_core_init(i915, resume);
5239 	} else if (IS_CHERRYVIEW(i915)) {
5240 		mutex_lock(&power_domains->lock);
5241 		chv_phy_control_init(i915);
5242 		mutex_unlock(&power_domains->lock);
5243 		assert_isp_power_gated(i915);
5244 	} else if (IS_VALLEYVIEW(i915)) {
5245 		mutex_lock(&power_domains->lock);
5246 		vlv_cmnlane_wa(i915);
5247 		mutex_unlock(&power_domains->lock);
5248 		assert_ved_power_gated(i915);
5249 		assert_isp_power_gated(i915);
5250 	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5251 		hsw_assert_cdclk(i915);
5252 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5253 	} else if (IS_IVYBRIDGE(i915)) {
5254 		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5255 	}
5256 
5257 	/*
5258 	 * Keep all power wells enabled for any dependent HW access during
5259 	 * initialization and to make sure we keep BIOS enabled display HW
5260 	 * resources powered until display HW readout is complete. We drop
5261 	 * this reference in intel_power_domains_enable().
5262 	 */
5263 	power_domains->wakeref =
5264 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5265 
5266 	/* Disable power well support if the user asked for it. */
5267 	if (!i915_modparams.disable_power_well)
5268 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5269 	intel_power_domains_sync_hw(i915);
5270 
5271 	power_domains->initializing = false;
5272 }
5273 
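/*
 * A hedged usage sketch (compiled out, caller context hypothetical) of
 * how the entry points above and below pair up across a driver
 * load/unload cycle, per their kernel-doc; the real call sites live
 * elsewhere in the driver:
 */
#if 0
	intel_power_domains_init_hw(i915, false);	/* driver load */
	/* ... display HW state readout takes its own references ... */
	intel_power_domains_enable(i915);		/* readout complete */
	/* ... normal operation ... */
	intel_power_domains_disable(i915);		/* driver unload */
	intel_power_domains_driver_remove(i915);
#endif
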
5274 /**
5275  * intel_power_domains_driver_remove - deinitialize hw power domain state
5276  * @i915: i915 device instance
5277  *
5278  * De-initializes the display power domain HW state. It also ensures that the
5279  * device stays powered up so that the driver can be reloaded.
5280  *
5281  * It must be called with power domains already disabled (after a call to
5282  * intel_power_domains_disable()) and must be paired with
5283  * intel_power_domains_init_hw().
5284  */
5285 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5286 {
5287 	intel_wakeref_t wakeref __maybe_unused =
5288 		fetch_and_zero(&i915->power_domains.wakeref);
5289 
5290 	/* Remove the refcount we took to keep power well support disabled. */
5291 	if (!i915_modparams.disable_power_well)
5292 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5293 
5294 	intel_display_power_flush_work_sync(i915);
5295 
5296 	intel_power_domains_verify_state(i915);
5297 
5298 	/* Keep the power well enabled, but cancel its rpm wakeref. */
5299 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5300 }
5301 
5302 /**
5303  * intel_power_domains_enable - enable toggling of display power wells
5304  * @i915: i915 device instance
5305  *
5306  * Enable the on-demand enabling/disabling of the display power wells. Note that
5307  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5308  * only at specific points of the display modeset sequence, thus they are not
5309  * affected by the intel_power_domains_enable()/disable() calls. The purpose
5310  * of these functions is to keep the rest of the power wells enabled until the end
5311  * of display HW readout (which will acquire the power references reflecting
5312  * the current HW state).
5313  */
5314 void intel_power_domains_enable(struct drm_i915_private *i915)
5315 {
5316 	intel_wakeref_t wakeref __maybe_unused =
5317 		fetch_and_zero(&i915->power_domains.wakeref);
5318 
5319 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5320 	intel_power_domains_verify_state(i915);
5321 }
5322 
5323 /**
5324  * intel_power_domains_disable - disable toggling of display power wells
5325  * @i915: i915 device instance
5326  *
5327  * Disable the on-demand enabling/disabling of the display power wells. See
5328  * intel_power_domains_enable() for which power wells this call controls.
5329  */
5330 void intel_power_domains_disable(struct drm_i915_private *i915)
5331 {
5332 	struct i915_power_domains *power_domains = &i915->power_domains;
5333 
5334 	WARN_ON(power_domains->wakeref);
5335 	power_domains->wakeref =
5336 		intel_display_power_get(i915, POWER_DOMAIN_INIT);
5337 
5338 	intel_power_domains_verify_state(i915);
5339 }
5340 
5341 /**
5342  * intel_power_domains_suspend - suspend power domain state
5343  * @i915: i915 device instance
5344  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5345  *
5346  * This function prepares the hardware power domain state before entering
5347  * system suspend.
5348  *
5349  * It must be called with power domains already disabled (after a call to
5350  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5351  */
5352 void intel_power_domains_suspend(struct drm_i915_private *i915,
5353 				 enum i915_drm_suspend_mode suspend_mode)
5354 {
5355 	struct i915_power_domains *power_domains = &i915->power_domains;
5356 	intel_wakeref_t wakeref __maybe_unused =
5357 		fetch_and_zero(&power_domains->wakeref);
5358 
5359 	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5360 
5361 	/*
5362 	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5363 	 * support, don't manually deinit the power domains. This also means the
5364 	 * CSR/DMC firmware will stay active; it will power down any HW
5365 	 * resources as required and also enable deeper system power states
5366 	 * that would be blocked if the firmware was inactive.
5367 	 */
5368 	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5369 	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
5370 	    i915->csr.dmc_payload) {
5371 		intel_display_power_flush_work(i915);
5372 		intel_power_domains_verify_state(i915);
5373 		return;
5374 	}
5375 
5376 	/*
5377 	 * Even if power well support was disabled we still want to disable
5378 	 * power wells if power domains must be deinitialized for suspend.
5379 	 */
5380 	if (!i915_modparams.disable_power_well)
5381 		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5382 
5383 	intel_display_power_flush_work(i915);
5384 	intel_power_domains_verify_state(i915);
5385 
5386 	if (INTEL_GEN(i915) >= 11)
5387 		icl_display_core_uninit(i915);
5388 	else if (IS_CANNONLAKE(i915))
5389 		cnl_display_core_uninit(i915);
5390 	else if (IS_GEN9_BC(i915))
5391 		skl_display_core_uninit(i915);
5392 	else if (IS_GEN9_LP(i915))
5393 		bxt_display_core_uninit(i915);
5394 
5395 	power_domains->display_core_suspended = true;
5396 }
5397 
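/*
 * A hedged sketch (compiled out) of the suspend/resume pairing described
 * in the kernel-doc above; I915_DRM_SUSPEND_MEM is assumed to be the
 * suspend-to-mem member of enum i915_drm_suspend_mode, and the caller
 * context is hypothetical:
 */
#if 0
	intel_power_domains_disable(i915);
	intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
	/* ... system sleeps ... */
	intel_power_domains_resume(i915);
	intel_power_domains_enable(i915);
#endif
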
5398 /**
5399  * intel_power_domains_resume - resume power domain state
5400  * @i915: i915 device instance
5401  *
5402  * This function resumes the hardware power domain state during system resume.
5403  *
5404  * It will return with power domain support disabled (to be enabled later by
5405  * intel_power_domains_enable()) and must be paired with
5406  * intel_power_domains_suspend().
5407  */
5408 void intel_power_domains_resume(struct drm_i915_private *i915)
5409 {
5410 	struct i915_power_domains *power_domains = &i915->power_domains;
5411 
5412 	if (power_domains->display_core_suspended) {
5413 		intel_power_domains_init_hw(i915, true);
5414 		power_domains->display_core_suspended = false;
5415 	} else {
5416 		WARN_ON(power_domains->wakeref);
5417 		power_domains->wakeref =
5418 			intel_display_power_get(i915, POWER_DOMAIN_INIT);
5419 	}
5420 
5421 	intel_power_domains_verify_state(i915);
5422 }
5423 
5424 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5425 
5426 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5427 {
5428 	struct i915_power_domains *power_domains = &i915->power_domains;
5429 	struct i915_power_well *power_well;
5430 
5431 	for_each_power_well(i915, power_well) {
5432 		enum intel_display_power_domain domain;
5433 
5434 		DRM_DEBUG_DRIVER("%-25s %d\n",
5435 				 power_well->desc->name, power_well->count);
5436 
5437 		for_each_power_domain(domain, power_well->desc->domains)
5438 			DRM_DEBUG_DRIVER("  %-23s %d\n",
5439 					 intel_display_power_domain_str(domain),
5440 					 power_domains->domain_use_count[domain]);
5441 	}
5442 }
5443 
5444 /**
5445  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5446  * @i915: i915 device instance
5447  *
5448  * Verify that the reference count of each power well matches its HW enabled
5449  * state and the total refcount of the domains it belongs to. This must be
5450  * called after modeset HW state sanitization, which is responsible for
5451  * acquiring reference counts for any power wells in use and disabling the
5452  * ones left on by BIOS but not required by any active output.
5453  */
5454 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5455 {
5456 	struct i915_power_domains *power_domains = &i915->power_domains;
5457 	struct i915_power_well *power_well;
5458 	bool dump_domain_info;
5459 
5460 	mutex_lock(&power_domains->lock);
5461 
5462 	verify_async_put_domains_state(power_domains);
5463 
5464 	dump_domain_info = false;
5465 	for_each_power_well(i915, power_well) {
5466 		enum intel_display_power_domain domain;
5467 		int domains_count;
5468 		bool enabled;
5469 
5470 		enabled = power_well->desc->ops->is_enabled(i915, power_well);
5471 		if ((power_well->count || power_well->desc->always_on) !=
5472 		    enabled)
5473 			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
5474 				  power_well->desc->name,
5475 				  power_well->count, enabled);
5476 
5477 		domains_count = 0;
5478 		for_each_power_domain(domain, power_well->desc->domains)
5479 			domains_count += power_domains->domain_use_count[domain];
5480 
5481 		if (power_well->count != domains_count) {
5482 			DRM_ERROR("power well %s refcount/domain refcount mismatch "
5483 				  "(refcount %d/domains refcount %d)\n",
5484 				  power_well->desc->name, power_well->count,
5485 				  domains_count);
5486 			dump_domain_info = true;
5487 		}
5488 	}
5489 
5490 	if (dump_domain_info) {
5491 		static bool dumped;
5492 
5493 		if (!dumped) {
5494 			intel_power_domains_dump_info(i915);
5495 			dumped = true;
5496 		}
5497 	}
5498 
5499 	mutex_unlock(&power_domains->lock);
5500 }
5501 
5502 #else
5503 
5504 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5505 {
5506 }
5507 
5508 #endif
5509 
5510 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5511 {
5512 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5513 		bxt_enable_dc9(i915);
5514 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5515 		hsw_enable_pc8(i915);
5516 }
5517 
5518 void intel_display_power_resume_early(struct drm_i915_private *i915)
5519 {
5520 	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5521 		gen9_sanitize_dc_state(i915);
5522 		bxt_disable_dc9(i915);
5523 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5524 		hsw_disable_pc8(i915);
5525 	}
5526 }
5527 
5528 void intel_display_power_suspend(struct drm_i915_private *i915)
5529 {
5530 	if (INTEL_GEN(i915) >= 11) {
5531 		icl_display_core_uninit(i915);
5532 		bxt_enable_dc9(i915);
5533 	} else if (IS_GEN9_LP(i915)) {
5534 		bxt_display_core_uninit(i915);
5535 		bxt_enable_dc9(i915);
5536 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5537 		hsw_enable_pc8(i915);
5538 	}
5539 }
5540 
5541 void intel_display_power_resume(struct drm_i915_private *i915)
5542 {
5543 	if (INTEL_GEN(i915) >= 11) {
5544 		bxt_disable_dc9(i915);
5545 		icl_display_core_init(i915, true);
5546 		if (i915->csr.dmc_payload) {
5547 			if (i915->csr.allowed_dc_mask &
5548 			    DC_STATE_EN_UPTO_DC6)
5549 				skl_enable_dc6(i915);
5550 			else if (i915->csr.allowed_dc_mask &
5551 				 DC_STATE_EN_UPTO_DC5)
5552 				gen9_enable_dc5(i915);
5553 		}
5554 	} else if (IS_GEN9_LP(i915)) {
5555 		bxt_disable_dc9(i915);
5556 		bxt_display_core_init(i915, true);
5557 		if (i915->csr.dmc_payload &&
5558 		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5559 			gen9_enable_dc5(i915);
5560 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5561 		hsw_disable_pc8(i915);
5562 	}
5563 }
5564