xref: /openbsd-src/sys/dev/pci/drm/i915/display/intel_display.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <acpi/video.h>
28 #include <linux/i2c.h>
29 #include <linux/input.h>
30 #include <linux/intel-iommu.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/dma-resv.h>
34 #include <linux/slab.h>
35 
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_damage_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_edid.h>
42 #include <drm/drm_fourcc.h>
43 #include <drm/drm_plane_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/drm_rect.h>
46 
47 #include "display/intel_audio.h"
48 #include "display/intel_crt.h"
49 #include "display/intel_ddi.h"
50 #include "display/intel_display_debugfs.h"
51 #include "display/intel_dp.h"
52 #include "display/intel_dp_mst.h"
53 #include "display/intel_dpll.h"
54 #include "display/intel_dpll_mgr.h"
55 #include "display/intel_dsi.h"
56 #include "display/intel_dvo.h"
57 #include "display/intel_fb.h"
58 #include "display/intel_gmbus.h"
59 #include "display/intel_hdmi.h"
60 #include "display/intel_lvds.h"
61 #include "display/intel_sdvo.h"
62 #include "display/intel_snps_phy.h"
63 #include "display/intel_tv.h"
64 #include "display/intel_vdsc.h"
65 #include "display/intel_vrr.h"
66 
67 #include "gem/i915_gem_lmem.h"
68 #include "gem/i915_gem_object.h"
69 
70 #include "gt/intel_rps.h"
71 #include "gt/gen8_ppgtt.h"
72 
73 #include "g4x_dp.h"
74 #include "g4x_hdmi.h"
75 #include "i915_drv.h"
76 #include "intel_acpi.h"
77 #include "intel_atomic.h"
78 #include "intel_atomic_plane.h"
79 #include "intel_bw.h"
80 #include "intel_cdclk.h"
81 #include "intel_color.h"
82 #include "intel_crtc.h"
83 #include "intel_de.h"
84 #include "intel_display_types.h"
85 #include "intel_dmc.h"
86 #include "intel_dp_link_training.h"
87 #include "intel_dpt.h"
88 #include "intel_fbc.h"
89 #include "intel_fdi.h"
90 #include "intel_fbdev.h"
91 #include "intel_fifo_underrun.h"
92 #include "intel_frontbuffer.h"
93 #include "intel_hdcp.h"
94 #include "intel_hotplug.h"
95 #include "intel_overlay.h"
96 #include "intel_pipe_crc.h"
97 #include "intel_pm.h"
98 #include "intel_pps.h"
99 #include "intel_psr.h"
100 #include "intel_quirks.h"
101 #include "intel_sideband.h"
102 #include "intel_sprite.h"
103 #include "intel_tc.h"
104 #include "intel_vga.h"
105 #include "i9xx_plane.h"
106 #include "skl_scaler.h"
107 #include "skl_universal_plane.h"
108 
109 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
110 				struct intel_crtc_state *pipe_config);
111 static void ilk_pch_clock_get(struct intel_crtc *crtc,
112 			      struct intel_crtc_state *pipe_config);
113 
114 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
115 				  struct drm_i915_gem_object *obj,
116 				  struct drm_mode_fb_cmd2 *mode_cmd);
117 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
118 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
119 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
120 					 const struct intel_link_m_n *m_n,
121 					 const struct intel_link_m_n *m2_n2);
122 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
123 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
124 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
125 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
126 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
127 static void intel_modeset_setup_hw_state(struct drm_device *dev,
128 					 struct drm_modeset_acquire_ctx *ctx);
129 
130 /* returns HPLL frequency in kHz */
131 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
132 {
133 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
134 
135 	/* Obtain SKU information */
136 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
137 		CCK_FUSE_HPLL_FREQ_MASK;
138 
139 	return vco_freq[hpll_freq] * 1000;
140 }
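
/*
 * Worked example (illustrative numbers, not a real SKU): a fuse value
 * of 2 selects vco_freq[2] == 2000 MHz, so the function reports
 * 2000 * 1000 = 2,000,000 kHz for the HPLL VCO.
 */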
141 
142 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
143 		      const char *name, u32 reg, int ref_freq)
144 {
145 	u32 val;
146 	int divider;
147 
148 	val = vlv_cck_read(dev_priv, reg);
149 	divider = val & CCK_FREQUENCY_VALUES;
150 
151 	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
152 		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
153 		 "%s change in progress\n", name);
154 
155 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
156 }
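
/*
 * Worked example (illustrative numbers): with ref_freq == 2,000,000 kHz
 * and a divider field of 7, the result is
 * DIV_ROUND_CLOSEST(2000000 << 1, 7 + 1) == 500,000 kHz; i.e. the
 * register encodes the divider as (2 * ref / clock) - 1.
 */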
157 
158 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
159 			   const char *name, u32 reg)
160 {
161 	int hpll;
162 
163 	vlv_cck_get(dev_priv);
164 
165 	if (dev_priv->hpll_freq == 0)
166 		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
167 
168 	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
169 
170 	vlv_cck_put(dev_priv);
171 
172 	return hpll;
173 }
174 
175 static void intel_update_czclk(struct drm_i915_private *dev_priv)
176 {
177 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
178 		return;
179 
180 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
181 						      CCK_CZ_CLOCK_CONTROL);
182 
183 	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
184 		dev_priv->czclk_freq);
185 }
186 
187 /* WA Display #0827: Gen9:all */
188 static void
189 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
190 {
191 	if (enable)
192 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
193 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
194 	else
195 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
196 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
197 }
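
/*
 * A sketch only: the read-modify-write pair above could equally be
 * expressed with the intel_de_rmw() helper used elsewhere in this file:
 *
 *	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
 *		     DUPS1_GATING_DIS | DUPS2_GATING_DIS,
 *		     enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
 */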
198 
199 /* Wa_2006604312:icl,ehl */
200 static void
201 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
202 		       bool enable)
203 {
204 	if (enable)
205 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
206 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
207 	else
208 		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
209 		               intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
210 }
211 
212 static bool
213 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
214 {
215 	return crtc_state->master_transcoder != INVALID_TRANSCODER;
216 }
217 
218 static bool
219 is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
220 {
221 	return crtc_state->sync_mode_slaves_mask != 0;
222 }
223 
224 bool
225 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
226 {
227 	return is_trans_port_sync_master(crtc_state) ||
228 		is_trans_port_sync_slave(crtc_state);
229 }
230 
231 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
232 				    enum pipe pipe)
233 {
234 	i915_reg_t reg = PIPEDSL(pipe);
235 	u32 line1, line2;
236 	u32 line_mask;
237 
238 	if (DISPLAY_VER(dev_priv) == 2)
239 		line_mask = DSL_LINEMASK_GEN2;
240 	else
241 		line_mask = DSL_LINEMASK_GEN3;
242 
243 	line1 = intel_de_read(dev_priv, reg) & line_mask;
244 	drm_msleep(5);
245 	line2 = intel_de_read(dev_priv, reg) & line_mask;
246 
247 	return line1 != line2;
248 }
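
/*
 * The 5 ms between the two PIPEDSL samples spans many scanline times at
 * any plausible refresh rate, so two identical reads are presumably a
 * reliable sign that the pipe has stopped (or has not yet started).
 */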
249 
250 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
251 {
252 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
253 	enum pipe pipe = crtc->pipe;
254 
255 	/* Wait for the display line to settle/start moving */
256 	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
257 		drm_err(&dev_priv->drm,
258 			"pipe %c scanline %s wait timed out\n",
259 			pipe_name(pipe), onoff(state));
260 }
261 
262 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
263 {
264 	wait_for_pipe_scanline_moving(crtc, false);
265 }
266 
267 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
268 {
269 	wait_for_pipe_scanline_moving(crtc, true);
270 }
271 
272 static void
273 intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
274 {
275 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
276 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
277 
278 	if (DISPLAY_VER(dev_priv) >= 4) {
279 		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
280 		i915_reg_t reg = PIPECONF(cpu_transcoder);
281 
282 		/* Wait for the Pipe State to go off */
283 		if (intel_de_wait_for_clear(dev_priv, reg,
284 					    I965_PIPECONF_ACTIVE, 100))
285 			drm_WARN(&dev_priv->drm, 1,
286 				 "pipe_off wait timed out\n");
287 	} else {
288 		intel_wait_for_pipe_scanline_stopped(crtc);
289 	}
290 }
291 
292 /* Only for pre-ILK configs */
293 void assert_pll(struct drm_i915_private *dev_priv,
294 		enum pipe pipe, bool state)
295 {
296 	u32 val;
297 	bool cur_state;
298 
299 	val = intel_de_read(dev_priv, DPLL(pipe));
300 	cur_state = !!(val & DPLL_VCO_ENABLE);
301 	I915_STATE_WARN(cur_state != state,
302 	     "PLL state assertion failure (expected %s, current %s)\n",
303 			onoff(state), onoff(cur_state));
304 }
305 
306 /* XXX: the dsi pll is shared between MIPI DSI ports */
307 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
308 {
309 	u32 val;
310 	bool cur_state;
311 
312 	vlv_cck_get(dev_priv);
313 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
314 	vlv_cck_put(dev_priv);
315 
316 	cur_state = val & DSI_PLL_VCO_EN;
317 	I915_STATE_WARN(cur_state != state,
318 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
319 			onoff(state), onoff(cur_state));
320 }
321 
322 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
323 			  enum pipe pipe, bool state)
324 {
325 	bool cur_state;
326 
327 	if (HAS_DDI(dev_priv)) {
328 		/*
329 		 * DDI does not have a specific FDI_TX register.
330 		 *
		 * FDI is never fed from the EDP transcoder,
		 * so the pipe->transcoder cast is fine here.
333 		 */
334 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
335 		u32 val = intel_de_read(dev_priv,
336 					TRANS_DDI_FUNC_CTL(cpu_transcoder));
337 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
338 	} else {
339 		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
340 		cur_state = !!(val & FDI_TX_ENABLE);
341 	}
342 	I915_STATE_WARN(cur_state != state,
343 	     "FDI TX state assertion failure (expected %s, current %s)\n",
344 			onoff(state), onoff(cur_state));
345 }
346 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
347 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
348 
349 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
350 			  enum pipe pipe, bool state)
351 {
352 	u32 val;
353 	bool cur_state;
354 
355 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
356 	cur_state = !!(val & FDI_RX_ENABLE);
357 	I915_STATE_WARN(cur_state != state,
358 	     "FDI RX state assertion failure (expected %s, current %s)\n",
359 			onoff(state), onoff(cur_state));
360 }
361 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
362 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
363 
364 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
365 				      enum pipe pipe)
366 {
367 	u32 val;
368 
369 	/* ILK FDI PLL is always enabled */
370 	if (IS_IRONLAKE(dev_priv))
371 		return;
372 
373 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
374 	if (HAS_DDI(dev_priv))
375 		return;
376 
377 	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
378 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
379 }
380 
381 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
382 		       enum pipe pipe, bool state)
383 {
384 	u32 val;
385 	bool cur_state;
386 
387 	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
388 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
389 	I915_STATE_WARN(cur_state != state,
390 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
391 			onoff(state), onoff(cur_state));
392 }
393 
394 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
395 {
396 	i915_reg_t pp_reg;
397 	u32 val;
398 	enum pipe panel_pipe = INVALID_PIPE;
399 	bool locked = true;
400 
401 	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
402 		return;
403 
404 	if (HAS_PCH_SPLIT(dev_priv)) {
405 		u32 port_sel;
406 
407 		pp_reg = PP_CONTROL(0);
408 		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
409 
410 		switch (port_sel) {
411 		case PANEL_PORT_SELECT_LVDS:
412 			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
413 			break;
414 		case PANEL_PORT_SELECT_DPA:
415 			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
416 			break;
417 		case PANEL_PORT_SELECT_DPC:
418 			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
419 			break;
420 		case PANEL_PORT_SELECT_DPD:
421 			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
422 			break;
423 		default:
424 			MISSING_CASE(port_sel);
425 			break;
426 		}
427 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
428 		/* presumably write lock depends on pipe, not port select */
429 		pp_reg = PP_CONTROL(pipe);
430 		panel_pipe = pipe;
431 	} else {
432 		u32 port_sel;
433 
434 		pp_reg = PP_CONTROL(0);
435 		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
436 
437 		drm_WARN_ON(&dev_priv->drm,
438 			    port_sel != PANEL_PORT_SELECT_LVDS);
439 		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
440 	}
441 
442 	val = intel_de_read(dev_priv, pp_reg);
443 	if (!(val & PANEL_POWER_ON) ||
444 	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
445 		locked = false;
446 
447 	I915_STATE_WARN(panel_pipe == pipe && locked,
448 	     "panel assertion failure, pipe %c regs locked\n",
449 	     pipe_name(pipe));
450 }
451 
452 void assert_pipe(struct drm_i915_private *dev_priv,
453 		 enum transcoder cpu_transcoder, bool state)
454 {
455 	bool cur_state;
456 	enum intel_display_power_domain power_domain;
457 	intel_wakeref_t wakeref;
458 
459 	/* we keep both pipes enabled on 830 */
460 	if (IS_I830(dev_priv))
461 		state = true;
462 
463 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
464 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
465 	if (wakeref) {
466 		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
467 		cur_state = !!(val & PIPECONF_ENABLE);
468 
469 		intel_display_power_put(dev_priv, power_domain, wakeref);
470 	} else {
471 		cur_state = false;
472 	}
473 
474 	I915_STATE_WARN(cur_state != state,
475 			"transcoder %s assertion failure (expected %s, current %s)\n",
476 			transcoder_name(cpu_transcoder),
477 			onoff(state), onoff(cur_state));
478 }
479 
480 static void assert_plane(struct intel_plane *plane, bool state)
481 {
482 	enum pipe pipe;
483 	bool cur_state;
484 
485 	cur_state = plane->get_hw_state(plane, &pipe);
486 
487 	I915_STATE_WARN(cur_state != state,
488 			"%s assertion failure (expected %s, current %s)\n",
489 			plane->base.name, onoff(state), onoff(cur_state));
490 }
491 
492 #define assert_plane_enabled(p) assert_plane(p, true)
493 #define assert_plane_disabled(p) assert_plane(p, false)
494 
495 static void assert_planes_disabled(struct intel_crtc *crtc)
496 {
497 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
498 	struct intel_plane *plane;
499 
500 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
501 		assert_plane_disabled(plane);
502 }
503 
504 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
505 				    enum pipe pipe)
506 {
507 	u32 val;
508 	bool enabled;
509 
510 	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
511 	enabled = !!(val & TRANS_ENABLE);
512 	I915_STATE_WARN(enabled,
513 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
514 	     pipe_name(pipe));
515 }
516 
517 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
518 				   enum pipe pipe, enum port port,
519 				   i915_reg_t dp_reg)
520 {
521 	enum pipe port_pipe;
522 	bool state;
523 
524 	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
525 
526 	I915_STATE_WARN(state && port_pipe == pipe,
527 			"PCH DP %c enabled on transcoder %c, should be disabled\n",
528 			port_name(port), pipe_name(pipe));
529 
530 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
531 			"IBX PCH DP %c still using transcoder B\n",
532 			port_name(port));
533 }
534 
535 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
536 				     enum pipe pipe, enum port port,
537 				     i915_reg_t hdmi_reg)
538 {
539 	enum pipe port_pipe;
540 	bool state;
541 
542 	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
543 
544 	I915_STATE_WARN(state && port_pipe == pipe,
545 			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
546 			port_name(port), pipe_name(pipe));
547 
548 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
549 			"IBX PCH HDMI %c still using transcoder B\n",
550 			port_name(port));
551 }
552 
553 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
554 				      enum pipe pipe)
555 {
556 	enum pipe port_pipe;
557 
558 	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
559 	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
560 	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
561 
562 	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
563 			port_pipe == pipe,
564 			"PCH VGA enabled on transcoder %c, should be disabled\n",
565 			pipe_name(pipe));
566 
567 	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
568 			port_pipe == pipe,
569 			"PCH LVDS enabled on transcoder %c, should be disabled\n",
570 			pipe_name(pipe));
571 
	/* PCH SDVOB is multiplexed with HDMIB */
573 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
574 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
575 	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
576 }
577 
578 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
579 			 struct intel_digital_port *dig_port,
580 			 unsigned int expected_mask)
581 {
582 	u32 port_mask;
583 	i915_reg_t dpll_reg;
584 
585 	switch (dig_port->base.port) {
586 	case PORT_B:
587 		port_mask = DPLL_PORTB_READY_MASK;
588 		dpll_reg = DPLL(0);
589 		break;
590 	case PORT_C:
591 		port_mask = DPLL_PORTC_READY_MASK;
592 		dpll_reg = DPLL(0);
593 		expected_mask <<= 4;
594 		break;
595 	case PORT_D:
596 		port_mask = DPLL_PORTD_READY_MASK;
597 		dpll_reg = DPIO_PHY_STATUS;
598 		break;
599 	default:
600 		BUG();
601 	}
602 
603 	if (intel_de_wait_for_register(dev_priv, dpll_reg,
604 				       port_mask, expected_mask, 1000))
605 		drm_WARN(&dev_priv->drm, 1,
606 			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
607 			 dig_port->base.base.base.id, dig_port->base.base.name,
608 			 intel_de_read(dev_priv, dpll_reg) & port_mask,
609 			 expected_mask);
610 }
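
/*
 * Note the expected_mask <<= 4 for port C above: the port C ready bits
 * in DPLL(0) presumably sit one nibble above the port B bits, as the
 * DPLL_PORTB_READY_MASK/DPLL_PORTC_READY_MASK pairing suggests.
 */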
611 
612 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
613 {
614 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
615 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
616 	enum pipe pipe = crtc->pipe;
617 	i915_reg_t reg;
618 	u32 val, pipeconf_val;
619 
620 	/* Make sure PCH DPLL is enabled */
621 	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
622 
623 	/* FDI must be feeding us bits for PCH ports */
624 	assert_fdi_tx_enabled(dev_priv, pipe);
625 	assert_fdi_rx_enabled(dev_priv, pipe);
626 
627 	if (HAS_PCH_CPT(dev_priv)) {
628 		reg = TRANS_CHICKEN2(pipe);
629 		val = intel_de_read(dev_priv, reg);
630 		/*
631 		 * Workaround: Set the timing override bit
632 		 * before enabling the pch transcoder.
633 		 */
634 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
635 		/* Configure frame start delay to match the CPU */
636 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
637 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
638 		intel_de_write(dev_priv, reg, val);
639 	}
640 
641 	reg = PCH_TRANSCONF(pipe);
642 	val = intel_de_read(dev_priv, reg);
643 	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
644 
645 	if (HAS_PCH_IBX(dev_priv)) {
646 		/* Configure frame start delay to match the CPU */
647 		val &= ~TRANS_FRAME_START_DELAY_MASK;
648 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
649 
650 		/*
		 * Make the BPC in the transcoder consistent with
		 * that in the pipeconf reg. For HDMI we must use 8bpc
653 		 * here for both 8bpc and 12bpc.
654 		 */
655 		val &= ~PIPECONF_BPC_MASK;
656 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
657 			val |= PIPECONF_8BPC;
658 		else
659 			val |= pipeconf_val & PIPECONF_BPC_MASK;
660 	}
661 
662 	val &= ~TRANS_INTERLACE_MASK;
663 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
664 		if (HAS_PCH_IBX(dev_priv) &&
665 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
666 			val |= TRANS_LEGACY_INTERLACED_ILK;
667 		else
668 			val |= TRANS_INTERLACED;
669 	} else {
670 		val |= TRANS_PROGRESSIVE;
671 	}
672 
673 	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
674 	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
675 		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
676 			pipe_name(pipe));
677 }
678 
679 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
680 				      enum transcoder cpu_transcoder)
681 {
682 	u32 val, pipeconf_val;
683 
684 	/* FDI must be feeding us bits for PCH ports */
685 	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
686 	assert_fdi_rx_enabled(dev_priv, PIPE_A);
687 
688 	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
689 	/* Workaround: set timing override bit. */
690 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
691 	/* Configure frame start delay to match the CPU */
692 	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
693 	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
694 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
695 
696 	val = TRANS_ENABLE;
697 	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
698 
699 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
700 	    PIPECONF_INTERLACED_ILK)
701 		val |= TRANS_INTERLACED;
702 	else
703 		val |= TRANS_PROGRESSIVE;
704 
705 	intel_de_write(dev_priv, LPT_TRANSCONF, val);
706 	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
707 				  TRANS_STATE_ENABLE, 100))
708 		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
709 }
710 
711 static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
712 				       enum pipe pipe)
713 {
714 	i915_reg_t reg;
715 	u32 val;
716 
717 	/* FDI relies on the transcoder */
718 	assert_fdi_tx_disabled(dev_priv, pipe);
719 	assert_fdi_rx_disabled(dev_priv, pipe);
720 
721 	/* Ports must be off as well */
722 	assert_pch_ports_disabled(dev_priv, pipe);
723 
724 	reg = PCH_TRANSCONF(pipe);
725 	val = intel_de_read(dev_priv, reg);
726 	val &= ~TRANS_ENABLE;
727 	intel_de_write(dev_priv, reg, val);
728 	/* wait for PCH transcoder off, transcoder state */
729 	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
730 		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
731 			pipe_name(pipe));
732 
733 	if (HAS_PCH_CPT(dev_priv)) {
734 		/* Workaround: Clear the timing override chicken bit again. */
735 		reg = TRANS_CHICKEN2(pipe);
736 		val = intel_de_read(dev_priv, reg);
737 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
738 		intel_de_write(dev_priv, reg, val);
739 	}
740 }
741 
742 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
743 {
744 	u32 val;
745 
746 	val = intel_de_read(dev_priv, LPT_TRANSCONF);
747 	val &= ~TRANS_ENABLE;
748 	intel_de_write(dev_priv, LPT_TRANSCONF, val);
749 	/* wait for PCH transcoder off, transcoder state */
750 	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
751 				    TRANS_STATE_ENABLE, 50))
752 		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
753 
754 	/* Workaround: clear timing override bit. */
755 	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
756 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
757 	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
758 }
759 
760 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
761 {
762 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
763 
764 	if (HAS_PCH_LPT(dev_priv))
765 		return PIPE_A;
766 	else
767 		return crtc->pipe;
768 }
769 
770 void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
771 {
772 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
773 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
774 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
775 	enum pipe pipe = crtc->pipe;
776 	i915_reg_t reg;
777 	u32 val;
778 
779 	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
780 
781 	assert_planes_disabled(crtc);
782 
783 	/*
784 	 * A pipe without a PLL won't actually be able to drive bits from
785 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
786 	 * need the check.
787 	 */
788 	if (HAS_GMCH(dev_priv)) {
789 		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
790 			assert_dsi_pll_enabled(dev_priv);
791 		else
792 			assert_pll_enabled(dev_priv, pipe);
793 	} else {
794 		if (new_crtc_state->has_pch_encoder) {
795 			/* if driving the PCH, we need FDI enabled */
796 			assert_fdi_rx_pll_enabled(dev_priv,
797 						  intel_crtc_pch_transcoder(crtc));
798 			assert_fdi_tx_pll_enabled(dev_priv,
799 						  (enum pipe) cpu_transcoder);
800 		}
801 		/* FIXME: assert CPU port conditions for SNB+ */
802 	}
803 
804 	/* Wa_22012358565:adl-p */
805 	if (DISPLAY_VER(dev_priv) == 13)
806 		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
807 			     0, PIPE_ARB_USE_PROG_SLOTS);
808 
809 	reg = PIPECONF(cpu_transcoder);
810 	val = intel_de_read(dev_priv, reg);
811 	if (val & PIPECONF_ENABLE) {
812 		/* we keep both pipes enabled on 830 */
813 		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
814 		return;
815 	}
816 
817 	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
818 	intel_de_posting_read(dev_priv, reg);
819 
820 	/*
	 * Until the pipe starts, PIPEDSL reads will return a stale value,
822 	 * which causes an apparent vblank timestamp jump when PIPEDSL
823 	 * resets to its proper value. That also messes up the frame count
824 	 * when it's derived from the timestamps. So let's wait for the
825 	 * pipe to start properly before we call drm_crtc_vblank_on()
826 	 */
827 	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
828 		intel_wait_for_pipe_scanline_moving(crtc);
829 }
830 
831 void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
832 {
833 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
834 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
835 	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
836 	enum pipe pipe = crtc->pipe;
837 	i915_reg_t reg;
838 	u32 val;
839 
840 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
841 
842 	/*
843 	 * Make sure planes won't keep trying to pump pixels to us,
844 	 * or we might hang the display.
845 	 */
846 	assert_planes_disabled(crtc);
847 
848 	reg = PIPECONF(cpu_transcoder);
849 	val = intel_de_read(dev_priv, reg);
850 	if ((val & PIPECONF_ENABLE) == 0)
851 		return;
852 
853 	/*
854 	 * Double wide has implications for planes
855 	 * so best keep it disabled when not needed.
856 	 */
857 	if (old_crtc_state->double_wide)
858 		val &= ~PIPECONF_DOUBLE_WIDE;
859 
	/* Don't disable the pipe or pipe PLLs if still needed (830 keeps both pipes enabled) */
861 	if (!IS_I830(dev_priv))
862 		val &= ~PIPECONF_ENABLE;
863 
864 	if (DISPLAY_VER(dev_priv) >= 12)
865 		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
866 			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
867 
868 	intel_de_write(dev_priv, reg, val);
869 	if ((val & PIPECONF_ENABLE) == 0)
870 		intel_wait_for_pipe_off(old_crtc_state);
871 }
872 
873 bool
874 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
875 				    u64 modifier)
876 {
877 	return info->is_yuv &&
878 	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
879 }
880 
881 unsigned int
882 intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
883 {
884 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
885 	unsigned int cpp = fb->format->cpp[color_plane];
886 
887 	switch (fb->modifier) {
888 	case DRM_FORMAT_MOD_LINEAR:
889 		return intel_tile_size(dev_priv);
890 	case I915_FORMAT_MOD_X_TILED:
891 		if (DISPLAY_VER(dev_priv) == 2)
892 			return 128;
893 		else
894 			return 512;
895 	case I915_FORMAT_MOD_Y_TILED_CCS:
896 		if (is_ccs_plane(fb, color_plane))
897 			return 128;
898 		fallthrough;
899 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
900 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
901 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
902 		if (is_ccs_plane(fb, color_plane))
903 			return 64;
904 		fallthrough;
905 	case I915_FORMAT_MOD_Y_TILED:
906 		if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
907 			return 128;
908 		else
909 			return 512;
910 	case I915_FORMAT_MOD_Yf_TILED_CCS:
911 		if (is_ccs_plane(fb, color_plane))
912 			return 128;
913 		fallthrough;
914 	case I915_FORMAT_MOD_Yf_TILED:
915 		switch (cpp) {
916 		case 1:
917 			return 64;
918 		case 2:
919 		case 4:
920 			return 128;
921 		case 8:
922 		case 16:
923 			return 256;
924 		default:
925 			MISSING_CASE(cpp);
926 			return cpp;
927 		}
928 		break;
929 	default:
930 		MISSING_CASE(fb->modifier);
931 		return cpp;
932 	}
933 }
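
/*
 * Example readings of the table above: an X-tiled fb on display
 * version 3+ has a 512 byte tile row width, a Yf-tiled fb with
 * cpp == 4 has 128 bytes, and CCS planes use the narrower 64/128 byte
 * widths returned from the early cases.
 */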
934 
935 unsigned int
936 intel_fb_align_height(const struct drm_framebuffer *fb,
937 		      int color_plane, unsigned int height)
938 {
939 	unsigned int tile_height = intel_tile_height(fb, color_plane);
940 
941 	return roundup2(height, tile_height);
942 }
943 
944 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
945 {
946 	unsigned int size = 0;
947 	int i;
948 
949 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
950 		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;
951 
952 	return size;
953 }
954 
955 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
956 {
957 	unsigned int size = 0;
958 	int i;
959 
960 	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
961 		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
962 
963 	return size;
964 }
965 
966 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
967 {
968 	if (DISPLAY_VER(dev_priv) >= 9)
969 		return 256 * 1024;
970 	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
971 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
972 		return 128 * 1024;
973 	else if (DISPLAY_VER(dev_priv) >= 4)
974 		return 4 * 1024;
975 	else
976 		return 0;
977 }
978 
979 static bool has_async_flips(struct drm_i915_private *i915)
980 {
981 	return DISPLAY_VER(i915) >= 5;
982 }
983 
984 unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
985 				  int color_plane)
986 {
987 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
988 
989 	if (intel_fb_uses_dpt(fb))
990 		return 512 * 4096;
991 
992 	/* AUX_DIST needs only 4K alignment */
993 	if (is_ccs_plane(fb, color_plane))
994 		return 4096;
995 
996 	if (is_semiplanar_uv_plane(fb, color_plane)) {
997 		/*
998 		 * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
999 		 * alignment for linear UV planes on all platforms.
1000 		 */
1001 		if (DISPLAY_VER(dev_priv) >= 12) {
1002 			if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1003 				return intel_linear_alignment(dev_priv);
1004 
1005 			return intel_tile_row_size(fb, color_plane);
1006 		}
1007 
1008 		return 4096;
1009 	}
1010 
1011 	drm_WARN_ON(&dev_priv->drm, color_plane != 0);
1012 
1013 	switch (fb->modifier) {
1014 	case DRM_FORMAT_MOD_LINEAR:
1015 		return intel_linear_alignment(dev_priv);
1016 	case I915_FORMAT_MOD_X_TILED:
1017 		if (has_async_flips(dev_priv))
1018 			return 256 * 1024;
1019 		return 0;
1020 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1021 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1022 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1023 		return 16 * 1024;
1024 	case I915_FORMAT_MOD_Y_TILED_CCS:
1025 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1026 	case I915_FORMAT_MOD_Y_TILED:
1027 	case I915_FORMAT_MOD_Yf_TILED:
1028 		return 1 * 1024 * 1024;
1029 	default:
1030 		MISSING_CASE(fb->modifier);
1031 		return 0;
1032 	}
1033 }
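
/*
 * Example alignments from the above: a linear fb on display version 9+
 * needs 256 KiB (intel_linear_alignment()), a gen12 RC CCS main surface
 * needs 16 KiB, and a DPT-backed fb needs 512 * 4096 bytes == 2 MiB.
 */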
1034 
1035 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
1036 {
1037 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1038 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1039 
1040 	return DISPLAY_VER(dev_priv) < 4 ||
1041 		(plane->has_fbc &&
1042 		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
1043 }
1044 
1045 static struct i915_vma *
1046 intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
1047 		     const struct i915_ggtt_view *view,
1048 		     bool uses_fence,
1049 		     unsigned long *out_flags,
1050 		     struct i915_address_space *vm)
1051 {
1052 	struct drm_device *dev = fb->dev;
1053 	struct drm_i915_private *dev_priv = to_i915(dev);
1054 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1055 	struct i915_vma *vma;
1056 	u32 alignment;
1057 	int ret;
1058 
1059 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
1060 		return ERR_PTR(-EINVAL);
1061 
1062 	alignment = 4096 * 512;
1063 
1064 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1065 
1066 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
1067 	if (ret) {
1068 		vma = ERR_PTR(ret);
1069 		goto err;
1070 	}
1071 
1072 	vma = i915_vma_instance(obj, vm, view);
1073 	if (IS_ERR(vma))
1074 		goto err;
1075 
1076 	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
1077 		ret = i915_vma_unbind(vma);
1078 		if (ret) {
1079 			vma = ERR_PTR(ret);
1080 			goto err;
1081 		}
1082 	}
1083 
1084 	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
1085 	if (ret) {
1086 		vma = ERR_PTR(ret);
1087 		goto err;
1088 	}
1089 
1090 	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
1091 
1092 	i915_gem_object_flush_if_display(obj);
1093 
1094 	i915_vma_get(vma);
1095 err:
1096 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1097 
1098 	return vma;
1099 }
1100 
1101 struct i915_vma *
1102 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1103 			   bool phys_cursor,
1104 			   const struct i915_ggtt_view *view,
1105 			   bool uses_fence,
1106 			   unsigned long *out_flags)
1107 {
1108 	struct drm_device *dev = fb->dev;
1109 	struct drm_i915_private *dev_priv = to_i915(dev);
1110 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1111 	intel_wakeref_t wakeref;
1112 	struct i915_gem_ww_ctx ww;
1113 	struct i915_vma *vma;
1114 	unsigned int pinctl;
1115 	u32 alignment;
1116 	int ret;
1117 
1118 	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
1119 		return ERR_PTR(-EINVAL);
1120 
1121 	if (phys_cursor)
1122 		alignment = intel_cursor_alignment(dev_priv);
1123 	else
1124 		alignment = intel_surf_alignment(fb, 0);
1125 	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
1126 		return ERR_PTR(-EINVAL);
1127 
1128 	/* Note that the w/a also requires 64 PTE of padding following the
1129 	 * bo. We currently fill all unused PTE with the shadow page and so
1130 	 * we should always have valid PTE following the scanout preventing
1131 	 * the VT-d warning.
1132 	 */
1133 	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
1134 		alignment = 256 * 1024;
1135 
1136 	/*
1137 	 * Global gtt pte registers are special registers which actually forward
1138 	 * writes to a chunk of system memory. Which means that there is no risk
1139 	 * that the register values disappear as soon as we call
1140 	 * intel_runtime_pm_put(), so it is correct to wrap only the
1141 	 * pin/unpin/fence and not more.
1142 	 */
1143 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1144 
1145 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
1146 
1147 	/*
1148 	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
1150 	 * g4x display engine and that all earlier gen are similarly
1151 	 * limited. Testing suggests that it is a little more
1152 	 * complicated than this. For example, Cherryview appears quite
1153 	 * happy to scanout from anywhere within its global aperture.
1154 	 */
1155 	pinctl = 0;
1156 	if (HAS_GMCH(dev_priv))
1157 		pinctl |= PIN_MAPPABLE;
1158 
1159 	i915_gem_ww_ctx_init(&ww, true);
1160 retry:
1161 	ret = i915_gem_object_lock(obj, &ww);
1162 	if (!ret && phys_cursor)
1163 		ret = i915_gem_object_attach_phys(obj, alignment);
1164 	else if (!ret && HAS_LMEM(dev_priv))
1165 		ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
1166 	/* TODO: Do we need to sync when migration becomes async? */
1167 	if (!ret)
1168 		ret = i915_gem_object_pin_pages(obj);
1169 	if (ret)
1170 		goto err;
1171 
1172 	if (!ret) {
1173 		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
1174 							   view, pinctl);
1175 		if (IS_ERR(vma)) {
1176 			ret = PTR_ERR(vma);
1177 			goto err_unpin;
1178 		}
1179 	}
1180 
1181 	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
1182 		/*
1183 		 * Install a fence for tiled scan-out. Pre-i965 always needs a
1184 		 * fence, whereas 965+ only requires a fence if using
1185 		 * framebuffer compression.  For simplicity, we always, when
1186 		 * possible, install a fence as the cost is not that onerous.
1187 		 *
1188 		 * If we fail to fence the tiled scanout, then either the
1189 		 * modeset will reject the change (which is highly unlikely as
1190 		 * the affected systems, all but one, do not have unmappable
1191 		 * space) or we will not be able to enable full powersaving
1192 		 * techniques (also likely not to apply due to various limits
1193 		 * FBC and the like impose on the size of the buffer, which
1194 		 * presumably we violated anyway with this unmappable buffer).
1195 		 * Anyway, it is presumably better to stumble onwards with
1196 		 * something and try to run the system in a "less than optimal"
1197 		 * mode that matches the user configuration.
1198 		 */
1199 		ret = i915_vma_pin_fence(vma);
1200 		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
1201 			i915_vma_unpin(vma);
1202 			goto err_unpin;
1203 		}
1204 		ret = 0;
1205 
1206 		if (vma->fence)
1207 			*out_flags |= PLANE_HAS_FENCE;
1208 	}
1209 
1210 	i915_vma_get(vma);
1211 
1212 err_unpin:
1213 	i915_gem_object_unpin_pages(obj);
1214 err:
1215 	if (ret == -EDEADLK) {
1216 		ret = i915_gem_ww_ctx_backoff(&ww);
1217 		if (!ret)
1218 			goto retry;
1219 	}
1220 	i915_gem_ww_ctx_fini(&ww);
1221 	if (ret)
1222 		vma = ERR_PTR(ret);
1223 
1224 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
1225 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1226 	return vma;
1227 }
1228 
1229 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1230 {
1231 	if (flags & PLANE_HAS_FENCE)
1232 		i915_vma_unpin_fence(vma);
1233 	i915_vma_unpin(vma);
1234 	i915_vma_put(vma);
1235 }
1236 
1237 /*
1238  * Convert the x/y offsets into a linear offset.
1239  * Only valid with 0/180 degree rotation, which is fine since linear
1240  * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
1242  */
1243 u32 intel_fb_xy_to_linear(int x, int y,
1244 			  const struct intel_plane_state *state,
1245 			  int color_plane)
1246 {
1247 	const struct drm_framebuffer *fb = state->hw.fb;
1248 	unsigned int cpp = fb->format->cpp[color_plane];
1249 	unsigned int pitch = state->view.color_plane[color_plane].stride;
1250 
1251 	return y * pitch + x * cpp;
1252 }
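
/*
 * Example: with a 4096 byte stride and cpp == 4, the pixel at
 * (x, y) == (100, 50) lands at 50 * 4096 + 100 * 4 == 205200 bytes
 * from the start of the color plane.
 */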
1253 
1254 /*
1255  * Add the x/y offsets derived from fb->offsets[] to the user
1256  * specified plane src x/y offsets. The resulting x/y offsets
1257  * specify the start of scanout from the beginning of the gtt mapping.
1258  */
1259 void intel_add_fb_offsets(int *x, int *y,
1260 			  const struct intel_plane_state *state,
1261 			  int color_plane)
1262 
1263 {
1264 	*x += state->view.color_plane[color_plane].x;
1265 	*y += state->view.color_plane[color_plane].y;
1266 }
1267 
1268 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
1269 {
1270 	switch (fb_modifier) {
1271 	case I915_FORMAT_MOD_X_TILED:
1272 		return I915_TILING_X;
1273 	case I915_FORMAT_MOD_Y_TILED:
1274 	case I915_FORMAT_MOD_Y_TILED_CCS:
1275 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1276 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1277 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1278 		return I915_TILING_Y;
1279 	default:
1280 		return I915_TILING_NONE;
1281 	}
1282 }
1283 
1284 /*
1285  * From the Sky Lake PRM:
1286  * "The Color Control Surface (CCS) contains the compression status of
1287  *  the cache-line pairs. The compression state of the cache-line pair
1288  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
1289  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
1290  *  cache-line-pairs. CCS is always Y tiled."
1291  *
 * Since cache line pairs refer to horizontally adjacent cache lines,
1293  * each cache line in the CCS corresponds to an area of 32x16 cache
1294  * lines on the main surface. Since each pixel is 4 bytes, this gives
1295  * us a ratio of one byte in the CCS for each 8x16 pixels in the
1296  * main surface.
1297  */
1298 static const struct drm_format_info skl_ccs_formats[] = {
1299 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1300 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1301 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1302 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
1303 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1304 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1305 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1306 	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
1307 };
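
/*
 * Cross-check of the ratio above against the table: hsub == 8 and
 * vsub == 16 with cpp == { 4, 1 } mean one CCS byte covers an 8x16
 * pixel block, i.e. 8 * 16 * 4 == 512 main surface bytes per CCS byte.
 */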
1308 
1309 /*
1310  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
1311  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
1312  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
1313  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
1315  */
1316 static const struct drm_format_info gen12_ccs_formats[] = {
1317 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
1318 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1319 	  .hsub = 1, .vsub = 1, },
1320 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
1321 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1322 	  .hsub = 1, .vsub = 1, },
1323 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
1324 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1325 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1326 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
1327 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1328 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1329 	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
1330 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1331 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1332 	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
1333 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1334 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1335 	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
1336 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1337 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1338 	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
1339 	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1340 	  .hsub = 2, .vsub = 1, .is_yuv = true },
1341 	{ .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
1342 	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
1343 	  .hsub = 1, .vsub = 1, .is_yuv = true },
1344 	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
1345 	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
1346 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1347 	{ .format = DRM_FORMAT_P010, .num_planes = 4,
1348 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1349 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1350 	{ .format = DRM_FORMAT_P012, .num_planes = 4,
1351 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1352 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1353 	{ .format = DRM_FORMAT_P016, .num_planes = 4,
1354 	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
1355 	  .hsub = 2, .vsub = 2, .is_yuv = true },
1356 };
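
/*
 * Cross-check for the gen12 layout: four Y-tiles of 32x32 4-byte pixels
 * are 4 * 4096 == 16384 main surface bytes per 64 byte CCS cache line,
 * i.e. 256 bytes (one 2x32 pixel column) per CCS byte, matching the
 * block_w == 2, char_per_block == 1 CCS plane entries above.
 */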
1357 
1358 /*
 * Same as gen12_ccs_formats[] above, but with an additional surface used
1360  * to pass Clear Color information in plane 2 with 64 bits of data.
1361  */
1362 static const struct drm_format_info gen12_ccs_cc_formats[] = {
1363 	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
1364 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1365 	  .hsub = 1, .vsub = 1, },
1366 	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
1367 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1368 	  .hsub = 1, .vsub = 1, },
1369 	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
1370 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1371 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1372 	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
1373 	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
1374 	  .hsub = 1, .vsub = 1, .has_alpha = true },
1375 };
1376 
1377 static const struct drm_format_info *
1378 lookup_format_info(const struct drm_format_info formats[],
1379 		   int num_formats, u32 format)
1380 {
1381 	int i;
1382 
1383 	for (i = 0; i < num_formats; i++) {
1384 		if (formats[i].format == format)
1385 			return &formats[i];
1386 	}
1387 
1388 	return NULL;
1389 }
1390 
1391 static const struct drm_format_info *
1392 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
1393 {
1394 	switch (cmd->modifier[0]) {
1395 	case I915_FORMAT_MOD_Y_TILED_CCS:
1396 	case I915_FORMAT_MOD_Yf_TILED_CCS:
1397 		return lookup_format_info(skl_ccs_formats,
1398 					  ARRAY_SIZE(skl_ccs_formats),
1399 					  cmd->pixel_format);
1400 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
1401 	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
1402 		return lookup_format_info(gen12_ccs_formats,
1403 					  ARRAY_SIZE(gen12_ccs_formats),
1404 					  cmd->pixel_format);
1405 	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
1406 		return lookup_format_info(gen12_ccs_cc_formats,
1407 					  ARRAY_SIZE(gen12_ccs_cc_formats),
1408 					  cmd->pixel_format);
1409 	default:
1410 		return NULL;
1411 	}
1412 }
1413 
1414 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
1415 {
1416 	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
1417 			    512) * 64;
1418 }
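
/*
 * Example: a main surface pitch of 16384 bytes yields an AUX stride of
 * DIV_ROUND_UP(16384, 512) * 64 == 2048 bytes, i.e. the CCS pitch is
 * the main pitch scaled by the 64B-per-512B compression ratio.
 */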
1419 
1420 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
1421 			      u32 pixel_format, u64 modifier)
1422 {
1423 	struct intel_crtc *crtc;
1424 	struct intel_plane *plane;
1425 
1426 	if (!HAS_DISPLAY(dev_priv))
1427 		return 0;
1428 
1429 	/*
1430 	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all;
	 * if pipe A happens to be disabled, use the first pipe from pipe_mask.
1433 	 */
1434 	crtc = intel_get_first_crtc(dev_priv);
1435 	if (!crtc)
1436 		return 0;
1437 
1438 	plane = to_intel_plane(crtc->base.primary);
1439 
1440 	return plane->max_stride(plane, pixel_format, modifier,
1441 				 DRM_MODE_ROTATE_0);
1442 }
1443 
1444 static
1445 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
1446 			u32 pixel_format, u64 modifier)
1447 {
1448 	/*
1449 	 * Arbitrary limit for gen4+ chosen to match the
1450 	 * render engine max stride.
1451 	 *
1452 	 * The new CCS hash mode makes remapping impossible
1453 	 */
1454 	if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
1455 	    intel_modifier_uses_dpt(dev_priv, modifier))
1456 		return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
1457 	else if (DISPLAY_VER(dev_priv) >= 7)
1458 		return 256 * 1024;
1459 	else
1460 		return 128 * 1024;
1461 }
1462 
1463 static u32
1464 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
1465 {
1466 	struct drm_i915_private *dev_priv = to_i915(fb->dev);
1467 	u32 tile_width;
1468 
1469 	if (is_surface_linear(fb, color_plane)) {
1470 		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
1471 							   fb->format->format,
1472 							   fb->modifier);
1473 
1474 		/*
1475 		 * To make remapping with linear generally feasible
1476 		 * we need the stride to be page aligned.
1477 		 */
1478 		if (fb->pitches[color_plane] > max_stride &&
1479 		    !is_ccs_modifier(fb->modifier))
1480 			return intel_tile_size(dev_priv);
1481 		else
1482 			return 64;
1483 	}
1484 
1485 	tile_width = intel_tile_width_bytes(fb, color_plane);
1486 	if (is_ccs_modifier(fb->modifier)) {
1487 		/*
1488 		 * Display WA #0531: skl,bxt,kbl,glk
1489 		 *
1490 		 * Render decompression and plane width > 3840
1491 		 * combined with horizontal panning requires the
1492 		 * plane stride to be a multiple of 4. We'll just
1493 		 * require the entire fb to accommodate that to avoid
1494 		 * potential runtime errors at plane configuration time.
1495 		 */
1496 		if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
1497 		    color_plane == 0 && fb->width > 3840)
1498 			tile_width *= 4;
1499 		/*
1500 		 * The main surface pitch must be padded to a multiple of four
1501 		 * tile widths.
1502 		 */
1503 		else if (DISPLAY_VER(dev_priv) >= 12)
1504 			tile_width *= 4;
1505 	}
1506 	return tile_width;
1507 }
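
/*
 * Example: for a gen12+ CCS main surface, assuming the usual 128 byte
 * Y-tile width, the padding rule above yields a 4 * 128 == 512 byte
 * stride alignment.
 */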
1508 
1509 static struct i915_vma *
1510 initial_plane_vma(struct drm_i915_private *i915,
1511 		  struct intel_initial_plane_config *plane_config)
1512 {
1513 	struct drm_i915_gem_object *obj;
1514 	struct i915_vma *vma;
1515 	u32 base, size;
1516 
1517 	if (plane_config->size == 0)
1518 		return NULL;
1519 
1520 	base = round_down(plane_config->base,
1521 			  I915_GTT_MIN_ALIGNMENT);
1522 	size = round_up(plane_config->base + plane_config->size,
1523 			I915_GTT_MIN_ALIGNMENT);
1524 	size -= base;
1525 
1526 	/*
1527 	 * If the FB is too big, just don't use it since fbdev is not very
1528 	 * important and we should probably use that space with FBC or other
1529 	 * features.
1530 	 */
1531 	if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
1532 	    size * 2 > i915->stolen_usable_size)
1533 		return NULL;
1534 
1535 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1536 	if (IS_ERR(obj))
1537 		return NULL;
1538 
1539 	/*
1540 	 * Mark it WT ahead of time to avoid changing the
1541 	 * cache_level during fbdev initialization. The
1542 	 * unbind there would get stuck waiting for rcu.
1543 	 */
1544 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1545 					    I915_CACHE_WT : I915_CACHE_NONE);
1546 
1547 	switch (plane_config->tiling) {
1548 	case I915_TILING_NONE:
1549 		break;
1550 	case I915_TILING_X:
1551 	case I915_TILING_Y:
1552 		obj->tiling_and_stride =
1553 			plane_config->fb->base.pitches[0] |
1554 			plane_config->tiling;
1555 		break;
1556 	default:
1557 		MISSING_CASE(plane_config->tiling);
1558 		goto err_obj;
1559 	}
1560 
1561 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1562 	if (IS_ERR(vma))
1563 		goto err_obj;
1564 
1565 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1566 		goto err_obj;
1567 
1568 	if (i915_gem_object_is_tiled(obj) &&
1569 	    !i915_vma_is_map_and_fenceable(vma))
1570 		goto err_obj;
1571 
1572 	return vma;
1573 
1574 err_obj:
1575 	i915_gem_object_put(obj);
1576 	return NULL;
1577 }
1578 
1579 static bool
1580 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1581 			      struct intel_initial_plane_config *plane_config)
1582 {
1583 	struct drm_device *dev = crtc->base.dev;
1584 	struct drm_i915_private *dev_priv = to_i915(dev);
1585 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1586 	struct drm_framebuffer *fb = &plane_config->fb->base;
1587 	struct i915_vma *vma;
1588 
1589 	switch (fb->modifier) {
1590 	case DRM_FORMAT_MOD_LINEAR:
1591 	case I915_FORMAT_MOD_X_TILED:
1592 	case I915_FORMAT_MOD_Y_TILED:
1593 		break;
1594 	default:
1595 		drm_dbg(&dev_priv->drm,
1596 			"Unsupported modifier for initial FB: 0x%llx\n",
1597 			fb->modifier);
1598 		return false;
1599 	}
1600 
1601 	vma = initial_plane_vma(dev_priv, plane_config);
1602 	if (!vma)
1603 		return false;
1604 
1605 	mode_cmd.pixel_format = fb->format->format;
1606 	mode_cmd.width = fb->width;
1607 	mode_cmd.height = fb->height;
1608 	mode_cmd.pitches[0] = fb->pitches[0];
1609 	mode_cmd.modifier[0] = fb->modifier;
1610 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1611 
1612 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1613 				   vma->obj, &mode_cmd)) {
1614 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1615 		goto err_vma;
1616 	}
1617 
1618 	plane_config->vma = vma;
1619 	return true;
1620 
1621 err_vma:
1622 	i915_vma_put(vma);
1623 	return false;
1624 }
1625 
1626 static void
1627 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1628 			struct intel_plane_state *plane_state,
1629 			bool visible)
1630 {
1631 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1632 
1633 	plane_state->uapi.visible = visible;
1634 
1635 	if (visible)
1636 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1637 	else
1638 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1639 }
1640 
1641 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1642 {
1643 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1644 	struct drm_plane *plane;
1645 
1646 	/*
1647 	 * Active_planes aliases if multiple "primary" or cursor planes
1648 	 * have been used on the same (or wrong) pipe. plane_mask uses
1649 	 * unique ids, hence we can use that to reconstruct active_planes.
1650 	 */
1651 	crtc_state->enabled_planes = 0;
1652 	crtc_state->active_planes = 0;
1653 
1654 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1655 				crtc_state->uapi.plane_mask) {
1656 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1657 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1658 	}
1659 }
1660 
1661 void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1662 				  struct intel_plane *plane)
1663 {
1664 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1665 	struct intel_crtc_state *crtc_state =
1666 		to_intel_crtc_state(crtc->base.state);
1667 	struct intel_plane_state *plane_state =
1668 		to_intel_plane_state(plane->base.state);
1669 
1670 	drm_dbg_kms(&dev_priv->drm,
1671 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1672 		    plane->base.base.id, plane->base.name,
1673 		    crtc->base.base.id, crtc->base.name);
1674 
1675 	intel_set_plane_visible(crtc_state, plane_state, false);
1676 	fixup_plane_bitmasks(crtc_state);
1677 	crtc_state->data_rate[plane->id] = 0;
1678 	crtc_state->min_cdclk[plane->id] = 0;
1679 
1680 	if (plane->id == PLANE_PRIMARY)
1681 		hsw_disable_ips(crtc_state);
1682 
1683 	/*
1684 	 * Vblank time updates from the shadow to live plane control register
1685 	 * are blocked if the memory self-refresh mode is active at that
1686 	 * moment. So to make sure the plane gets truly disabled, first
1687 	 * disable the self-refresh mode. The self-refresh enable bit in turn
1688 	 * will be checked/applied by the HW only at the next frame start
1689 	 * event, which is after the vblank start event, so we need to have a
1690 	 * wait-for-vblank between disabling the plane and the pipe.
1691 	 */
1692 	if (HAS_GMCH(dev_priv) &&
1693 	    intel_set_memory_cxsr(dev_priv, false))
1694 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1695 
1696 	/*
1697 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1698 	 * So disable underrun reporting before all the planes get disabled.
1699 	 */
1700 	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
1701 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1702 
1703 	intel_disable_plane(plane, crtc_state);
1704 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1705 }
1706 
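/*
 * In a cloned firmware setup several pipes may scan out of the same
 * framebuffer. If reconstructing our own object failed, look for an
 * already active primary plane whose GGTT offset matches
 * plane_config->base and reuse its fb/vma instead of a second copy.
 */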
1707 static bool
1708 intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
1709 			      const struct intel_initial_plane_config *plane_config,
1710 			      struct drm_framebuffer **fb,
1711 			      struct i915_vma **vma)
1712 {
1713 	struct intel_crtc *crtc;
1714 
1715 	for_each_intel_crtc(&i915->drm, crtc) {
1716 		struct intel_crtc_state *crtc_state =
1717 			to_intel_crtc_state(crtc->base.state);
1718 		struct intel_plane *plane =
1719 			to_intel_plane(crtc->base.primary);
1720 		struct intel_plane_state *plane_state =
1721 			to_intel_plane_state(plane->base.state);
1722 
1723 		if (!crtc_state->uapi.active)
1724 			continue;
1725 
1726 		if (!plane_state->ggtt_vma)
1727 			continue;
1728 
1729 		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
1730 			*fb = plane_state->hw.fb;
1731 			*vma = plane_state->ggtt_vma;
1732 			return true;
1733 		}
1734 	}
1735 
1736 	return false;
1737 }
1738 
1739 static void
1740 intel_find_initial_plane_obj(struct intel_crtc *crtc,
1741 			     struct intel_initial_plane_config *plane_config)
1742 {
1743 	struct drm_device *dev = crtc->base.dev;
1744 	struct drm_i915_private *dev_priv = to_i915(dev);
1745 	struct intel_crtc_state *crtc_state =
1746 		to_intel_crtc_state(crtc->base.state);
1747 	struct intel_plane *plane =
1748 		to_intel_plane(crtc->base.primary);
1749 	struct intel_plane_state *plane_state =
1750 		to_intel_plane_state(plane->base.state);
1751 	struct drm_framebuffer *fb;
1752 	struct i915_vma *vma;
1753 
1754 	/*
1755 	 * TODO:
1756 	 *   Disable planes if get_initial_plane_config() failed.
1757 	 *   Make sure things work if the surface base is not page aligned.
1758 	 */
1759 	if (!plane_config->fb)
1760 		return;
1761 
1762 	if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
1763 		fb = &plane_config->fb->base;
1764 		vma = plane_config->vma;
1765 		goto valid_fb;
1766 	}
1767 
1768 	/*
1769 	 * Failed to alloc the obj, check to see if we should share
1770 	 * an fb with another CRTC instead
1771 	 */
1772 	if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
1773 		goto valid_fb;
1774 
1775 	/*
1776 	 * We've failed to reconstruct the BIOS FB.  Current display state
1777 	 * indicates that the primary plane is visible, but has a NULL FB,
1778 	 * which will lead to problems later if we don't fix it up.  The
1779 	 * simplest solution is to just disable the primary plane now and
1780 	 * pretend the BIOS never had it enabled.
1781 	 */
1782 	intel_plane_disable_noatomic(crtc, plane);
1783 	if (crtc_state->bigjoiner) {
1784 		struct intel_crtc *slave =
1785 			crtc_state->bigjoiner_linked_crtc;
1786 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
1787 	}
1788 
1789 	return;
1790 
1791 valid_fb:
1792 	plane_state->uapi.rotation = plane_config->rotation;
1793 	intel_fb_fill_view(to_intel_framebuffer(fb),
1794 			   plane_state->uapi.rotation, &plane_state->view);
1795 
1796 	__i915_vma_pin(vma);
1797 	plane_state->ggtt_vma = i915_vma_get(vma);
1798 	if (intel_plane_uses_fence(plane_state) &&
1799 	    i915_vma_pin_fence(vma) == 0 && vma->fence)
1800 		plane_state->flags |= PLANE_HAS_FENCE;
1801 
1802 	plane_state->uapi.src_x = 0;
1803 	plane_state->uapi.src_y = 0;
1804 	plane_state->uapi.src_w = fb->width << 16;
1805 	plane_state->uapi.src_h = fb->height << 16;
1806 
1807 	plane_state->uapi.crtc_x = 0;
1808 	plane_state->uapi.crtc_y = 0;
1809 	plane_state->uapi.crtc_w = fb->width;
1810 	plane_state->uapi.crtc_h = fb->height;
1811 
1812 	if (plane_config->tiling)
1813 		dev_priv->preserve_bios_swizzle = true;
1814 
1815 	plane_state->uapi.fb = fb;
1816 	drm_framebuffer_get(fb);
1817 
1818 	plane_state->uapi.crtc = &crtc->base;
1819 	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
1820 
1821 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
1822 
1823 	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
1824 }
1825 
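/*
 * Convert the color plane 0 offset of the fb view back into an x/y
 * position and return the y coordinate, which is what the fence setup
 * needs (hence the name).
 */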
1826 unsigned int
1827 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1828 {
1829 	int x = 0, y = 0;
1830 
1831 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1832 					  plane_state->view.color_plane[0].offset, 0);
1833 
1834 	return y;
1835 }
1836 
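/*
 * Re-commit a duplicated atomic state after a GPU/display reset: hardware
 * state is reconstructed first, and every crtc is flagged with
 * mode_changed so the full state is recomputed even when it looks
 * compatible with what is already programmed.
 */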
1837 static int
1838 __intel_display_resume(struct drm_device *dev,
1839 		       struct drm_atomic_state *state,
1840 		       struct drm_modeset_acquire_ctx *ctx)
1841 {
1842 	struct drm_crtc_state *crtc_state;
1843 	struct drm_crtc *crtc;
1844 	int i, ret;
1845 
1846 	intel_modeset_setup_hw_state(dev, ctx);
1847 	intel_vga_redisable(to_i915(dev));
1848 
1849 	if (!state)
1850 		return 0;
1851 
1852 	/*
1853 	 * We've duplicated the state, so pointers to the old state are invalid.
1854 	 *
1855 	 * Don't attempt to use the old state until we commit the duplicated state.
1856 	 */
1857 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1858 		/*
1859 		 * Force recalculation even if we restore
1860 		 * current state. With fast modeset this may not result
1861 		 * in a modeset when the state is compatible.
1862 		 */
1863 		crtc_state->mode_changed = true;
1864 	}
1865 
1866 	/* ignore any reset values/BIOS leftovers in the WM registers */
1867 	if (!HAS_GMCH(to_i915(dev)))
1868 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
1869 
1870 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1871 
1872 	drm_WARN_ON(dev, ret == -EDEADLK);
1873 	return ret;
1874 }
1875 
1876 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1877 {
1878 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1879 		intel_has_gpu_reset(&dev_priv->gt));
1880 }
1881 
1882 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
1883 {
1884 	struct drm_device *dev = &dev_priv->drm;
1885 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1886 	struct drm_atomic_state *state;
1887 	int ret;
1888 
1889 	if (!HAS_DISPLAY(dev_priv))
1890 		return;
1891 
1892 	/* reset doesn't touch the display */
1893 	if (!dev_priv->params.force_reset_modeset_test &&
1894 	    !gpu_reset_clobbers_display(dev_priv))
1895 		return;
1896 
1897 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
1898 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1899 	smp_mb__after_atomic();
1900 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
1901 
1902 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
1903 		drm_dbg_kms(&dev_priv->drm,
1904 			    "Modeset potentially stuck, unbreaking through wedging\n");
1905 		intel_gt_set_wedged(&dev_priv->gt);
1906 	}
1907 
1908 	/*
1909 	 * Need mode_config.mutex so that we don't
1910 	 * trample ongoing ->detect() and whatnot.
1911 	 */
1912 	mutex_lock(&dev->mode_config.mutex);
1913 	drm_modeset_acquire_init(ctx, 0);
1914 	while (1) {
1915 		ret = drm_modeset_lock_all_ctx(dev, ctx);
1916 		if (ret != -EDEADLK)
1917 			break;
1918 
1919 		drm_modeset_backoff(ctx);
1920 	}
1921 	/*
1922 	 * Disabling the crtcs gracefully seems nicer. Also the
1923 	 * g33 docs say we should at least disable all the planes.
1924 	 */
1925 	state = drm_atomic_helper_duplicate_state(dev, ctx);
1926 	if (IS_ERR(state)) {
1927 		ret = PTR_ERR(state);
1928 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
1929 			ret);
1930 		return;
1931 	}
1932 
1933 	ret = drm_atomic_helper_disable_all(dev, ctx);
1934 	if (ret) {
1935 		drm_err(&dev_priv->drm, "Suspending CRTCs failed with %i\n",
1936 			ret);
1937 		drm_atomic_state_put(state);
1938 		return;
1939 	}
1940 
1941 	dev_priv->modeset_restore_state = state;
1942 	state->acquire_ctx = ctx;
1943 }
1944 
1945 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
1946 {
1947 	struct drm_device *dev = &dev_priv->drm;
1948 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1949 	struct drm_atomic_state *state;
1950 	int ret;
1951 
1952 	if (!HAS_DISPLAY(dev_priv))
1953 		return;
1954 
1955 	/* reset doesn't touch the display */
1956 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
1957 		return;
1958 
1959 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
1960 	if (!state)
1961 		goto unlock;
1962 
1963 	/* reset doesn't touch the display */
1964 	if (!gpu_reset_clobbers_display(dev_priv)) {
1965 		/* for testing only restore the display */
1966 		ret = __intel_display_resume(dev, state, ctx);
1967 		if (ret)
1968 			drm_err(&dev_priv->drm,
1969 				"Restoring old state failed with %i\n", ret);
1970 	} else {
1971 		/*
1972 		 * The display has been reset as well,
1973 		 * so need a full re-initialization.
1974 		 */
1975 		intel_pps_unlock_regs_wa(dev_priv);
1976 		intel_modeset_init_hw(dev_priv);
1977 		intel_init_clock_gating(dev_priv);
1978 		intel_hpd_init(dev_priv);
1979 
1980 		ret = __intel_display_resume(dev, state, ctx);
1981 		if (ret)
1982 			drm_err(&dev_priv->drm,
1983 				"Restoring old state failed with %i\n", ret);
1984 
1985 		intel_hpd_poll_disable(dev_priv);
1986 	}
1987 
1988 	drm_atomic_state_put(state);
1989 unlock:
1990 	drm_modeset_drop_locks(ctx);
1991 	drm_modeset_acquire_fini(ctx);
1992 	mutex_unlock(&dev->mode_config.mutex);
1993 
1994 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1995 }
1996 
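/*
 * Underrun recovery can only be left enabled when the pipe passes pixels
 * through unmodified: pfit downscaling, YCbCr 4:2:0 output through the
 * pfit, DSC, PSR2 or the DP splitter all rule it out. Used below when
 * programming the ADL-P+ chicken bit.
 */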
1997 static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
1998 {
1999 	if (crtc_state->pch_pfit.enabled &&
2000 	    (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
2001 	     crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
2002 	     crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
2003 		return false;
2004 
2005 	if (crtc_state->dsc.compression_enable)
2006 		return false;
2007 
2008 	if (crtc_state->has_psr2)
2009 		return false;
2010 
2011 	if (crtc_state->splitter.enable)
2012 		return false;
2013 
2014 	return true;
2015 }
2016 
2017 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
2018 {
2019 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2020 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2021 	enum pipe pipe = crtc->pipe;
2022 	u32 tmp;
2023 
2024 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
2025 
2026 	/*
2027 	 * Display WA #1153: icl
2028 	 * enable hardware to bypass the alpha math
2029 	 * and rounding for per-pixel values 00 and 0xff
2030 	 */
2031 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
2032 	/*
2033 	 * Display WA #1605353570: icl
2034 	 * Set the pixel rounding bit to 1 to allow
2035 	 * passthrough of frame buffer pixels unmodified
2036 	 * across the pipe
2037 	 */
2038 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
2039 
2040 	if (IS_DG2(dev_priv)) {
2041 		/*
2042 		 * Underrun recovery must always be disabled on DG2.  However
2043 		 * the chicken bit meaning is inverted compared to other
2044 		 * platforms.
2045 		 */
2046 		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
2047 	} else if (DISPLAY_VER(dev_priv) >= 13) {
2048 		if (underrun_recovery_supported(crtc_state))
2049 			tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
2050 		else
2051 			tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
2052 	}
2053 
2054 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
2055 }
2056 
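/*
 * Check whether any crtc still has an atomic commit whose cleanup work
 * (and thus its fb unpin) has not completed yet. If one is found, wait a
 * vblank to give the cleanup a chance to run and report it to the caller.
 */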
2057 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
2058 {
2059 	struct drm_crtc *crtc;
2060 	bool cleanup_done;
2061 
2062 	drm_for_each_crtc(crtc, &dev_priv->drm) {
2063 		struct drm_crtc_commit *commit;
2064 		spin_lock(&crtc->commit_lock);
2065 		commit = list_first_entry_or_null(&crtc->commit_list,
2066 						  struct drm_crtc_commit, commit_entry);
2067 		cleanup_done = commit ?
2068 			try_wait_for_completion(&commit->cleanup_done) : true;
2069 		spin_unlock(&crtc->commit_lock);
2070 
2071 		if (cleanup_done)
2072 			continue;
2073 
2074 		drm_crtc_wait_one_vblank(crtc);
2075 
2076 		return true;
2077 	}
2078 
2079 	return false;
2080 }
2081 
2082 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
2083 {
2084 	u32 temp;
2085 
2086 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
2087 
2088 	mutex_lock(&dev_priv->sb_lock);
2089 
2090 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2091 	temp |= SBI_SSCCTL_DISABLE;
2092 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2093 
2094 	mutex_unlock(&dev_priv->sb_lock);
2095 }
2096 
2097 /* Program iCLKIP clock to the desired frequency */
2098 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
2099 {
2100 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2101 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2102 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
2103 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
2104 	u32 temp;
2105 
2106 	lpt_disable_iclkip(dev_priv);
2107 
2108 	/* The iCLK virtual clock root frequency is in MHz,
2109 	 * but the adjusted_mode->crtc_clock is in KHz. To get the
2110 	 * divisors, it is necessary to divide one by another, so we
2111 	 * convert the virtual clock precision to KHz here for higher
2112 	 * precision.
2113 	 */
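	/*
	 * Worked example (illustrative numbers only): for a 148500 kHz
	 * pixel clock at auxdiv=0,
	 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 148500) = 1164
	 *   divsel   = 1164 / 64 - 2 = 16
	 *   phaseinc = 1164 % 64     = 12
	 * divsel fits in 7 bits, so auxdiv=0 is kept.
	 */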
2114 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
2115 		u32 iclk_virtual_root_freq = 172800 * 1000;
2116 		u32 iclk_pi_range = 64;
2117 		u32 desired_divisor;
2118 
2119 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2120 						    clock << auxdiv);
2121 		divsel = (desired_divisor / iclk_pi_range) - 2;
2122 		phaseinc = desired_divisor % iclk_pi_range;
2123 
2124 		/*
2125 		 * Near 20MHz is a corner case which is
2126 		 * out of range for the 7-bit divisor
2127 		 */
2128 		if (divsel <= 0x7f)
2129 			break;
2130 	}
2131 
2132 	/* This should not happen with any sane values */
2133 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2134 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2135 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
2136 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2137 
2138 	drm_dbg_kms(&dev_priv->drm,
2139 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2140 		    clock, auxdiv, divsel, phasedir, phaseinc);
2141 
2142 	mutex_lock(&dev_priv->sb_lock);
2143 
2144 	/* Program SSCDIVINTPHASE6 */
2145 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2146 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2147 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2148 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2149 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2150 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2151 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2152 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2153 
2154 	/* Program SSCAUXDIV */
2155 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2156 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2157 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2158 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2159 
2160 	/* Enable modulator and associated divider */
2161 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2162 	temp &= ~SBI_SSCCTL_DISABLE;
2163 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2164 
2165 	mutex_unlock(&dev_priv->sb_lock);
2166 
2167 	/* Wait for initialization time */
2168 	udelay(24);
2169 
2170 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2171 }
2172 
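/*
 * Read back the current iCLKIP frequency in kHz by inverting the divisor
 * math of lpt_program_iclkip():
 *   desired_divisor = (divsel + 2) * 64 + phaseinc
 *   clock = DIV_ROUND_CLOSEST(172800000, desired_divisor << auxdiv)
 * Returns 0 when iCLKIP is gated or disabled.
 */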
2173 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2174 {
2175 	u32 divsel, phaseinc, auxdiv;
2176 	u32 iclk_virtual_root_freq = 172800 * 1000;
2177 	u32 iclk_pi_range = 64;
2178 	u32 desired_divisor;
2179 	u32 temp;
2180 
2181 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2182 		return 0;
2183 
2184 	mutex_lock(&dev_priv->sb_lock);
2185 
2186 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2187 	if (temp & SBI_SSCCTL_DISABLE) {
2188 		mutex_unlock(&dev_priv->sb_lock);
2189 		return 0;
2190 	}
2191 
2192 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2193 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2194 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2195 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2196 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2197 
2198 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2199 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2200 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2201 
2202 	mutex_unlock(&dev_priv->sb_lock);
2203 
2204 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2205 
2206 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2207 				 desired_divisor << auxdiv);
2208 }
2209 
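/*
 * Copy the already programmed CPU transcoder timings (H/V total, blank,
 * sync and vsyncshift) into the matching PCH transcoder registers so
 * both ends of the FDI link run with identical timings.
 */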
2210 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2211 					   enum pipe pch_transcoder)
2212 {
2213 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2214 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2215 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2216 
2217 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2218 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2219 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2220 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2221 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2222 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2223 
2224 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2225 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2226 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2227 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2228 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2229 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2230 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2231 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2232 }
2233 
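/*
 * FDI B and C share a pool of lanes on CPT; the bifurcation bit
 * evidently splits them so pipe C gets lanes at the expense of pipe B
 * (see ivb_update_fdi_bc_bifurcation() below). The bit must only be
 * flipped while both FDI B and C receivers are disabled, hence the
 * warnings.
 */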
2234 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2235 {
2236 	u32 temp;
2237 
2238 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2239 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2240 		return;
2241 
2242 	drm_WARN_ON(&dev_priv->drm,
2243 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2244 		    FDI_RX_ENABLE);
2245 	drm_WARN_ON(&dev_priv->drm,
2246 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2247 		    FDI_RX_ENABLE);
2248 
2249 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2250 	if (enable)
2251 		temp |= FDI_BC_BIFURCATION_SELECT;
2252 
2253 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2254 		    enable ? "en" : "dis");
2255 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2256 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2257 }
2258 
2259 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2260 {
2261 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2262 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2263 
2264 	switch (crtc->pipe) {
2265 	case PIPE_A:
2266 		break;
2267 	case PIPE_B:
2268 		if (crtc_state->fdi_lanes > 2)
2269 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2270 		else
2271 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2272 
2273 		break;
2274 	case PIPE_C:
2275 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2276 
2277 		break;
2278 	default:
2279 		BUG();
2280 	}
2281 }
2282 
2283 /*
2284  * Finds the encoder associated with the given CRTC. This can only be
2285  * used when we know that the CRTC isn't feeding multiple encoders!
2286  */
2287 struct intel_encoder *
2288 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2289 			   const struct intel_crtc_state *crtc_state)
2290 {
2291 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2292 	const struct drm_connector_state *connector_state;
2293 	const struct drm_connector *connector;
2294 	struct intel_encoder *encoder = NULL;
2295 	int num_encoders = 0;
2296 	int i;
2297 
2298 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2299 		if (connector_state->crtc != &crtc->base)
2300 			continue;
2301 
2302 		encoder = to_intel_encoder(connector_state->best_encoder);
2303 		num_encoders++;
2304 	}
2305 
2306 	drm_WARN(encoder->base.dev, num_encoders != 1,
2307 		 "%d encoders for pipe %c\n",
2308 		 num_encoders, pipe_name(crtc->pipe));
2309 
2310 	return encoder;
2311 }
2312 
2313 /*
2314  * Enable PCH resources required for PCH ports:
2315  *   - PCH PLLs
2316  *   - FDI training & RX/TX
2317  *   - update transcoder timings
2318  *   - DP transcoding bits
2319  *   - transcoder
2320  */
2321 static void ilk_pch_enable(const struct intel_atomic_state *state,
2322 			   const struct intel_crtc_state *crtc_state)
2323 {
2324 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2325 	struct drm_device *dev = crtc->base.dev;
2326 	struct drm_i915_private *dev_priv = to_i915(dev);
2327 	enum pipe pipe = crtc->pipe;
2328 	u32 temp;
2329 
2330 	assert_pch_transcoder_disabled(dev_priv, pipe);
2331 
2332 	if (IS_IVYBRIDGE(dev_priv))
2333 		ivb_update_fdi_bc_bifurcation(crtc_state);
2334 
2335 	/* Write the TU size bits before fdi link training, so that error
2336 	 * detection works. */
2337 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2338 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2339 
2340 	/* For PCH output, training FDI link */
2341 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2342 
2343 	/* We need to program the right clock selection before writing the pixel
2344 	 * multiplier into the DPLL. */
2345 	if (HAS_PCH_CPT(dev_priv)) {
2346 		u32 sel;
2347 
2348 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2349 		temp |= TRANS_DPLL_ENABLE(pipe);
2350 		sel = TRANS_DPLLB_SEL(pipe);
2351 		if (crtc_state->shared_dpll ==
2352 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2353 			temp |= sel;
2354 		else
2355 			temp &= ~sel;
2356 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2357 	}
2358 
2359 	/* XXX: pch pll's can be enabled any time before we enable the PCH
2360 	 * transcoder, and we actually should do this to not upset any PCH
2361 	 * transcoder that already uses the clock when we share it.
2362 	 *
2363 	 * Note that enable_shared_dpll tries to do the right thing, but
2364 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2365 	 * the right LVDS enable sequence. */
2366 	intel_enable_shared_dpll(crtc_state);
2367 
2368 	/* set transcoder timing, panel must allow it */
2369 	assert_panel_unlocked(dev_priv, pipe);
2370 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2371 
2372 	intel_fdi_normal_train(crtc);
2373 
2374 	/* For PCH DP, enable TRANS_DP_CTL */
2375 	if (HAS_PCH_CPT(dev_priv) &&
2376 	    intel_crtc_has_dp_encoder(crtc_state)) {
2377 		const struct drm_display_mode *adjusted_mode =
2378 			&crtc_state->hw.adjusted_mode;
2379 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2380 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2381 		enum port port;
2382 
2383 		temp = intel_de_read(dev_priv, reg);
2384 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2385 			  TRANS_DP_SYNC_MASK |
2386 			  TRANS_DP_BPC_MASK);
2387 		temp |= TRANS_DP_OUTPUT_ENABLE;
2388 		temp |= bpc << 9; /* same format but at 11:9 */
2389 
2390 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2391 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2392 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2393 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2394 
2395 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2396 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2397 		temp |= TRANS_DP_PORT_SEL(port);
2398 
2399 		intel_de_write(dev_priv, reg, temp);
2400 	}
2401 
2402 	ilk_enable_pch_transcoder(crtc_state);
2403 }
2404 
2405 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2406 {
2407 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2408 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2409 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2410 
2411 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2412 
2413 	lpt_program_iclkip(crtc_state);
2414 
2415 	/* Set transcoder timing. */
2416 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2417 
2418 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2419 }
2420 
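/*
 * Sanity check that the pipe actually started scanning out after a mode
 * set: PIPEDSL is the current scanline counter, so if it never changes
 * the pipe is stuck.
 */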
2421 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2422 			       enum pipe pipe)
2423 {
2424 	i915_reg_t dslreg = PIPEDSL(pipe);
2425 	u32 temp;
2426 
2427 	temp = intel_de_read(dev_priv, dslreg);
2428 	udelay(500);
2429 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2430 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2431 			drm_err(&dev_priv->drm,
2432 				"mode set failed: pipe %c stuck\n",
2433 				pipe_name(pipe));
2434 	}
2435 }
2436 
2437 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2438 {
2439 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2440 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2441 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2442 	enum pipe pipe = crtc->pipe;
2443 	int width = drm_rect_width(dst);
2444 	int height = drm_rect_height(dst);
2445 	int x = dst->x1;
2446 	int y = dst->y1;
2447 
2448 	if (!crtc_state->pch_pfit.enabled)
2449 		return;
2450 
2451 	/* Force use of hard-coded filter coefficients
2452 	 * as some pre-programmed values are broken,
2453 	 * e.g. x201.
2454 	 */
2455 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2456 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2457 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2458 	else
2459 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2460 			       PF_FILTER_MED_3x3);
2461 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2462 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2463 }
2464 
2465 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2466 {
2467 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2468 	struct drm_device *dev = crtc->base.dev;
2469 	struct drm_i915_private *dev_priv = to_i915(dev);
2470 
2471 	if (!crtc_state->ips_enabled)
2472 		return;
2473 
2474 	/*
2475 	 * We can only enable IPS after we enable a plane and wait for a vblank
2476 	 * This function is called from post_plane_update, which is run after
2477 	 * a vblank wait.
2478 	 */
2479 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2480 
2481 	if (IS_BROADWELL(dev_priv)) {
2482 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2483 							 IPS_ENABLE | IPS_PCODE_CONTROL));
2484 		/* Quoting Art Runyan: "it's not safe to expect any particular
2485 		 * value in IPS_CTL bit 31 after enabling IPS through the
2486 		 * mailbox." Moreover, the mailbox may return a bogus state,
2487 		 * so we need to just enable it and continue on.
2488 		 */
2489 	} else {
2490 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2491 		/* The bit only becomes 1 in the next vblank, so this wait here
2492 		 * is essentially intel_wait_for_vblank. If we don't have this
2493 		 * and don't wait for vblanks until the end of crtc_enable, then
2494 		 * the HW state readout code will complain that the expected
2495 		 * IPS_CTL value is not the one we read. */
2496 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2497 			drm_err(&dev_priv->drm,
2498 				"Timed out waiting for IPS enable\n");
2499 	}
2500 }
2501 
2502 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2503 {
2504 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2505 	struct drm_device *dev = crtc->base.dev;
2506 	struct drm_i915_private *dev_priv = to_i915(dev);
2507 
2508 	if (!crtc_state->ips_enabled)
2509 		return;
2510 
2511 	if (IS_BROADWELL(dev_priv)) {
2512 		drm_WARN_ON(dev,
2513 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2514 		/*
2515 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2516 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2517 		 * instead.
2518 		 */
2519 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2520 			drm_err(&dev_priv->drm,
2521 				"Timed out waiting for IPS disable\n");
2522 	} else {
2523 		intel_de_write(dev_priv, IPS_CTL, 0);
2524 		intel_de_posting_read(dev_priv, IPS_CTL);
2525 	}
2526 
2527 	/* We need to wait for a vblank before we can disable the plane. */
2528 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2529 }
2530 
2531 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
2532 {
2533 	if (crtc->overlay)
2534 		(void) intel_overlay_switch_off(crtc->overlay);
2535 
2536 	/* Let userspace switch the overlay on again. In most cases userspace
2537 	 * has to recompute where to put it anyway.
2538 	 */
2539 }
2540 
2541 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2542 				       const struct intel_crtc_state *new_crtc_state)
2543 {
2544 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2545 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2546 
2547 	if (!old_crtc_state->ips_enabled)
2548 		return false;
2549 
2550 	if (intel_crtc_needs_modeset(new_crtc_state))
2551 		return true;
2552 
2553 	/*
2554 	 * Workaround: Do not read or write the pipe palette/gamma data while
2555 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2556 	 *
2557 	 * Disable IPS before we program the LUT.
2558 	 */
2559 	if (IS_HASWELL(dev_priv) &&
2560 	    (new_crtc_state->uapi.color_mgmt_changed ||
2561 	     new_crtc_state->update_pipe) &&
2562 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2563 		return true;
2564 
2565 	return !new_crtc_state->ips_enabled;
2566 }
2567 
2568 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2569 				       const struct intel_crtc_state *new_crtc_state)
2570 {
2571 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2572 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2573 
2574 	if (!new_crtc_state->ips_enabled)
2575 		return false;
2576 
2577 	if (intel_crtc_needs_modeset(new_crtc_state))
2578 		return true;
2579 
2580 	/*
2581 	 * Workaround : Do not read or write the pipe palette/gamma data while
2582 	 * Workaround: Do not read or write the pipe palette/gamma data while
2583 	 *
2584 	 * Re-enable IPS after the LUT has been programmed.
2585 	 */
2586 	if (IS_HASWELL(dev_priv) &&
2587 	    (new_crtc_state->uapi.color_mgmt_changed ||
2588 	     new_crtc_state->update_pipe) &&
2589 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2590 		return true;
2591 
2592 	/*
2593 	 * We can't read out IPS on Broadwell, so assume the worst and
2594 	 * forcibly enable IPS on the first fastset.
2595 	 */
2596 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2597 		return true;
2598 
2599 	return !old_crtc_state->ips_enabled;
2600 }
2601 
2602 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2603 {
2604 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2605 
2606 	if (!crtc_state->nv12_planes)
2607 		return false;
2608 
2609 	/* WA Display #0827: Gen9:all */
2610 	if (DISPLAY_VER(dev_priv) == 9)
2611 		return true;
2612 
2613 	return false;
2614 }
2615 
2616 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2617 {
2618 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2619 
2620 	/* Wa_2006604312:icl,ehl */
2621 	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
2622 		return true;
2623 
2624 	return false;
2625 }
2626 
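/*
 * Helpers that tell whether this commit takes the crtc from zero active
 * planes to some, or from some to zero; a full modeset counts as
 * disabling and re-enabling all planes.
 */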
2627 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2628 			    const struct intel_crtc_state *new_crtc_state)
2629 {
2630 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2631 		new_crtc_state->active_planes;
2632 }
2633 
2634 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2635 			     const struct intel_crtc_state *new_crtc_state)
2636 {
2637 	return old_crtc_state->active_planes &&
2638 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2639 }
2640 
2641 static void intel_post_plane_update(struct intel_atomic_state *state,
2642 				    struct intel_crtc *crtc)
2643 {
2644 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2645 	const struct intel_crtc_state *old_crtc_state =
2646 		intel_atomic_get_old_crtc_state(state, crtc);
2647 	const struct intel_crtc_state *new_crtc_state =
2648 		intel_atomic_get_new_crtc_state(state, crtc);
2649 	enum pipe pipe = crtc->pipe;
2650 
2651 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2652 
2653 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2654 		intel_update_watermarks(crtc);
2655 
2656 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2657 		hsw_enable_ips(new_crtc_state);
2658 
2659 	intel_fbc_post_update(state, crtc);
2660 
2661 	if (needs_nv12_wa(old_crtc_state) &&
2662 	    !needs_nv12_wa(new_crtc_state))
2663 		skl_wa_827(dev_priv, pipe, false);
2664 
2665 	if (needs_scalerclk_wa(old_crtc_state) &&
2666 	    !needs_scalerclk_wa(new_crtc_state))
2667 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2668 }
2669 
2670 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2671 					struct intel_crtc *crtc)
2672 {
2673 	const struct intel_crtc_state *crtc_state =
2674 		intel_atomic_get_new_crtc_state(state, crtc);
2675 	u8 update_planes = crtc_state->update_planes;
2676 	const struct intel_plane_state *plane_state;
2677 	struct intel_plane *plane;
2678 	int i;
2679 
2680 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2681 		if (plane->enable_flip_done &&
2682 		    plane->pipe == crtc->pipe &&
2683 		    update_planes & BIT(plane->id))
2684 			plane->enable_flip_done(plane);
2685 	}
2686 }
2687 
2688 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2689 					 struct intel_crtc *crtc)
2690 {
2691 	const struct intel_crtc_state *crtc_state =
2692 		intel_atomic_get_new_crtc_state(state, crtc);
2693 	u8 update_planes = crtc_state->update_planes;
2694 	const struct intel_plane_state *plane_state;
2695 	struct intel_plane *plane;
2696 	int i;
2697 
2698 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2699 		if (plane->disable_flip_done &&
2700 		    plane->pipe == crtc->pipe &&
2701 		    update_planes & BIT(plane->id))
2702 			plane->disable_flip_done(plane);
2703 	}
2704 }
2705 
2706 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2707 					     struct intel_crtc *crtc)
2708 {
2709 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2710 	const struct intel_crtc_state *old_crtc_state =
2711 		intel_atomic_get_old_crtc_state(state, crtc);
2712 	const struct intel_crtc_state *new_crtc_state =
2713 		intel_atomic_get_new_crtc_state(state, crtc);
2714 	u8 update_planes = new_crtc_state->update_planes;
2715 	const struct intel_plane_state *old_plane_state;
2716 	struct intel_plane *plane;
2717 	bool need_vbl_wait = false;
2718 	int i;
2719 
2720 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2721 		if (plane->need_async_flip_disable_wa &&
2722 		    plane->pipe == crtc->pipe &&
2723 		    update_planes & BIT(plane->id)) {
2724 			/*
2725 			 * Apart from the async flip bit we want to
2726 			 * preserve the old state for the plane.
2727 			 */
2728 			plane->async_flip(plane, old_crtc_state,
2729 					  old_plane_state, false);
2730 			need_vbl_wait = true;
2731 		}
2732 	}
2733 
2734 	if (need_vbl_wait)
2735 		intel_wait_for_vblank(i915, crtc->pipe);
2736 }
2737 
2738 static void intel_pre_plane_update(struct intel_atomic_state *state,
2739 				   struct intel_crtc *crtc)
2740 {
2741 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2742 	const struct intel_crtc_state *old_crtc_state =
2743 		intel_atomic_get_old_crtc_state(state, crtc);
2744 	const struct intel_crtc_state *new_crtc_state =
2745 		intel_atomic_get_new_crtc_state(state, crtc);
2746 	enum pipe pipe = crtc->pipe;
2747 
2748 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2749 		hsw_disable_ips(old_crtc_state);
2750 
2751 	if (intel_fbc_pre_update(state, crtc))
2752 		intel_wait_for_vblank(dev_priv, pipe);
2753 
2754 	/* Display WA 827 */
2755 	if (!needs_nv12_wa(old_crtc_state) &&
2756 	    needs_nv12_wa(new_crtc_state))
2757 		skl_wa_827(dev_priv, pipe, true);
2758 
2759 	/* Wa_2006604312:icl,ehl */
2760 	if (!needs_scalerclk_wa(old_crtc_state) &&
2761 	    needs_scalerclk_wa(new_crtc_state))
2762 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2763 
2764 	/*
2765 	 * Vblank time updates from the shadow to live plane control register
2766 	 * are blocked if the memory self-refresh mode is active at that
2767 	 * moment. So to make sure the plane gets truly disabled, first
2768 	 * disable the self-refresh mode. The self-refresh enable bit in turn
2769 	 * will be checked/applied by the HW only at the next frame start
2770 	 * event, which is after the vblank start event, so we need to have a
2771 	 * wait-for-vblank between disabling the plane and the pipe.
2772 	 */
2773 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2774 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2775 		intel_wait_for_vblank(dev_priv, pipe);
2776 
2777 	/*
2778 	 * IVB workaround: must disable low power watermarks for at least
2779 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2780 	 * when scaling is disabled.
2781 	 *
2782 	 * WaCxSRDisabledForSpriteScaling:ivb
2783 	 */
2784 	if (old_crtc_state->hw.active &&
2785 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
2786 		intel_wait_for_vblank(dev_priv, pipe);
2787 
2788 	/*
2789 	 * If we're doing a modeset we don't need to do any
2790 	 * pre-vblank watermark programming here.
2791 	 */
2792 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
2793 		/*
2794 		 * For platforms that support atomic watermarks, program the
2795 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
2796 		 * will be the intermediate values that are safe for both pre- and
2797 		 * post- vblank; when vblank happens, the 'active' values will be set
2798 		 * to the final 'target' values and we'll do this again to get the
2799 		 * optimal watermarks.  For gen9+ platforms, the values we program here
2800 		 * will be the final target values which will get automatically latched
2801 		 * at vblank time; no further programming will be necessary.
2802 		 *
2803 		 * If a platform hasn't been transitioned to atomic watermarks yet,
2804 		 * we'll continue to update watermarks the old way, if flags tell
2805 		 * us to.
2806 		 */
2807 		if (dev_priv->display.initial_watermarks)
2808 			dev_priv->display.initial_watermarks(state, crtc);
2809 		else if (new_crtc_state->update_wm_pre)
2810 			intel_update_watermarks(crtc);
2811 	}
2812 
2813 	/*
2814 	 * Gen2 reports pipe underruns whenever all planes are disabled.
2815 	 * So disable underrun reporting before all the planes get disabled.
2816 	 *
2817 	 * We do this after .initial_watermarks() so that we have a
2818 	 * chance of catching underruns with the intermediate watermarks
2819 	 * vs. the old plane configuration.
2820 	 */
2821 	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
2822 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2823 
2824 	/*
2825 	 * WA for platforms where async address update enable bit
2826 	 * is double buffered and only latched at start of vblank.
2827 	 */
2828 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
2829 		intel_crtc_async_flip_disable_wa(state, crtc);
2830 }
2831 
2832 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2833 				      struct intel_crtc *crtc)
2834 {
2835 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2836 	const struct intel_crtc_state *new_crtc_state =
2837 		intel_atomic_get_new_crtc_state(state, crtc);
2838 	unsigned int update_mask = new_crtc_state->update_planes;
2839 	const struct intel_plane_state *old_plane_state;
2840 	struct intel_plane *plane;
2841 	unsigned int fb_bits = 0;
2842 	int i;
2843 
2844 	intel_crtc_dpms_overlay_disable(crtc);
2845 
2846 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2847 		if (crtc->pipe != plane->pipe ||
2848 		    !(update_mask & BIT(plane->id)))
2849 			continue;
2850 
2851 		intel_disable_plane(plane, new_crtc_state);
2852 
2853 		if (old_plane_state->uapi.visible)
2854 			fb_bits |= plane->frontbuffer_bit;
2855 	}
2856 
2857 	intel_frontbuffer_flip(dev_priv, fb_bits);
2858 }
2859 
2860 /*
2861  * intel_connector_primary_encoder - get the primary encoder for a connector
2862  * @connector: connector for which to return the encoder
2863  *
2864  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2865  * all connectors to their encoder, except for DP-MST connectors which have
2866  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2867  * pointed to by as many DP-MST connectors as there are pipes.
2868  */
2869 static struct intel_encoder *
2870 intel_connector_primary_encoder(struct intel_connector *connector)
2871 {
2872 	struct intel_encoder *encoder;
2873 
2874 	if (connector->mst_port)
2875 		return &dp_to_dig_port(connector->mst_port)->base;
2876 
2877 	encoder = intel_attached_encoder(connector);
2878 	drm_WARN_ON(connector->base.dev, !encoder);
2879 
2880 	return encoder;
2881 }
2882 
2883 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2884 {
2885 	struct drm_connector_state *new_conn_state;
2886 	struct drm_connector *connector;
2887 	int i;
2888 
2889 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2890 					i) {
2891 		struct intel_connector *intel_connector;
2892 		struct intel_encoder *encoder;
2893 		struct intel_crtc *crtc;
2894 
2895 		if (!intel_connector_needs_modeset(state, connector))
2896 			continue;
2897 
2898 		intel_connector = to_intel_connector(connector);
2899 		encoder = intel_connector_primary_encoder(intel_connector);
2900 		if (!encoder->update_prepare)
2901 			continue;
2902 
2903 		crtc = new_conn_state->crtc ?
2904 			to_intel_crtc(new_conn_state->crtc) : NULL;
2905 		encoder->update_prepare(state, encoder, crtc);
2906 	}
2907 }
2908 
2909 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2910 {
2911 	struct drm_connector_state *new_conn_state;
2912 	struct drm_connector *connector;
2913 	int i;
2914 
2915 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2916 					i) {
2917 		struct intel_connector *intel_connector;
2918 		struct intel_encoder *encoder;
2919 		struct intel_crtc *crtc;
2920 
2921 		if (!intel_connector_needs_modeset(state, connector))
2922 			continue;
2923 
2924 		intel_connector = to_intel_connector(connector);
2925 		encoder = intel_connector_primary_encoder(intel_connector);
2926 		if (!encoder->update_complete)
2927 			continue;
2928 
2929 		crtc = new_conn_state->crtc ?
2930 			to_intel_crtc(new_conn_state->crtc) : NULL;
2931 		encoder->update_complete(state, encoder, crtc);
2932 	}
2933 }
2934 
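/*
 * The intel_encoders_*() helpers below fan out each modeset phase to the
 * encoders feeding this crtc: they walk the (old or new) connector states
 * in the atomic state and invoke the corresponding optional encoder hook,
 * e.g. ->pre_pll_enable(), ->pre_enable() and ->enable() on the way up,
 * ->disable(), ->post_disable() and ->post_pll_disable() on the way down.
 */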
2935 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2936 					  struct intel_crtc *crtc)
2937 {
2938 	const struct intel_crtc_state *crtc_state =
2939 		intel_atomic_get_new_crtc_state(state, crtc);
2940 	const struct drm_connector_state *conn_state;
2941 	struct drm_connector *conn;
2942 	int i;
2943 
2944 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2945 		struct intel_encoder *encoder =
2946 			to_intel_encoder(conn_state->best_encoder);
2947 
2948 		if (conn_state->crtc != &crtc->base)
2949 			continue;
2950 
2951 		if (encoder->pre_pll_enable)
2952 			encoder->pre_pll_enable(state, encoder,
2953 						crtc_state, conn_state);
2954 	}
2955 }
2956 
2957 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2958 				      struct intel_crtc *crtc)
2959 {
2960 	const struct intel_crtc_state *crtc_state =
2961 		intel_atomic_get_new_crtc_state(state, crtc);
2962 	const struct drm_connector_state *conn_state;
2963 	struct drm_connector *conn;
2964 	int i;
2965 
2966 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2967 		struct intel_encoder *encoder =
2968 			to_intel_encoder(conn_state->best_encoder);
2969 
2970 		if (conn_state->crtc != &crtc->base)
2971 			continue;
2972 
2973 		if (encoder->pre_enable)
2974 			encoder->pre_enable(state, encoder,
2975 					    crtc_state, conn_state);
2976 	}
2977 }
2978 
2979 static void intel_encoders_enable(struct intel_atomic_state *state,
2980 				  struct intel_crtc *crtc)
2981 {
2982 	const struct intel_crtc_state *crtc_state =
2983 		intel_atomic_get_new_crtc_state(state, crtc);
2984 	const struct drm_connector_state *conn_state;
2985 	struct drm_connector *conn;
2986 	int i;
2987 
2988 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2989 		struct intel_encoder *encoder =
2990 			to_intel_encoder(conn_state->best_encoder);
2991 
2992 		if (conn_state->crtc != &crtc->base)
2993 			continue;
2994 
2995 		if (encoder->enable)
2996 			encoder->enable(state, encoder,
2997 					crtc_state, conn_state);
2998 		intel_opregion_notify_encoder(encoder, true);
2999 	}
3000 }
3001 
3002 static void intel_encoders_pre_disable(struct intel_atomic_state *state,
3003 				       struct intel_crtc *crtc)
3004 {
3005 	const struct intel_crtc_state *old_crtc_state =
3006 		intel_atomic_get_old_crtc_state(state, crtc);
3007 	const struct drm_connector_state *old_conn_state;
3008 	struct drm_connector *conn;
3009 	int i;
3010 
3011 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3012 		struct intel_encoder *encoder =
3013 			to_intel_encoder(old_conn_state->best_encoder);
3014 
3015 		if (old_conn_state->crtc != &crtc->base)
3016 			continue;
3017 
3018 		if (encoder->pre_disable)
3019 			encoder->pre_disable(state, encoder, old_crtc_state,
3020 					     old_conn_state);
3021 	}
3022 }
3023 
3024 static void intel_encoders_disable(struct intel_atomic_state *state,
3025 				   struct intel_crtc *crtc)
3026 {
3027 	const struct intel_crtc_state *old_crtc_state =
3028 		intel_atomic_get_old_crtc_state(state, crtc);
3029 	const struct drm_connector_state *old_conn_state;
3030 	struct drm_connector *conn;
3031 	int i;
3032 
3033 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3034 		struct intel_encoder *encoder =
3035 			to_intel_encoder(old_conn_state->best_encoder);
3036 
3037 		if (old_conn_state->crtc != &crtc->base)
3038 			continue;
3039 
3040 		intel_opregion_notify_encoder(encoder, false);
3041 		if (encoder->disable)
3042 			encoder->disable(state, encoder,
3043 					 old_crtc_state, old_conn_state);
3044 	}
3045 }
3046 
3047 static void intel_encoders_post_disable(struct intel_atomic_state *state,
3048 					struct intel_crtc *crtc)
3049 {
3050 	const struct intel_crtc_state *old_crtc_state =
3051 		intel_atomic_get_old_crtc_state(state, crtc);
3052 	const struct drm_connector_state *old_conn_state;
3053 	struct drm_connector *conn;
3054 	int i;
3055 
3056 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3057 		struct intel_encoder *encoder =
3058 			to_intel_encoder(old_conn_state->best_encoder);
3059 
3060 		if (old_conn_state->crtc != &crtc->base)
3061 			continue;
3062 
3063 		if (encoder->post_disable)
3064 			encoder->post_disable(state, encoder,
3065 					      old_crtc_state, old_conn_state);
3066 	}
3067 }
3068 
3069 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
3070 					    struct intel_crtc *crtc)
3071 {
3072 	const struct intel_crtc_state *old_crtc_state =
3073 		intel_atomic_get_old_crtc_state(state, crtc);
3074 	const struct drm_connector_state *old_conn_state;
3075 	struct drm_connector *conn;
3076 	int i;
3077 
3078 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
3079 		struct intel_encoder *encoder =
3080 			to_intel_encoder(old_conn_state->best_encoder);
3081 
3082 		if (old_conn_state->crtc != &crtc->base)
3083 			continue;
3084 
3085 		if (encoder->post_pll_disable)
3086 			encoder->post_pll_disable(state, encoder,
3087 						  old_crtc_state, old_conn_state);
3088 	}
3089 }
3090 
3091 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
3092 				       struct intel_crtc *crtc)
3093 {
3094 	const struct intel_crtc_state *crtc_state =
3095 		intel_atomic_get_new_crtc_state(state, crtc);
3096 	const struct drm_connector_state *conn_state;
3097 	struct drm_connector *conn;
3098 	int i;
3099 
3100 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3101 		struct intel_encoder *encoder =
3102 			to_intel_encoder(conn_state->best_encoder);
3103 
3104 		if (conn_state->crtc != &crtc->base)
3105 			continue;
3106 
3107 		if (encoder->update_pipe)
3108 			encoder->update_pipe(state, encoder,
3109 					     crtc_state, conn_state);
3110 	}
3111 }
3112 
3113 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
3114 {
3115 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3116 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
3117 
3118 	plane->disable_plane(plane, crtc_state);
3119 }
3120 
3121 static void ilk_crtc_enable(struct intel_atomic_state *state,
3122 			    struct intel_crtc *crtc)
3123 {
3124 	const struct intel_crtc_state *new_crtc_state =
3125 		intel_atomic_get_new_crtc_state(state, crtc);
3126 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3127 	enum pipe pipe = crtc->pipe;
3128 
3129 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3130 		return;
3131 
3132 	/*
3133 	 * Sometimes spurious CPU pipe underruns happen during FDI
3134 	 * training, at least with VGA+HDMI cloning. Suppress them.
3135 	 *
3136 	 * On ILK we get occasional spurious CPU pipe underruns
3137 	 * between eDP port A enable and vdd enable. Also PCH port
3138 	 * enable seems to result in the occasional CPU pipe underrun.
3139 	 *
3140 	 * Spurious PCH underruns also occur during PCH enabling.
3141 	 */
3142 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3143 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3144 
3145 	if (new_crtc_state->has_pch_encoder)
3146 		intel_prepare_shared_dpll(new_crtc_state);
3147 
3148 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3149 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3150 
3151 	intel_set_transcoder_timings(new_crtc_state);
3152 	intel_set_pipe_src_size(new_crtc_state);
3153 
3154 	if (new_crtc_state->has_pch_encoder)
3155 		intel_cpu_transcoder_set_m_n(new_crtc_state,
3156 					     &new_crtc_state->fdi_m_n, NULL);
3157 
3158 	ilk_set_pipeconf(new_crtc_state);
3159 
3160 	crtc->active = true;
3161 
3162 	intel_encoders_pre_enable(state, crtc);
3163 
3164 	if (new_crtc_state->has_pch_encoder) {
3165 		/* Note: FDI PLL enabling _must_ be done before we enable the
3166 		 * cpu pipes, hence this is separate from all the other fdi/pch
3167 		 * enabling. */
3168 		ilk_fdi_pll_enable(new_crtc_state);
3169 	} else {
3170 		assert_fdi_tx_disabled(dev_priv, pipe);
3171 		assert_fdi_rx_disabled(dev_priv, pipe);
3172 	}
3173 
3174 	ilk_pfit_enable(new_crtc_state);
3175 
3176 	/*
3177 	 * On ILK+ the LUT must be loaded before the pipe is running but with
3178 	 * clocks enabled
3179 	 */
3180 	intel_color_load_luts(new_crtc_state);
3181 	intel_color_commit(new_crtc_state);
3182 	/* update DSPCNTR to configure gamma for pipe bottom color */
3183 	intel_disable_primary_plane(new_crtc_state);
3184 
3185 	if (dev_priv->display.initial_watermarks)
3186 		dev_priv->display.initial_watermarks(state, crtc);
3187 	intel_enable_pipe(new_crtc_state);
3188 
3189 	if (new_crtc_state->has_pch_encoder)
3190 		ilk_pch_enable(state, new_crtc_state);
3191 
3192 	intel_crtc_vblank_on(new_crtc_state);
3193 
3194 	intel_encoders_enable(state, crtc);
3195 
3196 	if (HAS_PCH_CPT(dev_priv))
3197 		cpt_verify_modeset(dev_priv, pipe);
3198 
3199 	/*
3200 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3201 	 * And a second vblank wait is needed at least on ILK with
3202 	 * some interlaced HDMI modes. Let's do the double wait always
3203 	 * in case there are more corner cases we don't know about.
3204 	 */
3205 	if (new_crtc_state->has_pch_encoder) {
3206 		intel_wait_for_vblank(dev_priv, pipe);
3207 		intel_wait_for_vblank(dev_priv, pipe);
3208 	}
3209 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3210 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3211 }
3212 
3213 /* IPS only exists on ULT machines and is tied to pipe A. */
3214 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3215 {
3216 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3217 }
3218 
3219 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3220 					    enum pipe pipe, bool apply)
3221 {
3222 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3223 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3224 
3225 	if (apply)
3226 		val |= mask;
3227 	else
3228 		val &= ~mask;
3229 
3230 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3231 }
3232 
3233 static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
3234 {
3235 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3236 	enum pipe pipe = crtc->pipe;
3237 	u32 val;
3238 
3239 	/* Wa_22010947358:adl-p */
3240 	if (IS_ALDERLAKE_P(dev_priv))
3241 		val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
3242 	else
3243 		val = MBUS_DBOX_A_CREDIT(2);
3244 
3245 	if (DISPLAY_VER(dev_priv) >= 12) {
3246 		val |= MBUS_DBOX_BW_CREDIT(2);
3247 		val |= MBUS_DBOX_B_CREDIT(12);
3248 	} else {
3249 		val |= MBUS_DBOX_BW_CREDIT(1);
3250 		val |= MBUS_DBOX_B_CREDIT(8);
3251 	}
3252 
3253 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3254 }
3255 
3256 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3257 {
3258 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3259 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3260 
3261 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3262 		       HSW_LINETIME(crtc_state->linetime) |
3263 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3264 }
3265 
3266 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3267 {
3268 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3269 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3270 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3271 	u32 val;
3272 
3273 	val = intel_de_read(dev_priv, reg);
3274 	val &= ~HSW_FRAME_START_DELAY_MASK;
3275 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3276 	intel_de_write(dev_priv, reg, val);
3277 }
3278 
3279 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3280 					 const struct intel_crtc_state *crtc_state)
3281 {
3282 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3283 	struct drm_i915_private *dev_priv = to_i915(master->base.dev);
3284 	struct intel_crtc_state *master_crtc_state;
3285 	struct drm_connector_state *conn_state;
3286 	struct drm_connector *conn;
3287 	struct intel_encoder *encoder = NULL;
3288 	int i;
3289 
3290 	if (crtc_state->bigjoiner_slave)
3291 		master = crtc_state->bigjoiner_linked_crtc;
3292 
3293 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3294 
3295 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3296 		if (conn_state->crtc != &master->base)
3297 			continue;
3298 
3299 		encoder = to_intel_encoder(conn_state->best_encoder);
3300 		break;
3301 	}
3302 
3303 	if (!crtc_state->bigjoiner_slave) {
3304 		/* need to enable VDSC, which we skipped in pre-enable */
3305 		intel_dsc_enable(encoder, crtc_state);
3306 	} else {
3307 		/*
3308 		 * Enable sequence steps 1-7 on bigjoiner master
3309 		 */
3310 		intel_encoders_pre_pll_enable(state, master);
3311 		if (master_crtc_state->shared_dpll)
3312 			intel_enable_shared_dpll(master_crtc_state);
3313 		intel_encoders_pre_enable(state, master);
3314 
3315 		/* and DSC on slave */
3316 		intel_dsc_enable(NULL, crtc_state);
3317 	}
3318 
3319 	if (DISPLAY_VER(dev_priv) >= 13)
3320 		intel_uncompressed_joiner_enable(crtc_state);
3321 }
3322 
3323 static void hsw_crtc_enable(struct intel_atomic_state *state,
3324 			    struct intel_crtc *crtc)
3325 {
3326 	const struct intel_crtc_state *new_crtc_state =
3327 		intel_atomic_get_new_crtc_state(state, crtc);
3328 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3329 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3330 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3331 	bool psl_clkgate_wa;
3332 
3333 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3334 		return;
3335 
3336 	if (!new_crtc_state->bigjoiner) {
3337 		intel_encoders_pre_pll_enable(state, crtc);
3338 
3339 		if (new_crtc_state->shared_dpll)
3340 			intel_enable_shared_dpll(new_crtc_state);
3341 
3342 		intel_encoders_pre_enable(state, crtc);
3343 	} else {
3344 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3345 	}
3346 
3347 	intel_set_pipe_src_size(new_crtc_state);
3348 	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3349 		bdw_set_pipemisc(new_crtc_state);
3350 
3351 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3352 		intel_set_transcoder_timings(new_crtc_state);
3353 
3354 		if (cpu_transcoder != TRANSCODER_EDP)
3355 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3356 				       new_crtc_state->pixel_multiplier - 1);
3357 
3358 		if (new_crtc_state->has_pch_encoder)
3359 			intel_cpu_transcoder_set_m_n(new_crtc_state,
3360 						     &new_crtc_state->fdi_m_n, NULL);
3361 
3362 		hsw_set_frame_start_delay(new_crtc_state);
3363 	}
3364 
3365 	if (!transcoder_is_dsi(cpu_transcoder))
3366 		hsw_set_pipeconf(new_crtc_state);
3367 
3368 	crtc->active = true;
3369 
3370 	/* Display WA #1180: WaDisableScalarClockGating: glk */
3371 	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
3372 		new_crtc_state->pch_pfit.enabled;
3373 	if (psl_clkgate_wa)
3374 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3375 
3376 	if (DISPLAY_VER(dev_priv) >= 9)
3377 		skl_pfit_enable(new_crtc_state);
3378 	else
3379 		ilk_pfit_enable(new_crtc_state);
3380 
3381 	/*
3382 	 * On ILK+ LUT must be loaded before the pipe is running but with
3383 	 * clocks enabled
3384 	 */
3385 	intel_color_load_luts(new_crtc_state);
3386 	intel_color_commit(new_crtc_state);
3387 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
3388 	if (DISPLAY_VER(dev_priv) < 9)
3389 		intel_disable_primary_plane(new_crtc_state);
3390 
3391 	hsw_set_linetime_wm(new_crtc_state);
3392 
3393 	if (DISPLAY_VER(dev_priv) >= 11)
3394 		icl_set_pipe_chicken(new_crtc_state);
3395 
3396 	if (dev_priv->display.initial_watermarks)
3397 		dev_priv->display.initial_watermarks(state, crtc);
3398 
3399 	if (DISPLAY_VER(dev_priv) >= 11) {
3400 		const struct intel_dbuf_state *dbuf_state =
3401 				intel_atomic_get_new_dbuf_state(state);
3402 
3403 		icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
3404 	}
3405 
3406 	if (new_crtc_state->bigjoiner_slave)
3407 		intel_crtc_vblank_on(new_crtc_state);
3408 
3409 	intel_encoders_enable(state, crtc);
3410 
3411 	if (psl_clkgate_wa) {
3412 		intel_wait_for_vblank(dev_priv, pipe);
3413 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3414 	}
3415 
3416 	/* If we change the relative order between pipe/planes enabling, we need
3417 	 * to change the workaround. */
3418 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3419 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3420 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3421 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3422 	}
3423 }
3424 
3425 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3426 {
3427 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3428 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3429 	enum pipe pipe = crtc->pipe;
3430 
	/* To avoid upsetting the power well on haswell, only disable the pfit if
3432 	 * it's in use. The hw state code will make sure we get this right. */
3433 	if (!old_crtc_state->pch_pfit.enabled)
3434 		return;
3435 
3436 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3437 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3438 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3439 }
3440 
3441 static void ilk_crtc_disable(struct intel_atomic_state *state,
3442 			     struct intel_crtc *crtc)
3443 {
3444 	const struct intel_crtc_state *old_crtc_state =
3445 		intel_atomic_get_old_crtc_state(state, crtc);
3446 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3447 	enum pipe pipe = crtc->pipe;
3448 
3449 	/*
3450 	 * Sometimes spurious CPU pipe underruns happen when the
3451 	 * pipe is already disabled, but FDI RX/TX is still enabled.
3452 	 * Happens at least with VGA+HDMI cloning. Suppress them.
3453 	 */
3454 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3455 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3456 
3457 	intel_encoders_disable(state, crtc);
3458 
3459 	intel_crtc_vblank_off(old_crtc_state);
3460 
3461 	intel_disable_pipe(old_crtc_state);
3462 
3463 	ilk_pfit_disable(old_crtc_state);
3464 
3465 	if (old_crtc_state->has_pch_encoder)
3466 		ilk_fdi_disable(crtc);
3467 
3468 	intel_encoders_post_disable(state, crtc);
3469 
3470 	if (old_crtc_state->has_pch_encoder) {
3471 		ilk_disable_pch_transcoder(dev_priv, pipe);
3472 
3473 		if (HAS_PCH_CPT(dev_priv)) {
3474 			i915_reg_t reg;
3475 			u32 temp;
3476 
3477 			/* disable TRANS_DP_CTL */
3478 			reg = TRANS_DP_CTL(pipe);
3479 			temp = intel_de_read(dev_priv, reg);
3480 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3481 				  TRANS_DP_PORT_SEL_MASK);
3482 			temp |= TRANS_DP_PORT_SEL_NONE;
3483 			intel_de_write(dev_priv, reg, temp);
3484 
3485 			/* disable DPLL_SEL */
3486 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3487 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3488 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3489 		}
3490 
3491 		ilk_fdi_pll_disable(crtc);
3492 	}
3493 
3494 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3495 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3496 }
3497 
3498 static void hsw_crtc_disable(struct intel_atomic_state *state,
3499 			     struct intel_crtc *crtc)
3500 {
3501 	/*
3502 	 * FIXME collapse everything to one hook.
3503 	 * Need care with mst->ddi interactions.
3504 	 */
3505 	intel_encoders_disable(state, crtc);
3506 	intel_encoders_post_disable(state, crtc);
3507 }
3508 
3509 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3510 {
3511 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3512 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3513 
3514 	if (!crtc_state->gmch_pfit.control)
3515 		return;
3516 
3517 	/*
3518 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3519 	 * according to register description and PRM.
3520 	 */
3521 	drm_WARN_ON(&dev_priv->drm,
3522 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3523 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3524 
3525 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3526 		       crtc_state->gmch_pfit.pgm_ratios);
3527 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3528 
3529 	/* Border color in case we don't scale up to the full screen. Black by
3530 	 * default, change to something else for debugging. */
3531 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3532 }
3533 
3534 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3535 {
3536 	if (phy == PHY_NONE)
3537 		return false;
3538 	else if (IS_DG2(dev_priv))
3539 		/*
3540 		 * DG2 outputs labelled as "combo PHY" in the bspec use
3541 		 * SNPS PHYs with completely different programming,
3542 		 * hence we always return false here.
3543 		 */
3544 		return false;
3545 	else if (IS_ALDERLAKE_S(dev_priv))
3546 		return phy <= PHY_E;
3547 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3548 		return phy <= PHY_D;
3549 	else if (IS_JSL_EHL(dev_priv))
3550 		return phy <= PHY_C;
3551 	else if (DISPLAY_VER(dev_priv) >= 11)
3552 		return phy <= PHY_B;
3553 	else
3554 		return false;
3555 }
3556 
3557 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3558 {
3559 	if (IS_DG2(dev_priv))
3560 		/* DG2's "TC1" output uses a SNPS PHY */
3561 		return false;
3562 	else if (IS_ALDERLAKE_P(dev_priv))
3563 		return phy >= PHY_F && phy <= PHY_I;
3564 	else if (IS_TIGERLAKE(dev_priv))
3565 		return phy >= PHY_D && phy <= PHY_I;
3566 	else if (IS_ICELAKE(dev_priv))
3567 		return phy >= PHY_C && phy <= PHY_F;
3568 	else
3569 		return false;
3570 }
3571 
3572 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
3573 {
3574 	if (phy == PHY_NONE)
3575 		return false;
3576 	else if (IS_DG2(dev_priv))
3577 		/*
3578 		 * All four "combo" ports and the TC1 port (PHY E) use
		 * Synopsys PHYs.
3580 		 */
3581 		return phy <= PHY_E;
3582 
3583 	return false;
3584 }
3585 
3586 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3587 {
3588 	if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
3589 		return PHY_D + port - PORT_D_XELPD;
3590 	else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1)
3591 		return PHY_F + port - PORT_TC1;
3592 	else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3593 		return PHY_B + port - PORT_TC1;
3594 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3595 		return PHY_C + port - PORT_TC1;
3596 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3597 		return PHY_A;
3598 
3599 	return PHY_A + port - PORT_A;
3600 }
3601 
3602 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3603 {
3604 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3605 		return TC_PORT_NONE;
3606 
3607 	if (DISPLAY_VER(dev_priv) >= 12)
3608 		return TC_PORT_1 + port - PORT_TC1;
3609 	else
3610 		return TC_PORT_1 + port - PORT_C;
3611 }
3612 
3613 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3614 {
3615 	switch (port) {
3616 	case PORT_A:
3617 		return POWER_DOMAIN_PORT_DDI_A_LANES;
3618 	case PORT_B:
3619 		return POWER_DOMAIN_PORT_DDI_B_LANES;
3620 	case PORT_C:
3621 		return POWER_DOMAIN_PORT_DDI_C_LANES;
3622 	case PORT_D:
3623 		return POWER_DOMAIN_PORT_DDI_D_LANES;
3624 	case PORT_E:
3625 		return POWER_DOMAIN_PORT_DDI_E_LANES;
3626 	case PORT_F:
3627 		return POWER_DOMAIN_PORT_DDI_F_LANES;
3628 	case PORT_G:
3629 		return POWER_DOMAIN_PORT_DDI_G_LANES;
3630 	case PORT_H:
3631 		return POWER_DOMAIN_PORT_DDI_H_LANES;
3632 	case PORT_I:
3633 		return POWER_DOMAIN_PORT_DDI_I_LANES;
3634 	default:
3635 		MISSING_CASE(port);
3636 		return POWER_DOMAIN_PORT_OTHER;
3637 	}
3638 }
3639 
3640 enum intel_display_power_domain
3641 intel_aux_power_domain(struct intel_digital_port *dig_port)
3642 {
3643 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3644 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3645 
3646 	if (intel_phy_is_tc(dev_priv, phy) &&
3647 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3648 		switch (dig_port->aux_ch) {
3649 		case AUX_CH_C:
3650 			return POWER_DOMAIN_AUX_C_TBT;
3651 		case AUX_CH_D:
3652 			return POWER_DOMAIN_AUX_D_TBT;
3653 		case AUX_CH_E:
3654 			return POWER_DOMAIN_AUX_E_TBT;
3655 		case AUX_CH_F:
3656 			return POWER_DOMAIN_AUX_F_TBT;
3657 		case AUX_CH_G:
3658 			return POWER_DOMAIN_AUX_G_TBT;
3659 		case AUX_CH_H:
3660 			return POWER_DOMAIN_AUX_H_TBT;
3661 		case AUX_CH_I:
3662 			return POWER_DOMAIN_AUX_I_TBT;
3663 		default:
3664 			MISSING_CASE(dig_port->aux_ch);
3665 			return POWER_DOMAIN_AUX_C_TBT;
3666 		}
3667 	}
3668 
3669 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3670 }
3671 
3672 /*
 * Converts aux_ch to power_domain without caring about TBT ports;
 * for TBT ports use intel_aux_power_domain() instead.
3675  */
3676 enum intel_display_power_domain
3677 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3678 {
3679 	switch (aux_ch) {
3680 	case AUX_CH_A:
3681 		return POWER_DOMAIN_AUX_A;
3682 	case AUX_CH_B:
3683 		return POWER_DOMAIN_AUX_B;
3684 	case AUX_CH_C:
3685 		return POWER_DOMAIN_AUX_C;
3686 	case AUX_CH_D:
3687 		return POWER_DOMAIN_AUX_D;
3688 	case AUX_CH_E:
3689 		return POWER_DOMAIN_AUX_E;
3690 	case AUX_CH_F:
3691 		return POWER_DOMAIN_AUX_F;
3692 	case AUX_CH_G:
3693 		return POWER_DOMAIN_AUX_G;
3694 	case AUX_CH_H:
3695 		return POWER_DOMAIN_AUX_H;
3696 	case AUX_CH_I:
3697 		return POWER_DOMAIN_AUX_I;
3698 	default:
3699 		MISSING_CASE(aux_ch);
3700 		return POWER_DOMAIN_AUX_A;
3701 	}
3702 }
3703 
3704 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3705 {
3706 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3707 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3708 	struct drm_encoder *encoder;
3709 	enum pipe pipe = crtc->pipe;
3710 	u64 mask;
3711 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3712 
3713 	if (!crtc_state->hw.active)
3714 		return 0;
3715 
3716 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3717 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3718 	if (crtc_state->pch_pfit.enabled ||
3719 	    crtc_state->pch_pfit.force_thru)
3720 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3721 
3722 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3723 				  crtc_state->uapi.encoder_mask) {
3724 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3725 
3726 		mask |= BIT_ULL(intel_encoder->power_domain);
3727 	}
3728 
3729 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3730 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
3731 
3732 	if (crtc_state->shared_dpll)
3733 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3734 
3735 	if (crtc_state->dsc.compression_enable)
3736 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3737 
3738 	return mask;
3739 }
3740 
3741 static u64
3742 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3743 {
3744 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3745 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3746 	enum intel_display_power_domain domain;
3747 	u64 domains, new_domains, old_domains;
3748 
3749 	domains = get_crtc_power_domains(crtc_state);
3750 
3751 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3752 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3753 
3754 	for_each_power_domain(domain, new_domains)
3755 		intel_display_power_get_in_set(dev_priv,
3756 					       &crtc->enabled_power_domains,
3757 					       domain);
3758 
3759 	return old_domains;
3760 }
3761 
3762 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3763 					   u64 domains)
3764 {
3765 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3766 					    &crtc->enabled_power_domains,
3767 					    domains);
3768 }
3769 
3770 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3771 				   struct intel_crtc *crtc)
3772 {
3773 	const struct intel_crtc_state *new_crtc_state =
3774 		intel_atomic_get_new_crtc_state(state, crtc);
3775 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3776 	enum pipe pipe = crtc->pipe;
3777 
3778 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3779 		return;
3780 
3781 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3782 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3783 
3784 	intel_set_transcoder_timings(new_crtc_state);
3785 	intel_set_pipe_src_size(new_crtc_state);
3786 
3787 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
3788 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
3789 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
3790 	}
3791 
3792 	i9xx_set_pipeconf(new_crtc_state);
3793 
3794 	crtc->active = true;
3795 
3796 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3797 
3798 	intel_encoders_pre_pll_enable(state, crtc);
3799 
3800 	if (IS_CHERRYVIEW(dev_priv)) {
3801 		chv_prepare_pll(crtc, new_crtc_state);
3802 		chv_enable_pll(crtc, new_crtc_state);
3803 	} else {
3804 		vlv_prepare_pll(crtc, new_crtc_state);
3805 		vlv_enable_pll(crtc, new_crtc_state);
3806 	}
3807 
3808 	intel_encoders_pre_enable(state, crtc);
3809 
3810 	i9xx_pfit_enable(new_crtc_state);
3811 
3812 	intel_color_load_luts(new_crtc_state);
3813 	intel_color_commit(new_crtc_state);
3814 	/* update DSPCNTR to configure gamma for pipe bottom color */
3815 	intel_disable_primary_plane(new_crtc_state);
3816 
3817 	dev_priv->display.initial_watermarks(state, crtc);
3818 	intel_enable_pipe(new_crtc_state);
3819 
3820 	intel_crtc_vblank_on(new_crtc_state);
3821 
3822 	intel_encoders_enable(state, crtc);
3823 }
3824 
3825 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3826 {
3827 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3828 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3829 
3830 	intel_de_write(dev_priv, FP0(crtc->pipe),
3831 		       crtc_state->dpll_hw_state.fp0);
3832 	intel_de_write(dev_priv, FP1(crtc->pipe),
3833 		       crtc_state->dpll_hw_state.fp1);
3834 }
3835 
3836 static void i9xx_crtc_enable(struct intel_atomic_state *state,
3837 			     struct intel_crtc *crtc)
3838 {
3839 	const struct intel_crtc_state *new_crtc_state =
3840 		intel_atomic_get_new_crtc_state(state, crtc);
3841 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3842 	enum pipe pipe = crtc->pipe;
3843 
3844 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3845 		return;
3846 
3847 	i9xx_set_pll_dividers(new_crtc_state);
3848 
3849 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3850 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3851 
3852 	intel_set_transcoder_timings(new_crtc_state);
3853 	intel_set_pipe_src_size(new_crtc_state);
3854 
3855 	i9xx_set_pipeconf(new_crtc_state);
3856 
3857 	crtc->active = true;
3858 
3859 	if (DISPLAY_VER(dev_priv) != 2)
3860 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3861 
3862 	intel_encoders_pre_enable(state, crtc);
3863 
3864 	i9xx_enable_pll(crtc, new_crtc_state);
3865 
3866 	i9xx_pfit_enable(new_crtc_state);
3867 
3868 	intel_color_load_luts(new_crtc_state);
3869 	intel_color_commit(new_crtc_state);
3870 	/* update DSPCNTR to configure gamma for pipe bottom color */
3871 	intel_disable_primary_plane(new_crtc_state);
3872 
3873 	if (dev_priv->display.initial_watermarks)
3874 		dev_priv->display.initial_watermarks(state, crtc);
3875 	else
3876 		intel_update_watermarks(crtc);
3877 	intel_enable_pipe(new_crtc_state);
3878 
3879 	intel_crtc_vblank_on(new_crtc_state);
3880 
3881 	intel_encoders_enable(state, crtc);
3882 
3883 	/* prevents spurious underruns */
3884 	if (DISPLAY_VER(dev_priv) == 2)
3885 		intel_wait_for_vblank(dev_priv, pipe);
3886 }
3887 
3888 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3889 {
3890 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3891 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3892 
3893 	if (!old_crtc_state->gmch_pfit.control)
3894 		return;
3895 
3896 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3897 
3898 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3899 		    intel_de_read(dev_priv, PFIT_CONTROL));
3900 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
3901 }
3902 
3903 static void i9xx_crtc_disable(struct intel_atomic_state *state,
3904 			      struct intel_crtc *crtc)
3905 {
3906 	struct intel_crtc_state *old_crtc_state =
3907 		intel_atomic_get_old_crtc_state(state, crtc);
3908 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3909 	enum pipe pipe = crtc->pipe;
3910 
3911 	/*
3912 	 * On gen2 planes are double buffered but the pipe isn't, so we must
3913 	 * wait for planes to fully turn off before disabling the pipe.
3914 	 */
3915 	if (DISPLAY_VER(dev_priv) == 2)
3916 		intel_wait_for_vblank(dev_priv, pipe);
3917 
3918 	intel_encoders_disable(state, crtc);
3919 
3920 	intel_crtc_vblank_off(old_crtc_state);
3921 
3922 	intel_disable_pipe(old_crtc_state);
3923 
3924 	i9xx_pfit_disable(old_crtc_state);
3925 
3926 	intel_encoders_post_disable(state, crtc);
3927 
3928 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
3929 		if (IS_CHERRYVIEW(dev_priv))
3930 			chv_disable_pll(dev_priv, pipe);
3931 		else if (IS_VALLEYVIEW(dev_priv))
3932 			vlv_disable_pll(dev_priv, pipe);
3933 		else
3934 			i9xx_disable_pll(old_crtc_state);
3935 	}
3936 
3937 	intel_encoders_post_pll_disable(state, crtc);
3938 
3939 	if (DISPLAY_VER(dev_priv) != 2)
3940 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3941 
3942 	if (!dev_priv->display.initial_watermarks)
3943 		intel_update_watermarks(crtc);
3944 
3945 	/* clock the pipe down to 640x480@60 to potentially save power */
3946 	if (IS_I830(dev_priv))
3947 		i830_enable_pipe(dev_priv, pipe);
3948 }
3949 
3950 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
3951 					struct drm_modeset_acquire_ctx *ctx)
3952 {
3953 	struct intel_encoder *encoder;
3954 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3955 	struct intel_bw_state *bw_state =
3956 		to_intel_bw_state(dev_priv->bw_obj.state);
3957 	struct intel_cdclk_state *cdclk_state =
3958 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
3959 	struct intel_dbuf_state *dbuf_state =
3960 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
3961 	struct intel_crtc_state *crtc_state =
3962 		to_intel_crtc_state(crtc->base.state);
3963 	struct intel_plane *plane;
3964 	struct drm_atomic_state *state;
3965 	struct intel_crtc_state *temp_crtc_state;
3966 	enum pipe pipe = crtc->pipe;
3967 	int ret;
3968 
3969 	if (!crtc_state->hw.active)
3970 		return;
3971 
3972 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
3973 		const struct intel_plane_state *plane_state =
3974 			to_intel_plane_state(plane->base.state);
3975 
3976 		if (plane_state->uapi.visible)
3977 			intel_plane_disable_noatomic(crtc, plane);
3978 	}
3979 
3980 	state = drm_atomic_state_alloc(&dev_priv->drm);
3981 	if (!state) {
3982 		drm_dbg_kms(&dev_priv->drm,
3983 			    "failed to disable [CRTC:%d:%s], out of memory",
3984 			    crtc->base.base.id, crtc->base.name);
3985 		return;
3986 	}
3987 
3988 	state->acquire_ctx = ctx;
3989 
3990 	/* Everything's already locked, -EDEADLK can't happen. */
3991 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
3992 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
3993 
3994 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
3995 
3996 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
3997 
3998 	drm_atomic_state_put(state);
3999 
4000 	drm_dbg_kms(&dev_priv->drm,
4001 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
4002 		    crtc->base.base.id, crtc->base.name);
4003 
4004 	crtc->active = false;
4005 	crtc->base.enabled = false;
4006 
4007 	drm_WARN_ON(&dev_priv->drm,
4008 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
4009 	crtc_state->uapi.active = false;
4010 	crtc_state->uapi.connector_mask = 0;
4011 	crtc_state->uapi.encoder_mask = 0;
4012 	intel_crtc_free_hw_state(crtc_state);
4013 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
4014 
4015 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
4016 		encoder->base.crtc = NULL;
4017 
4018 	intel_fbc_disable(crtc);
4019 	intel_update_watermarks(crtc);
4020 	intel_disable_shared_dpll(crtc_state);
4021 
4022 	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
4023 
4024 	dev_priv->active_pipes &= ~BIT(pipe);
4025 	cdclk_state->min_cdclk[pipe] = 0;
4026 	cdclk_state->min_voltage_level[pipe] = 0;
4027 	cdclk_state->active_pipes &= ~BIT(pipe);
4028 
4029 	dbuf_state->active_pipes &= ~BIT(pipe);
4030 
4031 	bw_state->data_rate[pipe] = 0;
4032 	bw_state->num_active_planes[pipe] = 0;
4033 }
4034 
4035 /*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
4038  */
4039 int intel_display_suspend(struct drm_device *dev)
4040 {
4041 	struct drm_i915_private *dev_priv = to_i915(dev);
4042 	struct drm_atomic_state *state;
4043 	int ret;
4044 
4045 	if (!HAS_DISPLAY(dev_priv))
4046 		return 0;
4047 
4048 	state = drm_atomic_helper_suspend(dev);
4049 	ret = PTR_ERR_OR_ZERO(state);
4050 	if (ret)
4051 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
4052 			ret);
4053 	else
4054 		dev_priv->modeset_restore_state = state;
4055 	return ret;
4056 }
4057 
4058 void intel_encoder_destroy(struct drm_encoder *encoder)
4059 {
4060 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4061 
4062 	drm_encoder_cleanup(encoder);
4063 	kfree(intel_encoder);
4064 }
4065 
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
4068 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
4069 					 struct drm_connector_state *conn_state)
4070 {
4071 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
4072 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
4073 
4074 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
4075 		    connector->base.base.id, connector->base.name);
4076 
4077 	if (connector->get_hw_state(connector)) {
4078 		struct intel_encoder *encoder = intel_attached_encoder(connector);
4079 
4080 		I915_STATE_WARN(!crtc_state,
4081 			 "connector enabled without attached crtc\n");
4082 
4083 		if (!crtc_state)
4084 			return;
4085 
4086 		I915_STATE_WARN(!crtc_state->hw.active,
4087 				"connector is active, but attached crtc isn't\n");
4088 
4089 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
4090 			return;
4091 
4092 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
4093 			"atomic encoder doesn't match attached encoder\n");
4094 
4095 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
4096 			"attached encoder crtc differs from connector crtc\n");
4097 	} else {
4098 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
4099 				"attached crtc is active, but connector isn't\n");
4100 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
4101 			"best encoder set without crtc!\n");
4102 	}
4103 }
4104 
4105 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
4106 {
4107 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4108 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4109 
4110 	/* IPS only exists on ULT machines and is tied to pipe A. */
4111 	if (!hsw_crtc_supports_ips(crtc))
4112 		return false;
4113 
4114 	if (!dev_priv->params.enable_ips)
4115 		return false;
4116 
4117 	if (crtc_state->pipe_bpp > 24)
4118 		return false;
4119 
4120 	/*
4121 	 * We compare against max which means we must take
4122 	 * the increased cdclk requirement into account when
4123 	 * calculating the new cdclk.
4124 	 *
	 * Should measure whether using a lower cdclk w/o IPS would be preferable.
4126 	 */
4127 	if (IS_BROADWELL(dev_priv) &&
4128 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
4129 		return false;
4130 
4131 	return true;
4132 }
4133 
4134 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
4135 {
4136 	struct drm_i915_private *dev_priv =
4137 		to_i915(crtc_state->uapi.crtc->dev);
4138 	struct intel_atomic_state *state =
4139 		to_intel_atomic_state(crtc_state->uapi.state);
4140 
4141 	crtc_state->ips_enabled = false;
4142 
4143 	if (!hsw_crtc_state_ips_capable(crtc_state))
4144 		return 0;
4145 
4146 	/*
4147 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4148 	 * enabled and disabled dynamically based on package C states,
4149 	 * user space can't make reliable use of the CRCs, so let's just
4150 	 * completely disable it.
4151 	 */
4152 	if (crtc_state->crc_enabled)
4153 		return 0;
4154 
4155 	/* IPS should be fine as long as at least one plane is enabled. */
4156 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
4157 		return 0;
4158 
4159 	if (IS_BROADWELL(dev_priv)) {
4160 		const struct intel_cdclk_state *cdclk_state;
4161 
4162 		cdclk_state = intel_atomic_get_cdclk_state(state);
4163 		if (IS_ERR(cdclk_state))
4164 			return PTR_ERR(cdclk_state);
4165 
4166 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
4167 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
4168 			return 0;
4169 	}
4170 
4171 	crtc_state->ips_enabled = true;
4172 
4173 	return 0;
4174 }
4175 
4176 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
4177 {
4178 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4179 
4180 	/* GDG double wide on either pipe, otherwise pipe A only */
4181 	return DISPLAY_VER(dev_priv) < 4 &&
4182 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
4183 }
4184 
4185 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
4186 {
4187 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
4188 	struct drm_rect src;
4189 
4190 	/*
4191 	 * We only use IF-ID interlacing. If we ever use
4192 	 * PF-ID we'll need to adjust the pixel_rate here.
4193 	 */
4194 
4195 	if (!crtc_state->pch_pfit.enabled)
4196 		return pixel_rate;
4197 
4198 	drm_rect_init(&src, 0, 0,
4199 		      crtc_state->pipe_src_w << 16,
4200 		      crtc_state->pipe_src_h << 16);
4201 
4202 	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
4203 				   pixel_rate);
4204 }
4205 
4206 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4207 					 const struct drm_display_mode *timings)
4208 {
4209 	mode->hdisplay = timings->crtc_hdisplay;
4210 	mode->htotal = timings->crtc_htotal;
4211 	mode->hsync_start = timings->crtc_hsync_start;
4212 	mode->hsync_end = timings->crtc_hsync_end;
4213 
4214 	mode->vdisplay = timings->crtc_vdisplay;
4215 	mode->vtotal = timings->crtc_vtotal;
4216 	mode->vsync_start = timings->crtc_vsync_start;
4217 	mode->vsync_end = timings->crtc_vsync_end;
4218 
4219 	mode->flags = timings->flags;
4220 	mode->type = DRM_MODE_TYPE_DRIVER;
4221 
4222 	mode->clock = timings->crtc_clock;
4223 
4224 	drm_mode_set_name(mode);
4225 }
4226 
4227 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4228 {
4229 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4230 
4231 	if (HAS_GMCH(dev_priv))
4232 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4233 		crtc_state->pixel_rate =
4234 			crtc_state->hw.pipe_mode.crtc_clock;
4235 	else
4236 		crtc_state->pixel_rate =
4237 			ilk_pipe_pixel_rate(crtc_state);
4238 }
4239 
4240 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4241 {
4242 	struct drm_display_mode *mode = &crtc_state->hw.mode;
4243 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4244 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4245 
4246 	drm_mode_copy(pipe_mode, adjusted_mode);
4247 
4248 	if (crtc_state->bigjoiner) {
4249 		/*
4250 		 * transcoder is programmed to the full mode,
4251 		 * but pipe timings are half of the transcoder mode
4252 		 */
4253 		pipe_mode->crtc_hdisplay /= 2;
4254 		pipe_mode->crtc_hblank_start /= 2;
4255 		pipe_mode->crtc_hblank_end /= 2;
4256 		pipe_mode->crtc_hsync_start /= 2;
4257 		pipe_mode->crtc_hsync_end /= 2;
4258 		pipe_mode->crtc_htotal /= 2;
4259 		pipe_mode->crtc_clock /= 2;
4260 	}
4261 
4262 	if (crtc_state->splitter.enable) {
4263 		int n = crtc_state->splitter.link_count;
4264 		int overlap = crtc_state->splitter.pixel_overlap;
4265 
4266 		/*
4267 		 * eDP MSO uses segment timings from EDID for transcoder
4268 		 * timings, but full mode for everything else.
4269 		 *
4270 		 * h_full = (h_segment - pixel_overlap) * link_count
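		 *
		 * e.g. two 1920-pixel segments with no pixel overlap scan
		 * out as a single 3840-pixel wide mode (illustrative).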
4271 		 */
4272 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4273 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4274 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4275 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4276 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4277 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4278 		pipe_mode->crtc_clock *= n;
4279 
4280 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4281 		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4282 	} else {
4283 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4284 		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4285 	}
4286 
4287 	intel_crtc_compute_pixel_rate(crtc_state);
4288 
4289 	drm_mode_copy(mode, adjusted_mode);
4290 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4291 	mode->vdisplay = crtc_state->pipe_src_h;
4292 }
4293 
4294 static void intel_encoder_get_config(struct intel_encoder *encoder,
4295 				     struct intel_crtc_state *crtc_state)
4296 {
4297 	encoder->get_config(encoder, crtc_state);
4298 
4299 	intel_crtc_readout_derived_state(crtc_state);
4300 }
4301 
4302 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4303 				     struct intel_crtc_state *pipe_config)
4304 {
4305 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4306 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4307 	int clock_limit = dev_priv->max_dotclk_freq;
4308 
4309 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4310 
4311 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
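	/* e.g. a 7680-wide mode runs each joined pipe at 3840 (illustrative) */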
4312 	if (pipe_config->bigjoiner) {
4313 		pipe_mode->crtc_clock /= 2;
4314 		pipe_mode->crtc_hdisplay /= 2;
4315 		pipe_mode->crtc_hblank_start /= 2;
4316 		pipe_mode->crtc_hblank_end /= 2;
4317 		pipe_mode->crtc_hsync_start /= 2;
4318 		pipe_mode->crtc_hsync_end /= 2;
4319 		pipe_mode->crtc_htotal /= 2;
4320 		pipe_config->pipe_src_w /= 2;
4321 	}
4322 
4323 	if (pipe_config->splitter.enable) {
4324 		int n = pipe_config->splitter.link_count;
4325 		int overlap = pipe_config->splitter.pixel_overlap;
4326 
4327 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4328 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4329 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4330 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4331 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4332 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4333 		pipe_mode->crtc_clock *= n;
4334 	}
4335 
4336 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4337 
4338 	if (DISPLAY_VER(dev_priv) < 4) {
4339 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4340 
4341 		/*
4342 		 * Enable double wide mode when the dot clock
4343 		 * is > 90% of the (display) core speed.
4344 		 */
4345 		if (intel_crtc_supports_double_wide(crtc) &&
4346 		    pipe_mode->crtc_clock > clock_limit) {
4347 			clock_limit = dev_priv->max_dotclk_freq;
4348 			pipe_config->double_wide = true;
4349 		}
4350 	}
4351 
4352 	if (pipe_mode->crtc_clock > clock_limit) {
4353 		drm_dbg_kms(&dev_priv->drm,
4354 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4355 			    pipe_mode->crtc_clock, clock_limit,
4356 			    yesno(pipe_config->double_wide));
4357 		return -EINVAL;
4358 	}
4359 
4360 	/*
4361 	 * Pipe horizontal size must be even in:
4362 	 * - DVO ganged mode
4363 	 * - LVDS dual channel mode
4364 	 * - Double wide pipe
4365 	 */
4366 	if (pipe_config->pipe_src_w & 1) {
4367 		if (pipe_config->double_wide) {
4368 			drm_dbg_kms(&dev_priv->drm,
4369 				    "Odd pipe source width not supported with double wide pipe\n");
4370 			return -EINVAL;
4371 		}
4372 
4373 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4374 		    intel_is_dual_link_lvds(dev_priv)) {
4375 			drm_dbg_kms(&dev_priv->drm,
4376 				    "Odd pipe source width not supported with dual link LVDS\n");
4377 			return -EINVAL;
4378 		}
4379 	}
4380 
4381 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4382 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4383 	 */
4384 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4385 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4386 		return -EINVAL;
4387 
4388 	intel_crtc_compute_pixel_rate(pipe_config);
4389 
4390 	if (pipe_config->has_pch_encoder)
4391 		return ilk_fdi_compute_config(crtc, pipe_config);
4392 
4393 	return 0;
4394 }
4395 
4396 static void
4397 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4398 {
4399 	while (*num > DATA_LINK_M_N_MASK ||
4400 	       *den > DATA_LINK_M_N_MASK) {
4401 		*num >>= 1;
4402 		*den >>= 1;
4403 	}
4404 }
4405 
4406 static void compute_m_n(unsigned int m, unsigned int n,
4407 			u32 *ret_m, u32 *ret_n,
4408 			bool constant_n)
4409 {
4410 	/*
4411 	 * Several DP dongles in particular seem to be fussy about
	 * too large link M/N values. Use an N value of 0x8000, which
	 * should be acceptable to such devices. 0x8000 is the
	 * specified fixed N value for asynchronous clock mode,
	 * which the devices also expect in synchronous clock mode.
4416 	 */
4417 	if (constant_n)
4418 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4419 	else
4420 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4421 
4422 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4423 	intel_reduce_m_n_ratio(ret_m, ret_n);
4424 }
4425 
4426 void
4427 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4428 		       int pixel_clock, int link_clock,
4429 		       struct intel_link_m_n *m_n,
4430 		       bool constant_n, bool fec_enable)
4431 {
4432 	u32 data_clock = bits_per_pixel * pixel_clock;
4433 
4434 	if (fec_enable)
4435 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4436 
4437 	m_n->tu = 64;
4438 	compute_m_n(data_clock,
4439 		    link_clock * nlanes * 8,
4440 		    &m_n->gmch_m, &m_n->gmch_n,
4441 		    constant_n);
4442 
4443 	compute_m_n(pixel_clock, link_clock,
4444 		    &m_n->link_m, &m_n->link_n,
4445 		    constant_n);
4446 }
4447 
4448 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4449 {
4450 	/*
	 * There may be no VBT; if the BIOS enabled SSC we can just keep
	 * using it to avoid unnecessary flicker. But if the BIOS isn't
	 * using it, don't assume it will work even if the VBT
4454 	 * indicates as much.
4455 	 */
4456 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4457 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4458 						       PCH_DREF_CONTROL) &
4459 			DREF_SSC1_ENABLE;
4460 
4461 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4462 			drm_dbg_kms(&dev_priv->drm,
4463 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4464 				    enableddisabled(bios_lvds_use_ssc),
4465 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4466 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4467 		}
4468 	}
4469 }
4470 
4471 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4472 					 const struct intel_link_m_n *m_n)
4473 {
4474 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4475 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4476 	enum pipe pipe = crtc->pipe;
4477 
4478 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4479 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4480 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4481 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4482 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4483 }
4484 
4485 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4486 				 enum transcoder transcoder)
4487 {
4488 	if (IS_HASWELL(dev_priv))
4489 		return transcoder == TRANSCODER_EDP;
4490 
4491 	/*
	 * Strictly speaking, some registers are available before
	 * gen7, but we only support DRRS on gen7+.
4494 	 */
4495 	return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
4496 }
4497 
4498 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4499 					 const struct intel_link_m_n *m_n,
4500 					 const struct intel_link_m_n *m2_n2)
4501 {
4502 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4503 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4504 	enum pipe pipe = crtc->pipe;
4505 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4506 
4507 	if (DISPLAY_VER(dev_priv) >= 5) {
4508 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4509 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4510 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4511 			       m_n->gmch_n);
4512 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4513 			       m_n->link_m);
4514 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4515 			       m_n->link_n);
4516 		/*
		 * M2_N2 registers are set only if DRRS is supported
4518 		 * (to make sure the registers are not unnecessarily accessed).
4519 		 */
4520 		if (m2_n2 && crtc_state->has_drrs &&
4521 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4522 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4523 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4524 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4525 				       m2_n2->gmch_n);
4526 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4527 				       m2_n2->link_m);
4528 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4529 				       m2_n2->link_n);
4530 		}
4531 	} else {
4532 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4533 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4534 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4535 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4536 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4537 	}
4538 }
4539 
4540 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4541 {
4542 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4543 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4544 
4545 	if (m_n == M1_N1) {
4546 		dp_m_n = &crtc_state->dp_m_n;
4547 		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {
4550 		/*
		 * M2_N2 registers are not supported, so the m2_n2 divider value
		 * needs to be programmed into M1_N1.
4553 		 */
4554 		dp_m_n = &crtc_state->dp_m2_n2;
4555 	} else {
4556 		drm_err(&i915->drm, "Unsupported divider value\n");
4557 		return;
4558 	}
4559 
4560 	if (crtc_state->has_pch_encoder)
4561 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4562 	else
4563 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4564 }
4565 
4566 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4567 {
4568 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4569 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4570 	enum pipe pipe = crtc->pipe;
4571 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4572 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4573 	u32 crtc_vtotal, crtc_vblank_end;
4574 	int vsyncshift = 0;
4575 
	/* We need to be careful not to change the adjusted mode, for otherwise
4577 	 * the hw state checker will get angry at the mismatch. */
4578 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4579 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4580 
4581 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4582 		/* the chip adds 2 halflines automatically */
4583 		crtc_vtotal -= 1;
4584 		crtc_vblank_end -= 1;
4585 
4586 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4587 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4588 		else
4589 			vsyncshift = adjusted_mode->crtc_hsync_start -
4590 				adjusted_mode->crtc_htotal / 2;
4591 		if (vsyncshift < 0)
4592 			vsyncshift += adjusted_mode->crtc_htotal;
4593 	}
4594 
4595 	if (DISPLAY_VER(dev_priv) > 3)
4596 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4597 		               vsyncshift);
4598 
4599 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4600 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4601 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4602 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4603 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4604 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4605 
4606 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4607 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4608 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4609 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4610 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4611 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4612 
4613 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4614 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented in the DDI_FUNC_CTL register description, EDP Input Select
4616 	 * bits. */
4617 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4618 	    (pipe == PIPE_B || pipe == PIPE_C))
4619 		intel_de_write(dev_priv, VTOTAL(pipe),
		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4622 }
4623 
4624 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4625 {
4626 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4627 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4628 	enum pipe pipe = crtc->pipe;
4629 
4630 	/* pipesrc controls the size that is scaled from, which should
4631 	 * always be the user's requested size.
4632 	 */
4633 	intel_de_write(dev_priv, PIPESRC(pipe),
4634 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4635 }
4636 
4637 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4638 {
4639 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4640 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4641 
4642 	if (DISPLAY_VER(dev_priv) == 2)
4643 		return false;
4644 
4645 	if (DISPLAY_VER(dev_priv) >= 9 ||
4646 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4647 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4648 	else
4649 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4650 }
4651 
4652 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4653 					 struct intel_crtc_state *pipe_config)
4654 {
4655 	struct drm_device *dev = crtc->base.dev;
4656 	struct drm_i915_private *dev_priv = to_i915(dev);
4657 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4658 	u32 tmp;
4659 
4660 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4661 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4662 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4663 
4664 	if (!transcoder_is_dsi(cpu_transcoder)) {
4665 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4666 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4667 							(tmp & 0xffff) + 1;
4668 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4669 						((tmp >> 16) & 0xffff) + 1;
4670 	}
4671 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4672 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4673 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4674 
4675 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4676 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4677 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4678 
4679 	if (!transcoder_is_dsi(cpu_transcoder)) {
4680 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4681 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4682 							(tmp & 0xffff) + 1;
4683 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4684 						((tmp >> 16) & 0xffff) + 1;
4685 	}
4686 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4687 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4688 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4689 
4690 	if (intel_pipe_is_interlaced(pipe_config)) {
4691 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4692 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4693 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4694 	}
4695 }
4696 
4697 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4698 				    struct intel_crtc_state *pipe_config)
4699 {
4700 	struct drm_device *dev = crtc->base.dev;
4701 	struct drm_i915_private *dev_priv = to_i915(dev);
4702 	u32 tmp;
4703 
4704 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4705 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4706 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4707 }
4708 
4709 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4710 {
4711 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4712 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4713 	u32 pipeconf;
4714 
4715 	pipeconf = 0;
4716 
4717 	/* we keep both pipes enabled on 830 */
4718 	if (IS_I830(dev_priv))
4719 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4720 
4721 	if (crtc_state->double_wide)
4722 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4723 
4724 	/* only g4x and later have fancy bpc/dither controls */
4725 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4726 	    IS_CHERRYVIEW(dev_priv)) {
4727 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4728 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4729 			pipeconf |= PIPECONF_DITHER_EN |
4730 				    PIPECONF_DITHER_TYPE_SP;
4731 
4732 		switch (crtc_state->pipe_bpp) {
4733 		case 18:
4734 			pipeconf |= PIPECONF_6BPC;
4735 			break;
4736 		case 24:
4737 			pipeconf |= PIPECONF_8BPC;
4738 			break;
4739 		case 30:
4740 			pipeconf |= PIPECONF_10BPC;
4741 			break;
4742 		default:
4743 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4744 			BUG();
4745 		}
4746 	}
4747 
4748 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4749 		if (DISPLAY_VER(dev_priv) < 4 ||
4750 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4751 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4752 		else
4753 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4754 	} else {
4755 		pipeconf |= PIPECONF_PROGRESSIVE;
4756 	}
4757 
4758 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4759 	     crtc_state->limited_color_range)
4760 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4761 
4762 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4763 
4764 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4765 
4766 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4767 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4768 }
4769 
4770 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4771 {
4772 	if (IS_I830(dev_priv))
4773 		return false;
4774 
4775 	return DISPLAY_VER(dev_priv) >= 4 ||
4776 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4777 }
4778 
4779 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4780 {
4781 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4782 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4783 	u32 tmp;
4784 
4785 	if (!i9xx_has_pfit(dev_priv))
4786 		return;
4787 
4788 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4789 	if (!(tmp & PFIT_ENABLE))
4790 		return;
4791 
4792 	/* Check whether the pfit is attached to our pipe. */
4793 	if (DISPLAY_VER(dev_priv) < 4) {
4794 		if (crtc->pipe != PIPE_B)
4795 			return;
4796 	} else {
4797 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4798 			return;
4799 	}
4800 
4801 	crtc_state->gmch_pfit.control = tmp;
4802 	crtc_state->gmch_pfit.pgm_ratios =
4803 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4804 }
4805 
4806 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4807 			       struct intel_crtc_state *pipe_config)
4808 {
4809 	struct drm_device *dev = crtc->base.dev;
4810 	struct drm_i915_private *dev_priv = to_i915(dev);
4811 	enum pipe pipe = crtc->pipe;
4812 	struct dpll clock;
4813 	u32 mdiv;
4814 	int refclk = 100000;
4815 
4816 	/* In case of DSI, DPLL will not be used */
4817 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4818 		return;
4819 
4820 	vlv_dpio_get(dev_priv);
4821 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4822 	vlv_dpio_put(dev_priv);
4823 
4824 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4825 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
4826 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4827 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4828 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4829 
4830 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4831 }
4832 
4833 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4834 			       struct intel_crtc_state *pipe_config)
4835 {
4836 	struct drm_device *dev = crtc->base.dev;
4837 	struct drm_i915_private *dev_priv = to_i915(dev);
4838 	enum pipe pipe = crtc->pipe;
4839 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
4840 	struct dpll clock;
4841 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4842 	int refclk = 100000;
4843 
4844 	/* In case of DSI, DPLL will not be used */
4845 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4846 		return;
4847 
4848 	vlv_dpio_get(dev_priv);
4849 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4850 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4851 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4852 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4853 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4854 	vlv_dpio_put(dev_priv);
4855 
4856 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4857 	clock.m2 = (pll_dw0 & 0xff) << 22;
4858 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4859 		clock.m2 |= pll_dw2 & 0x3fffff;
4860 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4861 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4862 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4863 
4864 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4865 }
4866 
4867 static enum intel_output_format
4868 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4869 {
4870 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4871 	u32 tmp;
4872 
4873 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4874 
4875 	if (tmp & PIPEMISC_YUV420_ENABLE) {
4876 		/* We support 4:2:0 in full blend mode only */
4877 		drm_WARN_ON(&dev_priv->drm,
4878 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4879 
4880 		return INTEL_OUTPUT_FORMAT_YCBCR420;
4881 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4882 		return INTEL_OUTPUT_FORMAT_YCBCR444;
4883 	} else {
4884 		return INTEL_OUTPUT_FORMAT_RGB;
4885 	}
4886 }
4887 
4888 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4889 {
4890 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4891 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4892 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4893 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4894 	u32 tmp;
4895 
4896 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4897 
4898 	if (tmp & DISPPLANE_GAMMA_ENABLE)
4899 		crtc_state->gamma_enable = true;
4900 
4901 	if (!HAS_GMCH(dev_priv) &&
4902 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
4903 		crtc_state->csc_enable = true;
4904 }
4905 
4906 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4907 				 struct intel_crtc_state *pipe_config)
4908 {
4909 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4910 	enum intel_display_power_domain power_domain;
4911 	intel_wakeref_t wakeref;
4912 	u32 tmp;
4913 	bool ret;
4914 
4915 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4916 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4917 	if (!wakeref)
4918 		return false;
4919 
4920 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4921 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4922 	pipe_config->shared_dpll = NULL;
4923 
4924 	ret = false;
4925 
4926 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4927 	if (!(tmp & PIPECONF_ENABLE))
4928 		goto out;
4929 
4930 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4931 	    IS_CHERRYVIEW(dev_priv)) {
4932 		switch (tmp & PIPECONF_BPC_MASK) {
4933 		case PIPECONF_6BPC:
4934 			pipe_config->pipe_bpp = 18;
4935 			break;
4936 		case PIPECONF_8BPC:
4937 			pipe_config->pipe_bpp = 24;
4938 			break;
4939 		case PIPECONF_10BPC:
4940 			pipe_config->pipe_bpp = 30;
4941 			break;
4942 		default:
4943 			break;
4944 		}
4945 	}
4946 
4947 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4948 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
4949 		pipe_config->limited_color_range = true;
4950 
4951 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
4952 		PIPECONF_GAMMA_MODE_SHIFT;
4953 
4954 	if (IS_CHERRYVIEW(dev_priv))
4955 		pipe_config->cgm_mode = intel_de_read(dev_priv,
4956 						      CGM_PIPE_MODE(crtc->pipe));
4957 
4958 	i9xx_get_pipe_color_config(pipe_config);
4959 	intel_color_get_config(pipe_config);
4960 
4961 	if (DISPLAY_VER(dev_priv) < 4)
4962 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
4963 
4964 	intel_get_transcoder_timings(crtc, pipe_config);
4965 	intel_get_pipe_src_size(crtc, pipe_config);
4966 
4967 	i9xx_get_pfit_config(pipe_config);
4968 
4969 	if (DISPLAY_VER(dev_priv) >= 4) {
4970 		/* No way to read it out on pipes B and C */
4971 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
4972 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
4973 		else
4974 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
4975 		pipe_config->pixel_multiplier =
4976 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4977 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4978 		pipe_config->dpll_hw_state.dpll_md = tmp;
4979 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4980 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
4981 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
4982 		pipe_config->pixel_multiplier =
4983 			((tmp & SDVO_MULTIPLIER_MASK)
4984 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
4985 	} else {
4986 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
4987 		 * port and will be fixed up in the encoder->get_config
4988 		 * function. */
4989 		pipe_config->pixel_multiplier = 1;
4990 	}
4991 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
4992 							DPLL(crtc->pipe));
4993 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
4994 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
4995 							       FP0(crtc->pipe));
4996 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
4997 							       FP1(crtc->pipe));
4998 	} else {
4999 		/* Mask out read-only status bits. */
5000 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5001 						     DPLL_PORTC_READY_MASK |
5002 						     DPLL_PORTB_READY_MASK);
5003 	}
5004 
5005 	if (IS_CHERRYVIEW(dev_priv))
5006 		chv_crtc_clock_get(crtc, pipe_config);
5007 	else if (IS_VALLEYVIEW(dev_priv))
5008 		vlv_crtc_clock_get(crtc, pipe_config);
5009 	else
5010 		i9xx_crtc_clock_get(crtc, pipe_config);
5011 
5012 	/*
5013 	 * Normally the dotclock is filled in by the encoder .get_config()
5014 	 * but in case the pipe is enabled w/o any ports we need a sane
5015 	 * default.
5016 	 */
5017 	pipe_config->hw.adjusted_mode.crtc_clock =
5018 		pipe_config->port_clock / pipe_config->pixel_multiplier;
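	/*
	 * Illustrative only (values assumed, not read out of hardware): a
	 * port_clock of 200000 kHz with pixel_multiplier == 2 makes this
	 * fallback yield a 100000 kHz crtc_clock.
	 */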
5019 
5020 	ret = true;
5021 
5022 out:
5023 	intel_display_power_put(dev_priv, power_domain, wakeref);
5024 
5025 	return ret;
5026 }
5027 
5028 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
5029 {
5030 	struct intel_encoder *encoder;
5031 	int i;
5032 	u32 val, final;
5033 	bool has_lvds = false;
5034 	bool has_cpu_edp = false;
5035 	bool has_panel = false;
5036 	bool has_ck505 = false;
5037 	bool can_ssc = false;
5038 	bool using_ssc_source = false;
5039 
5040 	/* We need to take the global config into account */
5041 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5042 		switch (encoder->type) {
5043 		case INTEL_OUTPUT_LVDS:
5044 			has_panel = true;
5045 			has_lvds = true;
5046 			break;
5047 		case INTEL_OUTPUT_EDP:
5048 			has_panel = true;
5049 			if (encoder->port == PORT_A)
5050 				has_cpu_edp = true;
5051 			break;
5052 		default:
5053 			break;
5054 		}
5055 	}
5056 
5057 	if (HAS_PCH_IBX(dev_priv)) {
5058 		has_ck505 = dev_priv->vbt.display_clock_mode;
5059 		can_ssc = has_ck505;
5060 	} else {
5061 		has_ck505 = false;
5062 		can_ssc = true;
5063 	}
5064 
5065 	/* Check if any DPLLs are using the SSC source */
5066 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
5067 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
5068 
5069 		if (!(temp & DPLL_VCO_ENABLE))
5070 			continue;
5071 
5072 		if ((temp & PLL_REF_INPUT_MASK) ==
5073 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5074 			using_ssc_source = true;
5075 			break;
5076 		}
5077 	}
5078 
5079 	drm_dbg_kms(&dev_priv->drm,
5080 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
5081 		    has_panel, has_lvds, has_ck505, using_ssc_source);
5082 
5083 	/* Ironlake: try to set up the display reference clock before
5084 	 * enabling the DPLL. This is only under the driver's control
5085 	 * from PCH B stepping onward; earlier chipset steppings ignore
5086 	 * this setting.
5087 	 */
5088 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
5089 
5090 	/* As we must carefully and slowly disable/enable each source in turn,
5091 	 * compute the final state we want first and check if we need to
5092 	 * make any changes at all.
5093 	 */
5094 	final = val;
5095 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
5096 	if (has_ck505)
5097 		final |= DREF_NONSPREAD_CK505_ENABLE;
5098 	else
5099 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
5100 
5101 	final &= ~DREF_SSC_SOURCE_MASK;
5102 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5103 	final &= ~DREF_SSC1_ENABLE;
5104 
5105 	if (has_panel) {
5106 		final |= DREF_SSC_SOURCE_ENABLE;
5107 
5108 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
5109 			final |= DREF_SSC1_ENABLE;
5110 
5111 		if (has_cpu_edp) {
5112 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
5113 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5114 			else
5115 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5116 		} else
5117 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5118 	} else if (using_ssc_source) {
5119 		final |= DREF_SSC_SOURCE_ENABLE;
5120 		final |= DREF_SSC1_ENABLE;
5121 	}
5122 
5123 	if (final == val)
5124 		return;
5125 
5126 	/* Always enable nonspread source */
5127 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
5128 
5129 	if (has_ck505)
5130 		val |= DREF_NONSPREAD_CK505_ENABLE;
5131 	else
5132 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
5133 
5134 	if (has_panel) {
5135 		val &= ~DREF_SSC_SOURCE_MASK;
5136 		val |= DREF_SSC_SOURCE_ENABLE;
5137 
5138 		/* SSC must be turned on before enabling the CPU output */
5139 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5140 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
5141 			val |= DREF_SSC1_ENABLE;
5142 		} else
5143 			val &= ~DREF_SSC1_ENABLE;
5144 
5145 		/* Get SSC going before enabling the outputs */
5146 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5147 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5148 		udelay(200);
5149 
5150 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5151 
5152 		/* Enable CPU source on CPU attached eDP */
5153 		if (has_cpu_edp) {
5154 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5155 				drm_dbg_kms(&dev_priv->drm,
5156 					    "Using SSC on eDP\n");
5157 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5158 			} else
5159 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5160 		} else
5161 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5162 
5163 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5164 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5165 		udelay(200);
5166 	} else {
5167 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
5168 
5169 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5170 
5171 		/* Turn off CPU output */
5172 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5173 
5174 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5175 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5176 		udelay(200);
5177 
5178 		if (!using_ssc_source) {
5179 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
5180 
5181 			/* Turn off the SSC source */
5182 			val &= ~DREF_SSC_SOURCE_MASK;
5183 			val |= DREF_SSC_SOURCE_DISABLE;
5184 
5185 			/* Turn off SSC1 */
5186 			val &= ~DREF_SSC1_ENABLE;
5187 
5188 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
5189 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
5190 			udelay(200);
5191 		}
5192 	}
5193 
5194 	BUG_ON(val != final);
5195 }
5196 
5197 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5198 {
5199 	u32 tmp;
5200 
5201 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5202 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5203 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5204 
5205 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5206 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5207 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5208 
5209 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5210 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5211 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5212 
5213 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5214 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5215 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5216 }
5217 
5218 /* WaMPhyProgramming:hsw */
5219 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5220 {
5221 	u32 tmp;
5222 
5223 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5224 	tmp &= ~(0xFF << 24);
5225 	tmp |= (0x12 << 24);
5226 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5227 
5228 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5229 	tmp |= (1 << 11);
5230 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5231 
5232 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5233 	tmp |= (1 << 11);
5234 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5235 
5236 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5237 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5238 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5239 
5240 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5241 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5242 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5243 
5244 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5245 	tmp &= ~(7 << 13);
5246 	tmp |= (5 << 13);
5247 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5248 
5249 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5250 	tmp &= ~(7 << 13);
5251 	tmp |= (5 << 13);
5252 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5253 
5254 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5255 	tmp &= ~0xFF;
5256 	tmp |= 0x1C;
5257 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5258 
5259 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5260 	tmp &= ~0xFF;
5261 	tmp |= 0x1C;
5262 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5263 
5264 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5265 	tmp &= ~(0xFF << 16);
5266 	tmp |= (0x1C << 16);
5267 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5268 
5269 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5270 	tmp &= ~(0xFF << 16);
5271 	tmp |= (0x1C << 16);
5272 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5273 
5274 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5275 	tmp |= (1 << 27);
5276 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5277 
5278 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5279 	tmp |= (1 << 27);
5280 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5281 
5282 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5283 	tmp &= ~(0xF << 28);
5284 	tmp |= (4 << 28);
5285 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5286 
5287 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5288 	tmp &= ~(0xF << 28);
5289 	tmp |= (4 << 28);
5290 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5291 }
5292 
5293 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5294  * Programming" based on the parameters passed:
5295  * - Sequence to enable CLKOUT_DP
5296  * - Sequence to enable CLKOUT_DP without spread
5297  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5298  */
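/*
 * How the two parameters select between those sequences (a sketch inferred
 * from the code and callers below, not spelled out in BSpec):
 *
 *	lpt_enable_clkout_dp(dev_priv, true, true);   - CLKOUT_DP for FDI,
 *	                                                also programs PCH FDI I/O
 *	lpt_enable_clkout_dp(dev_priv, true, false);  - CLKOUT_DP with spread
 *	lpt_enable_clkout_dp(dev_priv, false, false); - CLKOUT_DP without spread
 */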
5299 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5300 				 bool with_spread, bool with_fdi)
5301 {
5302 	u32 reg, tmp;
5303 
5304 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5305 		     "FDI requires downspread\n"))
5306 		with_spread = true;
5307 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5308 		     with_fdi, "LP PCH doesn't have FDI\n"))
5309 		with_fdi = false;
5310 
5311 	mutex_lock(&dev_priv->sb_lock);
5312 
5313 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5314 	tmp &= ~SBI_SSCCTL_DISABLE;
5315 	tmp |= SBI_SSCCTL_PATHALT;
5316 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5317 
5318 	udelay(24);
5319 
5320 	if (with_spread) {
5321 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5322 		tmp &= ~SBI_SSCCTL_PATHALT;
5323 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5324 
5325 		if (with_fdi) {
5326 			lpt_reset_fdi_mphy(dev_priv);
5327 			lpt_program_fdi_mphy(dev_priv);
5328 		}
5329 	}
5330 
5331 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5332 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5333 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5334 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5335 
5336 	mutex_unlock(&dev_priv->sb_lock);
5337 }
5338 
5339 /* Sequence to disable CLKOUT_DP */
5340 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5341 {
5342 	u32 reg, tmp;
5343 
5344 	mutex_lock(&dev_priv->sb_lock);
5345 
5346 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5347 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5348 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5349 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5350 
5351 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5352 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5353 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5354 			tmp |= SBI_SSCCTL_PATHALT;
5355 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5356 			udelay(32);
5357 		}
5358 		tmp |= SBI_SSCCTL_DISABLE;
5359 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5360 	}
5361 
5362 	mutex_unlock(&dev_priv->sb_lock);
5363 }
5364 
5365 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5366 
5367 static const u16 sscdivintphase[] = {
5368 	[BEND_IDX( 50)] = 0x3B23,
5369 	[BEND_IDX( 45)] = 0x3B23,
5370 	[BEND_IDX( 40)] = 0x3C23,
5371 	[BEND_IDX( 35)] = 0x3C23,
5372 	[BEND_IDX( 30)] = 0x3D23,
5373 	[BEND_IDX( 25)] = 0x3D23,
5374 	[BEND_IDX( 20)] = 0x3E23,
5375 	[BEND_IDX( 15)] = 0x3E23,
5376 	[BEND_IDX( 10)] = 0x3F23,
5377 	[BEND_IDX(  5)] = 0x3F23,
5378 	[BEND_IDX(  0)] = 0x0025,
5379 	[BEND_IDX( -5)] = 0x0025,
5380 	[BEND_IDX(-10)] = 0x0125,
5381 	[BEND_IDX(-15)] = 0x0125,
5382 	[BEND_IDX(-20)] = 0x0225,
5383 	[BEND_IDX(-25)] = 0x0225,
5384 	[BEND_IDX(-30)] = 0x0325,
5385 	[BEND_IDX(-35)] = 0x0325,
5386 	[BEND_IDX(-40)] = 0x0425,
5387 	[BEND_IDX(-45)] = 0x0425,
5388 	[BEND_IDX(-50)] = 0x0525,
5389 };
5390 
5391 /*
5392  * Bend CLKOUT_DP
5393  * steps -50 to 50 inclusive, in steps of 5
5394  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5395  * change in clock period = -(steps / 10) * 5.787 ps
5396  */
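/*
 * Worked example (illustrative, derived from the formula above): steps = 50
 * gives a period change of -(50 / 10) * 5.787 ps = -28.935 ps, i.e. the
 * nominal 135MHz clock (period ~7407.4 ps) is sped up by roughly 0.39%.
 * BEND_IDX() maps the -50..50 range onto table indices 0..20, e.g.
 * BEND_IDX(-50) == 0, BEND_IDX(0) == 10, BEND_IDX(50) == 20.
 */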
5397 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5398 {
5399 	u32 tmp;
5400 	int idx = BEND_IDX(steps);
5401 
5402 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5403 		return;
5404 
5405 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5406 		return;
5407 
5408 	mutex_lock(&dev_priv->sb_lock);
5409 
5410 	if (steps % 10 != 0)
5411 		tmp = 0xAAAAAAAB;
5412 	else
5413 		tmp = 0x00000000;
5414 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5415 
5416 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5417 	tmp &= 0xffff0000;
5418 	tmp |= sscdivintphase[idx];
5419 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5420 
5421 	mutex_unlock(&dev_priv->sb_lock);
5422 }
5423 
5424 #undef BEND_IDX
5425 
5426 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5427 {
5428 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5429 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5430 
5431 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5432 		return false;
5433 
5434 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5435 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5436 		return true;
5437 
5438 	if (IS_BROADWELL(dev_priv) &&
5439 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5440 		return true;
5441 
5442 	return false;
5443 }
5444 
5445 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5446 			       enum intel_dpll_id id)
5447 {
5448 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5449 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5450 
5451 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5452 		return false;
5453 
5454 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5455 		return true;
5456 
5457 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5458 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5459 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5460 		return true;
5461 
5462 	return false;
5463 }
5464 
5465 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5466 {
5467 	struct intel_encoder *encoder;
5468 	bool has_fdi = false;
5469 
5470 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5471 		switch (encoder->type) {
5472 		case INTEL_OUTPUT_ANALOG:
5473 			has_fdi = true;
5474 			break;
5475 		default:
5476 			break;
5477 		}
5478 	}
5479 
5480 	/*
5481 	 * The BIOS may have decided to use the PCH SSC
5482 	 * reference so we must not disable it until the
5483 	 * relevant PLLs have stopped relying on it. We'll
5484 	 * just leave the PCH SSC reference enabled in case
5485 	 * any active PLL is using it. It will get disabled
5486 	 * after runtime suspend if we don't have FDI.
5487 	 *
5488 	 * TODO: Move the whole reference clock handling
5489 	 * to the modeset sequence proper so that we can
5490 	 * actually enable/disable/reconfigure these things
5491 	 * safely. To do that we need to introduce a real
5492 	 * clock hierarchy. That would also allow us to do
5493 	 * clock bending finally.
5494 	 */
5495 	dev_priv->pch_ssc_use = 0;
5496 
5497 	if (spll_uses_pch_ssc(dev_priv)) {
5498 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5499 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5500 	}
5501 
5502 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5503 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5504 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5505 	}
5506 
5507 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5508 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5509 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5510 	}
5511 
5512 	if (dev_priv->pch_ssc_use)
5513 		return;
5514 
5515 	if (has_fdi) {
5516 		lpt_bend_clkout_dp(dev_priv, 0);
5517 		lpt_enable_clkout_dp(dev_priv, true, true);
5518 	} else {
5519 		lpt_disable_clkout_dp(dev_priv);
5520 	}
5521 }
5522 
5523 /*
5524  * Initialize reference clocks when the driver loads
5525  */
5526 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5527 {
5528 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5529 		ilk_init_pch_refclk(dev_priv);
5530 	else if (HAS_PCH_LPT(dev_priv))
5531 		lpt_init_pch_refclk(dev_priv);
5532 }
5533 
5534 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5535 {
5536 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5537 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5538 	enum pipe pipe = crtc->pipe;
5539 	u32 val;
5540 
5541 	val = 0;
5542 
5543 	switch (crtc_state->pipe_bpp) {
5544 	case 18:
5545 		val |= PIPECONF_6BPC;
5546 		break;
5547 	case 24:
5548 		val |= PIPECONF_8BPC;
5549 		break;
5550 	case 30:
5551 		val |= PIPECONF_10BPC;
5552 		break;
5553 	case 36:
5554 		val |= PIPECONF_12BPC;
5555 		break;
5556 	default:
5557 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5558 		BUG();
5559 	}
5560 
5561 	if (crtc_state->dither)
5562 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5563 
5564 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5565 		val |= PIPECONF_INTERLACED_ILK;
5566 	else
5567 		val |= PIPECONF_PROGRESSIVE;
5568 
5569 	/*
5570 	 * This would end up with an odd purple hue over
5571 	 * the entire display. Make sure we don't do it.
5572 	 */
5573 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5574 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5575 
5576 	if (crtc_state->limited_color_range &&
5577 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5578 		val |= PIPECONF_COLOR_RANGE_SELECT;
5579 
5580 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5581 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5582 
5583 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5584 
5585 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5586 
5587 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5588 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5589 }
5590 
5591 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5592 {
5593 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5594 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5595 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5596 	u32 val = 0;
5597 
5598 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5599 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5600 
5601 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5602 		val |= PIPECONF_INTERLACED_ILK;
5603 	else
5604 		val |= PIPECONF_PROGRESSIVE;
5605 
5606 	if (IS_HASWELL(dev_priv) &&
5607 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5608 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5609 
5610 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5611 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5612 }
5613 
5614 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5615 {
5616 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5617 	const struct intel_crtc_scaler_state *scaler_state =
5618 		&crtc_state->scaler_state;
5619 
5620 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5621 	u32 val = 0;
5622 	int i;
5623 
5624 	switch (crtc_state->pipe_bpp) {
5625 	case 18:
5626 		val |= PIPEMISC_6_BPC;
5627 		break;
5628 	case 24:
5629 		val |= PIPEMISC_8_BPC;
5630 		break;
5631 	case 30:
5632 		val |= PIPEMISC_10_BPC;
5633 		break;
5634 	case 36:
5635 		/* Port output 12BPC defined for ADLP+ */
5636 		if (DISPLAY_VER(dev_priv) > 12)
5637 			val |= PIPEMISC_12_BPC_ADLP;
5638 		break;
5639 	default:
5640 		MISSING_CASE(crtc_state->pipe_bpp);
5641 		break;
5642 	}
5643 
5644 	if (crtc_state->dither)
5645 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5646 
5647 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5648 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5649 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5650 
5651 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5652 		val |= PIPEMISC_YUV420_ENABLE |
5653 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5654 
5655 	if (DISPLAY_VER(dev_priv) >= 11 &&
5656 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5657 					   BIT(PLANE_CURSOR))) == 0)
5658 		val |= PIPEMISC_HDR_MODE_PRECISION;
5659 
5660 	if (DISPLAY_VER(dev_priv) >= 12)
5661 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5662 
5663 	if (IS_ALDERLAKE_P(dev_priv)) {
5664 		bool scaler_in_use = false;
5665 
5666 		for (i = 0; i < crtc->num_scalers; i++) {
5667 			if (!scaler_state->scalers[i].in_use)
5668 				continue;
5669 
5670 			scaler_in_use = true;
5671 			break;
5672 		}
5673 
5674 		intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
5675 			     PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
5676 			     scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
5677 			     PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
5678 	}
5679 
5680 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5681 }
5682 
5683 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5684 {
5685 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5686 	u32 tmp;
5687 
5688 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5689 
5690 	switch (tmp & PIPEMISC_BPC_MASK) {
5691 	case PIPEMISC_6_BPC:
5692 		return 18;
5693 	case PIPEMISC_8_BPC:
5694 		return 24;
5695 	case PIPEMISC_10_BPC:
5696 		return 30;
5697 	/*
5698 	 * PORT OUTPUT 12 BPC defined for ADLP+.
5699 	 *
5700 	 * TODO:
5701 	 * For previous platforms with DSI interface, bits 5:7
5702 	 * are used for storing pipe_bpp irrespective of dithering.
5703 	 * Since the value of 12 BPC is not defined for these bits
5704 	 * on older platforms, we need to find a workaround for 12 BPC
5705 	 * MIPI DSI HW readout.
5706 	 */
5707 	case PIPEMISC_12_BPC_ADLP:
5708 		if (DISPLAY_VER(dev_priv) > 12)
5709 			return 36;
5710 		fallthrough;
5711 	default:
5712 		MISSING_CASE(tmp);
5713 		return 0;
5714 	}
5715 }
5716 
5717 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5718 {
5719 	/*
5720 	 * Account for spread spectrum to avoid
5721 	 * oversubscribing the link. Max center spread
5722 	 * is 2.5%; use 5% for safety's sake.
5723 	 */
5724 	u32 bps = target_clock * bpp * 21 / 20;
5725 	return DIV_ROUND_UP(bps, link_bw * 8);
5726 }
5727 
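/*
 * Worked example (illustrative values, not from BSpec): a 148500 kHz
 * dotclock at 24 bpp over a 270000 kHz FDI link gives
 * bps = 148500 * 24 * 21 / 20 = 3742200 and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */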
5728 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5729 					 struct intel_link_m_n *m_n)
5730 {
5731 	struct drm_device *dev = crtc->base.dev;
5732 	struct drm_i915_private *dev_priv = to_i915(dev);
5733 	enum pipe pipe = crtc->pipe;
5734 
5735 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5736 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5737 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5738 		& ~TU_SIZE_MASK;
5739 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5740 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5741 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5742 }
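/*
 * Note on the layout assumed by the masking above: the DATA_M1 register
 * carries both the TU size (the bits above TU_SIZE_SHIFT) and the M value,
 * so gmch_m strips the TU field while tu extracts it and adds 1 (the field
 * is stored as size - 1).
 */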
5743 
5744 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5745 					 enum transcoder transcoder,
5746 					 struct intel_link_m_n *m_n,
5747 					 struct intel_link_m_n *m2_n2)
5748 {
5749 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5750 	enum pipe pipe = crtc->pipe;
5751 
5752 	if (DISPLAY_VER(dev_priv) >= 5) {
5753 		m_n->link_m = intel_de_read(dev_priv,
5754 					    PIPE_LINK_M1(transcoder));
5755 		m_n->link_n = intel_de_read(dev_priv,
5756 					    PIPE_LINK_N1(transcoder));
5757 		m_n->gmch_m = intel_de_read(dev_priv,
5758 					    PIPE_DATA_M1(transcoder))
5759 			& ~TU_SIZE_MASK;
5760 		m_n->gmch_n = intel_de_read(dev_priv,
5761 					    PIPE_DATA_N1(transcoder));
5762 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5763 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5764 
5765 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5766 			m2_n2->link_m = intel_de_read(dev_priv,
5767 						      PIPE_LINK_M2(transcoder));
5768 			m2_n2->link_n = intel_de_read(dev_priv,
5769 						      PIPE_LINK_N2(transcoder));
5770 			m2_n2->gmch_m = intel_de_read(dev_priv,
5771 						      PIPE_DATA_M2(transcoder))
5772 					& ~TU_SIZE_MASK;
5773 			m2_n2->gmch_n = intel_de_read(dev_priv,
5774 						      PIPE_DATA_N2(transcoder));
5775 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5776 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5777 		}
5778 	} else {
5779 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5780 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5781 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5782 			& ~TU_SIZE_MASK;
5783 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5784 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5785 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5786 	}
5787 }
5788 
5789 void intel_dp_get_m_n(struct intel_crtc *crtc,
5790 		      struct intel_crtc_state *pipe_config)
5791 {
5792 	if (pipe_config->has_pch_encoder)
5793 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5794 	else
5795 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5796 					     &pipe_config->dp_m_n,
5797 					     &pipe_config->dp_m2_n2);
5798 }
5799 
5800 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
5801 				   struct intel_crtc_state *pipe_config)
5802 {
5803 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5804 				     &pipe_config->fdi_m_n, NULL);
5805 }
5806 
5807 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5808 				  u32 pos, u32 size)
5809 {
5810 	drm_rect_init(&crtc_state->pch_pfit.dst,
5811 		      pos >> 16, pos & 0xffff,
5812 		      size >> 16, size & 0xffff);
5813 }
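/*
 * Illustrative decode (register packing as implied by the shifts above):
 * pos = 0x00100020 unpacks to x = 16, y = 32, and size = 0x07800438
 * unpacks to a 1920x1080 panel fitter window.
 */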
5814 
5815 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
5816 {
5817 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5818 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5819 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
5820 	int id = -1;
5821 	int i;
5822 
5823 	/* find scaler attached to this pipe */
5824 	for (i = 0; i < crtc->num_scalers; i++) {
5825 		u32 ctl, pos, size;
5826 
5827 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
5828 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
5829 			continue;
5830 
5831 		id = i;
5832 		crtc_state->pch_pfit.enabled = true;
5833 
5834 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
5835 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
5836 
5837 		ilk_get_pfit_pos_size(crtc_state, pos, size);
5838 
5839 		scaler_state->scalers[i].in_use = true;
5840 		break;
5841 	}
5842 
5843 	scaler_state->scaler_id = id;
5844 	if (id >= 0)
5845 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
5846 	else
5847 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5848 }
5849 
5850 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
5851 {
5852 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5853 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5854 	u32 ctl, pos, size;
5855 
5856 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
5857 	if ((ctl & PF_ENABLE) == 0)
5858 		return;
5859 
5860 	crtc_state->pch_pfit.enabled = true;
5861 
5862 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
5863 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
5864 
5865 	ilk_get_pfit_pos_size(crtc_state, pos, size);
5866 
5867 	/*
5868 	 * We currently do not free assignments of panel fitters on
5869 	 * ivb/hsw (since we don't use the higher upscaling modes which
5870 	 * differentiate them) so just WARN about this case for now.
5871 	 */
5872 	drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
5873 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
5874 }
5875 
5876 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5877 				struct intel_crtc_state *pipe_config)
5878 {
5879 	struct drm_device *dev = crtc->base.dev;
5880 	struct drm_i915_private *dev_priv = to_i915(dev);
5881 	enum intel_display_power_domain power_domain;
5882 	intel_wakeref_t wakeref;
5883 	u32 tmp;
5884 	bool ret;
5885 
5886 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5887 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5888 	if (!wakeref)
5889 		return false;
5890 
5891 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5892 	pipe_config->shared_dpll = NULL;
5893 
5894 	ret = false;
5895 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5896 	if (!(tmp & PIPECONF_ENABLE))
5897 		goto out;
5898 
5899 	switch (tmp & PIPECONF_BPC_MASK) {
5900 	case PIPECONF_6BPC:
5901 		pipe_config->pipe_bpp = 18;
5902 		break;
5903 	case PIPECONF_8BPC:
5904 		pipe_config->pipe_bpp = 24;
5905 		break;
5906 	case PIPECONF_10BPC:
5907 		pipe_config->pipe_bpp = 30;
5908 		break;
5909 	case PIPECONF_12BPC:
5910 		pipe_config->pipe_bpp = 36;
5911 		break;
5912 	default:
5913 		break;
5914 	}
5915 
5916 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5917 		pipe_config->limited_color_range = true;
5918 
5919 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5920 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5921 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5922 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5923 		break;
5924 	default:
5925 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5926 		break;
5927 	}
5928 
5929 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5930 		PIPECONF_GAMMA_MODE_SHIFT;
5931 
5932 	pipe_config->csc_mode = intel_de_read(dev_priv,
5933 					      PIPE_CSC_MODE(crtc->pipe));
5934 
5935 	i9xx_get_pipe_color_config(pipe_config);
5936 	intel_color_get_config(pipe_config);
5937 
5938 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5939 		struct intel_shared_dpll *pll;
5940 		enum intel_dpll_id pll_id;
5941 		bool pll_active;
5942 
5943 		pipe_config->has_pch_encoder = true;
5944 
5945 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5946 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5947 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5948 
5949 		ilk_get_fdi_m_n_config(crtc, pipe_config);
5950 
5951 		if (HAS_PCH_IBX(dev_priv)) {
5952 			/*
5953 			 * The pipe->pch transcoder and pch transcoder->pll
5954 			 * mapping is fixed.
5955 			 */
5956 			pll_id = (enum intel_dpll_id) crtc->pipe;
5957 		} else {
5958 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5959 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5960 				pll_id = DPLL_ID_PCH_PLL_B;
5961 			else
5962 				pll_id = DPLL_ID_PCH_PLL_A;
5963 		}
5964 
5965 		pipe_config->shared_dpll =
5966 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
5967 		pll = pipe_config->shared_dpll;
5968 
5969 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5970 						     &pipe_config->dpll_hw_state);
5971 		drm_WARN_ON(dev, !pll_active);
5972 
5973 		tmp = pipe_config->dpll_hw_state.dpll;
5974 		pipe_config->pixel_multiplier =
5975 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5976 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5977 
5978 		ilk_pch_clock_get(crtc, pipe_config);
5979 	} else {
5980 		pipe_config->pixel_multiplier = 1;
5981 	}
5982 
5983 	intel_get_transcoder_timings(crtc, pipe_config);
5984 	intel_get_pipe_src_size(crtc, pipe_config);
5985 
5986 	ilk_get_pfit_config(pipe_config);
5987 
5988 	ret = true;
5989 
5990 out:
5991 	intel_display_power_put(dev_priv, power_domain, wakeref);
5992 
5993 	return ret;
5994 }
5995 
5996 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
5997 				     struct intel_crtc_state *pipe_config,
5998 				     struct intel_display_power_domain_set *power_domain_set)
5999 {
6000 	struct drm_device *dev = crtc->base.dev;
6001 	struct drm_i915_private *dev_priv = to_i915(dev);
6002 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
6003 	unsigned long enabled_panel_transcoders = 0;
6004 	enum transcoder panel_transcoder;
6005 	u32 tmp;
6006 
6007 	if (DISPLAY_VER(dev_priv) >= 11)
6008 		panel_transcoder_mask |=
6009 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
6010 
6011 	/*
6012 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
6013 	 * and DSI transcoders handled below.
6014 	 */
6015 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6016 
6017 	/*
6018 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
6019 	 * consistency and less surprising code; it's in the always-on power well).
6020 	 */
6021 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
6022 				       panel_transcoder_mask) {
6023 		bool force_thru = false;
6024 		enum pipe trans_pipe;
6025 
6026 		tmp = intel_de_read(dev_priv,
6027 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
6028 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6029 			continue;
6030 
6031 		/*
6032 		 * Log all enabled ones, only use the first one.
6033 		 *
6034 		 * FIXME: This won't work for two separate DSI displays.
6035 		 */
6036 		enabled_panel_transcoders |= BIT(panel_transcoder);
6037 		if (enabled_panel_transcoders != BIT(panel_transcoder))
6038 			continue;
6039 
6040 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6041 		default:
6042 			drm_WARN(dev, 1,
6043 				 "unknown pipe linked to transcoder %s\n",
6044 				 transcoder_name(panel_transcoder));
6045 			fallthrough;
6046 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
6047 			force_thru = true;
6048 			fallthrough;
6049 		case TRANS_DDI_EDP_INPUT_A_ON:
6050 			trans_pipe = PIPE_A;
6051 			break;
6052 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
6053 			trans_pipe = PIPE_B;
6054 			break;
6055 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
6056 			trans_pipe = PIPE_C;
6057 			break;
6058 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
6059 			trans_pipe = PIPE_D;
6060 			break;
6061 		}
6062 
6063 		if (trans_pipe == crtc->pipe) {
6064 			pipe_config->cpu_transcoder = panel_transcoder;
6065 			pipe_config->pch_pfit.force_thru = force_thru;
6066 		}
6067 	}
6068 
6069 	/*
6070 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
6071 	 */
6072 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
6073 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
6074 
6075 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6076 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
6077 		return false;
6078 
6079 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
6080 
6081 	return tmp & PIPECONF_ENABLE;
6082 }
6083 
6084 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
6085 					 struct intel_crtc_state *pipe_config,
6086 					 struct intel_display_power_domain_set *power_domain_set)
6087 {
6088 	struct drm_device *dev = crtc->base.dev;
6089 	struct drm_i915_private *dev_priv = to_i915(dev);
6090 	enum transcoder cpu_transcoder;
6091 	enum port port;
6092 	u32 tmp;
6093 
6094 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
6095 		if (port == PORT_A)
6096 			cpu_transcoder = TRANSCODER_DSI_A;
6097 		else
6098 			cpu_transcoder = TRANSCODER_DSI_C;
6099 
6100 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
6101 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
6102 			continue;
6103 
6104 		/*
6105 		 * The PLL needs to be enabled with a valid divider
6106 		 * configuration, otherwise accessing DSI registers will hang
6107 		 * the machine. See BSpec North Display Engine
6108 		 * registers/MIPI[BXT]. We can break out here early, since we
6109 		 * need the same DSI PLL to be enabled for both DSI ports.
6110 		 */
6111 		if (!bxt_dsi_pll_is_enabled(dev_priv))
6112 			break;
6113 
6114 		/* XXX: this works for video mode only */
6115 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
6116 		if (!(tmp & DPI_ENABLE))
6117 			continue;
6118 
6119 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
6120 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
6121 			continue;
6122 
6123 		pipe_config->cpu_transcoder = cpu_transcoder;
6124 		break;
6125 	}
6126 
6127 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
6128 }
6129 
6130 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
6131 				   struct intel_crtc_state *pipe_config)
6132 {
6133 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6134 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6135 	enum port port;
6136 	u32 tmp;
6137 
6138 	if (transcoder_is_dsi(cpu_transcoder)) {
6139 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
6140 						PORT_A : PORT_B;
6141 	} else {
6142 		tmp = intel_de_read(dev_priv,
6143 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
6144 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
6145 			return;
6146 		if (DISPLAY_VER(dev_priv) >= 12)
6147 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6148 		else
6149 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
6150 	}
6151 
6152 	/*
6153 	 * Haswell has only FDI/PCH transcoder A, which is connected to
6154 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
6155 	 * the PCH transcoder is on.
6156 	 */
6157 	if (DISPLAY_VER(dev_priv) < 9 &&
6158 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
6159 		pipe_config->has_pch_encoder = true;
6160 
6161 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
6162 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6163 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
6164 
6165 		ilk_get_fdi_m_n_config(crtc, pipe_config);
6166 	}
6167 }
6168 
6169 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
6170 				struct intel_crtc_state *pipe_config)
6171 {
6172 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6173 	struct intel_display_power_domain_set power_domain_set = { };
6174 	bool active;
6175 	u32 tmp;
6176 
6177 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6178 						       POWER_DOMAIN_PIPE(crtc->pipe)))
6179 		return false;
6180 
6181 	pipe_config->shared_dpll = NULL;
6182 
6183 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
6184 
6185 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
6186 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
6187 		drm_WARN_ON(&dev_priv->drm, active);
6188 		active = true;
6189 	}
6190 
6191 	intel_dsc_get_config(pipe_config);
6192 	if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
6193 		intel_uncompressed_joiner_get_config(pipe_config);
6194 
6195 	if (!active) {
6196 		/* bigjoiner slave doesn't enable transcoder */
6197 		if (!pipe_config->bigjoiner_slave)
6198 			goto out;
6199 
6200 		active = true;
6201 		pipe_config->pixel_multiplier = 1;
6202 
6203 		/* we cannot read out most state, so don't bother. */
6204 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
6205 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
6206 	    DISPLAY_VER(dev_priv) >= 11) {
6207 		hsw_get_ddi_port_state(crtc, pipe_config);
6208 		intel_get_transcoder_timings(crtc, pipe_config);
6209 	}
6210 
6211 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
6212 		intel_vrr_get_config(crtc, pipe_config);
6213 
6214 	intel_get_pipe_src_size(crtc, pipe_config);
6215 
6216 	if (IS_HASWELL(dev_priv)) {
6217 		u32 tmp = intel_de_read(dev_priv,
6218 					PIPECONF(pipe_config->cpu_transcoder));
6219 
6220 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
6221 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
6222 		else
6223 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
6224 	} else {
6225 		pipe_config->output_format =
6226 			bdw_get_pipemisc_output_format(crtc);
6227 	}
6228 
6229 	pipe_config->gamma_mode = intel_de_read(dev_priv,
6230 						GAMMA_MODE(crtc->pipe));
6231 
6232 	pipe_config->csc_mode = intel_de_read(dev_priv,
6233 					      PIPE_CSC_MODE(crtc->pipe));
6234 
6235 	if (DISPLAY_VER(dev_priv) >= 9) {
6236 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6237 
6238 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6239 			pipe_config->gamma_enable = true;
6240 
6241 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6242 			pipe_config->csc_enable = true;
6243 	} else {
6244 		i9xx_get_pipe_color_config(pipe_config);
6245 	}
6246 
6247 	intel_color_get_config(pipe_config);
6248 
6249 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6250 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6251 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6252 		pipe_config->ips_linetime =
6253 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6254 
6255 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6256 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6257 		if (DISPLAY_VER(dev_priv) >= 9)
6258 			skl_get_pfit_config(pipe_config);
6259 		else
6260 			ilk_get_pfit_config(pipe_config);
6261 	}
6262 
6263 	if (hsw_crtc_supports_ips(crtc)) {
6264 		if (IS_HASWELL(dev_priv))
6265 			pipe_config->ips_enabled = intel_de_read(dev_priv,
6266 								 IPS_CTL) & IPS_ENABLE;
6267 		else {
6268 			/*
6269 			 * We cannot readout IPS state on broadwell, set to
6270 			 * true so we can set it to a defined state on first
6271 			 * commit.
6272 			 */
6273 			pipe_config->ips_enabled = true;
6274 		}
6275 	}
6276 
6277 	if (pipe_config->bigjoiner_slave) {
6278 		/* Cannot be read out as a slave, set to 0. */
6279 		pipe_config->pixel_multiplier = 0;
6280 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6281 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6282 		pipe_config->pixel_multiplier =
6283 			intel_de_read(dev_priv,
6284 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6285 	} else {
6286 		pipe_config->pixel_multiplier = 1;
6287 	}
6288 
6289 out:
6290 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6291 
6292 	return active;
6293 }
6294 
6295 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6296 {
6297 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6298 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6299 
6300 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6301 		return false;
6302 
6303 	crtc_state->hw.active = true;
6304 
6305 	intel_crtc_readout_derived_state(crtc_state);
6306 
6307 	return true;
6308 }
6309 
6310 /* VESA 640x480x72Hz mode to set on the pipe */
6311 static const struct drm_display_mode load_detect_mode = {
6312 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6313 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6314 };
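/*
 * Sanity check on the timings above: a 31500 kHz dotclock with an 832x520
 * total raster gives 31500000 / (832 * 520) ~= 72.8 Hz refresh, consistent
 * with the nominal 72Hz in the mode name.
 */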
6315 
6316 struct drm_framebuffer *
6317 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6318 			 struct drm_mode_fb_cmd2 *mode_cmd)
6319 {
6320 	struct intel_framebuffer *intel_fb;
6321 	int ret;
6322 
6323 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6324 	if (!intel_fb)
6325 		return ERR_PTR(-ENOMEM);
6326 
6327 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6328 	if (ret)
6329 		goto err;
6330 
6331 	return &intel_fb->base;
6332 
6333 err:
6334 	kfree(intel_fb);
6335 	return ERR_PTR(ret);
6336 }
6337 
6338 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6339 					struct drm_crtc *crtc)
6340 {
6341 	struct drm_plane *plane;
6342 	struct drm_plane_state *plane_state;
6343 	int ret, i;
6344 
6345 	ret = drm_atomic_add_affected_planes(state, crtc);
6346 	if (ret)
6347 		return ret;
6348 
6349 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6350 		if (plane_state->crtc != crtc)
6351 			continue;
6352 
6353 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6354 		if (ret)
6355 			return ret;
6356 
6357 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6358 	}
6359 
6360 	return 0;
6361 }
6362 
6363 int intel_get_load_detect_pipe(struct drm_connector *connector,
6364 			       struct intel_load_detect_pipe *old,
6365 			       struct drm_modeset_acquire_ctx *ctx)
6366 {
6367 	struct intel_encoder *encoder =
6368 		intel_attached_encoder(to_intel_connector(connector));
6369 	struct intel_crtc *possible_crtc;
6370 	struct intel_crtc *crtc = NULL;
6371 	struct drm_device *dev = encoder->base.dev;
6372 	struct drm_i915_private *dev_priv = to_i915(dev);
6373 	struct drm_mode_config *config = &dev->mode_config;
6374 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6375 	struct drm_connector_state *connector_state;
6376 	struct intel_crtc_state *crtc_state;
6377 	int ret;
6378 
6379 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6380 		    connector->base.id, connector->name,
6381 		    encoder->base.base.id, encoder->base.name);
6382 
6383 	old->restore_state = NULL;
6384 
6385 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6386 
6387 	/*
6388 	 * Algorithm gets a little messy:
6389 	 *
6390 	 *   - if the connector already has an assigned crtc, use it (but make
6391 	 *     sure it's on first)
6392 	 *
6393 	 *   - try to find the first unused crtc that can drive this connector,
6394 	 *     and use that if we find one
6395 	 */
6396 
6397 	/* See if we already have a CRTC for this connector */
6398 	if (connector->state->crtc) {
6399 		crtc = to_intel_crtc(connector->state->crtc);
6400 
6401 		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
6402 		if (ret)
6403 			goto fail;
6404 
6405 		/* Make sure the crtc and connector are running */
6406 		goto found;
6407 	}
6408 
6409 	/* Find an unused one (if possible) */
6410 	for_each_intel_crtc(dev, possible_crtc) {
6411 		if (!(encoder->base.possible_crtcs &
6412 		      drm_crtc_mask(&possible_crtc->base)))
6413 			continue;
6414 
6415 		ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
6416 		if (ret)
6417 			goto fail;
6418 
6419 		if (possible_crtc->base.state->enable) {
6420 			drm_modeset_unlock(&possible_crtc->base.mutex);
6421 			continue;
6422 		}
6423 
6424 		crtc = possible_crtc;
6425 		break;
6426 	}
6427 
6428 	/*
6429 	 * If we didn't find an unused CRTC, don't use any.
6430 	 */
6431 	if (!crtc) {
6432 		drm_dbg_kms(&dev_priv->drm,
6433 			    "no pipe available for load-detect\n");
6434 		ret = -ENODEV;
6435 		goto fail;
6436 	}
6437 
6438 found:
6439 	state = drm_atomic_state_alloc(dev);
6440 	restore_state = drm_atomic_state_alloc(dev);
6441 	if (!state || !restore_state) {
6442 		ret = -ENOMEM;
6443 		goto fail;
6444 	}
6445 
6446 	state->acquire_ctx = ctx;
6447 	restore_state->acquire_ctx = ctx;
6448 
6449 	connector_state = drm_atomic_get_connector_state(state, connector);
6450 	if (IS_ERR(connector_state)) {
6451 		ret = PTR_ERR(connector_state);
6452 		goto fail;
6453 	}
6454 
6455 	ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
6456 	if (ret)
6457 		goto fail;
6458 
6459 	crtc_state = intel_atomic_get_crtc_state(state, crtc);
6460 	if (IS_ERR(crtc_state)) {
6461 		ret = PTR_ERR(crtc_state);
6462 		goto fail;
6463 	}
6464 
6465 	crtc_state->uapi.active = true;
6466 
6467 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6468 					   &load_detect_mode);
6469 	if (ret)
6470 		goto fail;
6471 
6472 	ret = intel_modeset_disable_planes(state, &crtc->base);
6473 	if (ret)
6474 		goto fail;
6475 
6476 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6477 	if (!ret)
6478 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
6479 	if (!ret)
6480 		ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
6481 	if (ret) {
6482 		drm_dbg_kms(&dev_priv->drm,
6483 			    "Failed to create a copy of old state to restore: %i\n",
6484 			    ret);
6485 		goto fail;
6486 	}
6487 
6488 	ret = drm_atomic_commit(state);
6489 	if (ret) {
6490 		drm_dbg_kms(&dev_priv->drm,
6491 			    "failed to set mode on load-detect pipe\n");
6492 		goto fail;
6493 	}
6494 
6495 	old->restore_state = restore_state;
6496 	drm_atomic_state_put(state);
6497 
6498 	/* let the connector get through one full cycle before testing */
6499 	intel_wait_for_vblank(dev_priv, crtc->pipe);
6500 	return true;
6501 
6502 fail:
6503 	if (state) {
6504 		drm_atomic_state_put(state);
6505 		state = NULL;
6506 	}
6507 	if (restore_state) {
6508 		drm_atomic_state_put(restore_state);
6509 		restore_state = NULL;
6510 	}
6511 
6512 	if (ret == -EDEADLK)
6513 		return ret;
6514 
6515 	return false;
6516 }
6517 
6518 void intel_release_load_detect_pipe(struct drm_connector *connector,
6519 				    struct intel_load_detect_pipe *old,
6520 				    struct drm_modeset_acquire_ctx *ctx)
6521 {
6522 	struct intel_encoder *intel_encoder =
6523 		intel_attached_encoder(to_intel_connector(connector));
6524 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6525 	struct drm_encoder *encoder = &intel_encoder->base;
6526 	struct drm_atomic_state *state = old->restore_state;
6527 	int ret;
6528 
6529 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6530 		    connector->base.id, connector->name,
6531 		    encoder->base.id, encoder->name);
6532 
6533 	if (!state)
6534 		return;
6535 
6536 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6537 	if (ret)
6538 		drm_dbg_kms(&i915->drm,
6539 			    "Couldn't release load detect pipe: %i\n", ret);
6540 	drm_atomic_state_put(state);
6541 }
6542 
6543 static int i9xx_pll_refclk(struct drm_device *dev,
6544 			   const struct intel_crtc_state *pipe_config)
6545 {
6546 	struct drm_i915_private *dev_priv = to_i915(dev);
6547 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6548 
6549 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6550 		return dev_priv->vbt.lvds_ssc_freq;
6551 	else if (HAS_PCH_SPLIT(dev_priv))
6552 		return 120000;
6553 	else if (DISPLAY_VER(dev_priv) != 2)
6554 		return 96000;
6555 	else
6556 		return 48000;
6557 }
6558 
6559 /* Returns the clock of the currently programmed mode of the given pipe. */
6560 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6561 				struct intel_crtc_state *pipe_config)
6562 {
6563 	struct drm_device *dev = crtc->base.dev;
6564 	struct drm_i915_private *dev_priv = to_i915(dev);
6565 	enum pipe pipe = crtc->pipe;
6566 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6567 	u32 fp;
6568 	struct dpll clock;
6569 	int port_clock;
6570 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6571 
6572 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6573 		fp = pipe_config->dpll_hw_state.fp0;
6574 	else
6575 		fp = pipe_config->dpll_hw_state.fp1;
6576 
6577 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6578 	if (IS_PINEVIEW(dev_priv)) {
6579 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6580 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6581 	} else {
6582 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6583 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6584 	}
6585 
6586 	if (DISPLAY_VER(dev_priv) != 2) {
6587 		if (IS_PINEVIEW(dev_priv))
6588 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6589 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6590 		else
6591 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6592 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6593 
6594 		switch (dpll & DPLL_MODE_MASK) {
6595 		case DPLLB_MODE_DAC_SERIAL:
6596 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6597 				5 : 10;
6598 			break;
6599 		case DPLLB_MODE_LVDS:
6600 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6601 				7 : 14;
6602 			break;
6603 		default:
6604 			drm_dbg_kms(&dev_priv->drm,
6605 				    "Unknown DPLL mode %08x in programmed "
6606 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6607 			return;
6608 		}
6609 
6610 		if (IS_PINEVIEW(dev_priv))
6611 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6612 		else
6613 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6614 	} else {
6615 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6616 								 LVDS);
6617 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6618 
6619 		if (is_lvds) {
6620 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6621 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6622 
6623 			if (lvds & LVDS_CLKB_POWER_UP)
6624 				clock.p2 = 7;
6625 			else
6626 				clock.p2 = 14;
6627 		} else {
6628 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6629 				clock.p1 = 2;
6630 			else {
6631 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6632 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6633 			}
6634 			if (dpll & PLL_P2_DIVIDE_BY_4)
6635 				clock.p2 = 4;
6636 			else
6637 				clock.p2 = 2;
6638 		}
6639 
6640 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6641 	}
6642 
6643 	/*
6644 	 * This value includes pixel_multiplier. We will use
6645 	 * port_clock to compute adjusted_mode.crtc_clock in the
6646 	 * encoder's get_config() function.
6647 	 */
6648 	pipe_config->port_clock = port_clock;
6649 }
6650 
6651 int intel_dotclock_calculate(int link_freq,
6652 			     const struct intel_link_m_n *m_n)
6653 {
6654 	/*
6655 	 * The calculation for the data clock is:
6656 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6657 	 * But we want to avoid losing precision if possible, so:
6658 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6659 	 *
6660 	 * and the dot clock computed from the link M/N values is simpler:
6661 	 * pixel_clock = (link_m * link_clock) / link_n
6662 	 */
6663 
6664 	if (!m_n->link_n)
6665 		return 0;
6666 
6667 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6668 }
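/*
 * Illustrative example (made-up numbers): with link_freq = 270000 kHz (HBR)
 * and link_m/link_n = 1/2, this returns a dot clock of 135000 kHz. The
 * mul_u32_u32()/div_u64() pair keeps the intermediate product in 64 bits so
 * a large link_m * link_freq does not overflow.
 */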
6669 
6670 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6671 			      struct intel_crtc_state *pipe_config)
6672 {
6673 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6674 
6675 	/* read out port_clock from the DPLL */
6676 	i9xx_crtc_clock_get(crtc, pipe_config);
6677 
6678 	/*
6679 	 * In case there is an active pipe without active ports,
6680 	 * we still need a sensible estimate for the dotclock anyway.
6681 	 * Calculate one based on the FDI configuration.
6682 	 */
6683 	pipe_config->hw.adjusted_mode.crtc_clock =
6684 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6685 					 &pipe_config->fdi_m_n);
6686 }
6687 
6688 /* Returns the currently programmed mode of the given encoder. */
6689 struct drm_display_mode *
6690 intel_encoder_current_mode(struct intel_encoder *encoder)
6691 {
6692 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6693 	struct intel_crtc_state *crtc_state;
6694 	struct drm_display_mode *mode;
6695 	struct intel_crtc *crtc;
6696 	enum pipe pipe;
6697 
6698 	if (!encoder->get_hw_state(encoder, &pipe))
6699 		return NULL;
6700 
6701 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6702 
6703 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6704 	if (!mode)
6705 		return NULL;
6706 
6707 	crtc_state = intel_crtc_state_alloc(crtc);
6708 	if (!crtc_state) {
6709 		kfree(mode);
6710 		return NULL;
6711 	}
6712 
6713 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6714 		kfree(crtc_state);
6715 		kfree(mode);
6716 		return NULL;
6717 	}
6718 
6719 	intel_encoder_get_config(encoder, crtc_state);
6720 
6721 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6722 
6723 	kfree(crtc_state);
6724 
6725 	return mode;
6726 }
6727 
6728 /**
6729  * intel_wm_need_update - Check whether watermarks need updating
6730  * @cur: current plane state
6731  * @new: new plane state
6732  *
6733  * Check current plane state versus the new one to determine whether
6734  * watermarks need to be recalculated.
6735  *
6736  * Returns: true if the watermarks need to be recalculated, false otherwise.
6737  */
6738 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6739 				 struct intel_plane_state *new)
6740 {
6741 	/* Update watermarks on visibility, tiling or size changes. */
6742 	if (new->uapi.visible != cur->uapi.visible)
6743 		return true;
6744 
6745 	if (!cur->hw.fb || !new->hw.fb)
6746 		return false;
6747 
6748 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6749 	    cur->hw.rotation != new->hw.rotation ||
6750 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6751 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6752 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6753 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6754 		return true;
6755 
6756 	return false;
6757 }
6758 
6759 static bool needs_scaling(const struct intel_plane_state *state)
6760 {
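	/* uapi.src is in 16.16 fixed point; shift out the fractional part. */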
6761 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6762 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6763 	int dst_w = drm_rect_width(&state->uapi.dst);
6764 	int dst_h = drm_rect_height(&state->uapi.dst);
6765 
6766 	return (src_w != dst_w || src_h != dst_h);
6767 }
6768 
6769 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6770 				    struct intel_crtc_state *crtc_state,
6771 				    const struct intel_plane_state *old_plane_state,
6772 				    struct intel_plane_state *plane_state)
6773 {
6774 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6775 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6776 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6777 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6778 	bool was_crtc_enabled = old_crtc_state->hw.active;
6779 	bool is_crtc_enabled = crtc_state->hw.active;
6780 	bool turn_off, turn_on, visible, was_visible;
6781 	int ret;
6782 
6783 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6784 		ret = skl_update_scaler_plane(crtc_state, plane_state);
6785 		if (ret)
6786 			return ret;
6787 	}
6788 
6789 	was_visible = old_plane_state->uapi.visible;
6790 	visible = plane_state->uapi.visible;
6791 
6792 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6793 		was_visible = false;
6794 
6795 	/*
6796 	 * Visibility is calculated as if the crtc was on, but
6797 	 * after scaler setup everything depends on it being off
6798 	 * when the crtc isn't active.
6799 	 *
6800 	 * FIXME this is wrong for watermarks. Watermarks should also
6801 	 * be computed as if the pipe would be active. Perhaps move
6802 	 * per-plane wm computation to the .check_plane() hook, and
6803 	 * only combine the results from all planes in the current place?
6804 	 */
6805 	if (!is_crtc_enabled) {
6806 		intel_plane_set_invisible(crtc_state, plane_state);
6807 		visible = false;
6808 	}
6809 
6810 	if (!was_visible && !visible)
6811 		return 0;
6812 
6813 	turn_off = was_visible && (!visible || mode_changed);
6814 	turn_on = visible && (!was_visible || mode_changed);
6815 
6816 	drm_dbg_atomic(&dev_priv->drm,
6817 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
6818 		       crtc->base.base.id, crtc->base.name,
6819 		       plane->base.base.id, plane->base.name,
6820 		       was_visible, visible,
6821 		       turn_off, turn_on, mode_changed);
6822 
6823 	if (turn_on) {
6824 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6825 			crtc_state->update_wm_pre = true;
6826 
6827 		/* must disable cxsr around plane enable/disable */
6828 		if (plane->id != PLANE_CURSOR)
6829 			crtc_state->disable_cxsr = true;
6830 	} else if (turn_off) {
6831 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6832 			crtc_state->update_wm_post = true;
6833 
6834 		/* must disable cxsr around plane enable/disable */
6835 		if (plane->id != PLANE_CURSOR)
6836 			crtc_state->disable_cxsr = true;
6837 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
6838 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
6839 			/* FIXME bollocks */
6840 			crtc_state->update_wm_pre = true;
6841 			crtc_state->update_wm_post = true;
6842 		}
6843 	}
6844 
6845 	if (visible || was_visible)
6846 		crtc_state->fb_bits |= plane->frontbuffer_bit;
6847 
6848 	/*
6849 	 * ILK/SNB DVSACNTR/Sprite Enable
6850 	 * IVB SPR_CTL/Sprite Enable
6851 	 * "When in Self Refresh Big FIFO mode, a write to enable the
6852 	 *  plane will be internally buffered and delayed while Big FIFO
6853 	 *  mode is exiting."
6854 	 *
6855 	 * Which means that enabling the sprite can take an extra frame
6856 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
6857 	 * down to LP0 and wait for vblank in order to make sure the
6858 	 * sprite gets enabled on the next vblank after the register write.
6859 	 * Doing otherwise would risk enabling the sprite one frame after
6860 	 * we've already signalled flip completion. We can resume LP1+
6861 	 * once the sprite has been enabled.
6862 	 *
6863 	 *
6865 	 * IVB SPR_SCALE/Scaling Enable
6866 	 * "Low Power watermarks must be disabled for at least one
6867 	 *  frame before enabling sprite scaling, and kept disabled
6868 	 *  until sprite scaling is disabled."
6869 	 *
6870 	 * ILK/SNB DVSASCALE/Scaling Enable
6871 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
6872 	 *  masked off while Big FIFO mode is exiting."
6873 	 *
6874 	 * Despite the w/a only being listed for IVB we assume that
6875 	 * the ILK/SNB note has similar ramifications, hence we apply
6876 	 * the w/a on all three platforms.
6877 	 *
6878 	 * Experimental results suggest this is needed also for the primary
6879 	 * plane, not only the sprite plane.
6880 	 */
6881 	if (plane->id != PLANE_CURSOR &&
6882 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
6883 	     IS_IVYBRIDGE(dev_priv)) &&
6884 	    (turn_on || (!needs_scaling(old_plane_state) &&
6885 			 needs_scaling(plane_state))))
6886 		crtc_state->disable_lp_wm = true;
6887 
6888 	return 0;
6889 }
6890 
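/*
 * Example: encoder A may list B's type in its cloneable mask while B does
 * not list A's; since the masks can be asymmetric, both directions are
 * checked and cloning is only allowed when each encoder accepts the
 * other's type.
 */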
6891 static bool encoders_cloneable(const struct intel_encoder *a,
6892 			       const struct intel_encoder *b)
6893 {
6894 	/* masks could be asymmetric, so check both ways */
6895 	return a == b || (a->cloneable & (1 << b->type) &&
6896 			  b->cloneable & (1 << a->type));
6897 }
6898 
6899 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6900 					 struct intel_crtc *crtc,
6901 					 struct intel_encoder *encoder)
6902 {
6903 	struct intel_encoder *source_encoder;
6904 	struct drm_connector *connector;
6905 	struct drm_connector_state *connector_state;
6906 	int i;
6907 
6908 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6909 		if (connector_state->crtc != &crtc->base)
6910 			continue;
6911 
6912 		source_encoder =
6913 			to_intel_encoder(connector_state->best_encoder);
6914 		if (!encoders_cloneable(encoder, source_encoder))
6915 			return false;
6916 	}
6917 
6918 	return true;
6919 }
6920 
6921 static int icl_add_linked_planes(struct intel_atomic_state *state)
6922 {
6923 	struct intel_plane *plane, *linked;
6924 	struct intel_plane_state *plane_state, *linked_plane_state;
6925 	int i;
6926 
6927 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6928 		linked = plane_state->planar_linked_plane;
6929 
6930 		if (!linked)
6931 			continue;
6932 
6933 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
6934 		if (IS_ERR(linked_plane_state))
6935 			return PTR_ERR(linked_plane_state);
6936 
6937 		drm_WARN_ON(state->base.dev,
6938 			    linked_plane_state->planar_linked_plane != plane);
6939 		drm_WARN_ON(state->base.dev,
6940 			    linked_plane_state->planar_slave == plane_state->planar_slave);
6941 	}
6942 
6943 	return 0;
6944 }
6945 
6946 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
6947 {
6948 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6949 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6950 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
6951 	struct intel_plane *plane, *linked;
6952 	struct intel_plane_state *plane_state;
6953 	int i;
6954 
6955 	if (DISPLAY_VER(dev_priv) < 11)
6956 		return 0;
6957 
6958 	/*
6959 	 * Destroy all old plane links and make the slave plane invisible
6960 	 * in the crtc_state->active_planes mask.
6961 	 */
6962 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6963 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
6964 			continue;
6965 
6966 		plane_state->planar_linked_plane = NULL;
6967 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
6968 			crtc_state->enabled_planes &= ~BIT(plane->id);
6969 			crtc_state->active_planes &= ~BIT(plane->id);
6970 			crtc_state->update_planes |= BIT(plane->id);
6971 		}
6972 
6973 		plane_state->planar_slave = false;
6974 	}
6975 
6976 	if (!crtc_state->nv12_planes)
6977 		return 0;
6978 
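	/*
	 * For every plane that needs planar YUV (NV12), pick the first
	 * Y-capable plane on this CRTC that is not already active and
	 * link it in as the slave that scans out the Y samples.
	 */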
6979 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6980 		struct intel_plane_state *linked_state = NULL;
6981 
6982 		if (plane->pipe != crtc->pipe ||
6983 		    !(crtc_state->nv12_planes & BIT(plane->id)))
6984 			continue;
6985 
6986 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
6987 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
6988 				continue;
6989 
6990 			if (crtc_state->active_planes & BIT(linked->id))
6991 				continue;
6992 
6993 			linked_state = intel_atomic_get_plane_state(state, linked);
6994 			if (IS_ERR(linked_state))
6995 				return PTR_ERR(linked_state);
6996 
6997 			break;
6998 		}
6999 
7000 		if (!linked_state) {
7001 			drm_dbg_kms(&dev_priv->drm,
7002 				    "Need %d free Y planes for planar YUV\n",
7003 				    hweight8(crtc_state->nv12_planes));
7004 
7005 			return -EINVAL;
7006 		}
7007 
7008 		plane_state->planar_linked_plane = linked;
7009 
7010 		linked_state->planar_slave = true;
7011 		linked_state->planar_linked_plane = plane;
7012 		crtc_state->enabled_planes |= BIT(linked->id);
7013 		crtc_state->active_planes |= BIT(linked->id);
7014 		crtc_state->update_planes |= BIT(linked->id);
7015 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
7016 			    linked->base.name, plane->base.name);
7017 
7018 		/* Copy parameters to slave plane */
7019 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
7020 		linked_state->color_ctl = plane_state->color_ctl;
7021 		linked_state->view = plane_state->view;
7022 
7023 		intel_plane_copy_hw_state(linked_state, plane_state);
7024 		linked_state->uapi.src = plane_state->uapi.src;
7025 		linked_state->uapi.dst = plane_state->uapi.dst;
7026 
7027 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
7028 			if (linked->id == PLANE_SPRITE5)
7029 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
7030 			else if (linked->id == PLANE_SPRITE4)
7031 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
7032 			else if (linked->id == PLANE_SPRITE3)
7033 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
7034 			else if (linked->id == PLANE_SPRITE2)
7035 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
7036 			else
7037 				MISSING_CASE(linked->id);
7038 		}
7039 	}
7040 
7041 	return 0;
7042 }
7043 
7044 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
7045 {
7046 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
7047 	struct intel_atomic_state *state =
7048 		to_intel_atomic_state(new_crtc_state->uapi.state);
7049 	const struct intel_crtc_state *old_crtc_state =
7050 		intel_atomic_get_old_crtc_state(state, crtc);
7051 
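	/*
	 * "!x != !y" is a boolean XOR: report a change only when the
	 * c8_planes mask toggles between zero and non-zero.
	 */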
7052 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
7053 }
7054 
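/*
 * A line takes htotal * 1000 / crtc_clock microseconds (crtc_clock is in
 * kHz); the additional * 8 expresses the result in 0.125 us units, clamped
 * to the 9-bit (0x1ff) linetime watermark register field.
 */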
7055 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
7056 {
7057 	const struct drm_display_mode *pipe_mode =
7058 		&crtc_state->hw.pipe_mode;
7059 	int linetime_wm;
7060 
7061 	if (!crtc_state->hw.enable)
7062 		return 0;
7063 
7064 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7065 					pipe_mode->crtc_clock);
7066 
7067 	return min(linetime_wm, 0x1ff);
7068 }
7069 
7070 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
7071 			       const struct intel_cdclk_state *cdclk_state)
7072 {
7073 	const struct drm_display_mode *pipe_mode =
7074 		&crtc_state->hw.pipe_mode;
7075 	int linetime_wm;
7076 
7077 	if (!crtc_state->hw.enable)
7078 		return 0;
7079 
7080 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
7081 					cdclk_state->logical.cdclk);
7082 
7083 	return min(linetime_wm, 0x1ff);
7084 }
7085 
7086 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
7087 {
7088 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7089 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7090 	const struct drm_display_mode *pipe_mode =
7091 		&crtc_state->hw.pipe_mode;
7092 	int linetime_wm;
7093 
7094 	if (!crtc_state->hw.enable)
7095 		return 0;
7096 
7097 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
7098 				   crtc_state->pixel_rate);
7099 
7100 	/* Display WA #1135: BXT:ALL GLK:ALL */
7101 	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
7102 	    dev_priv->ipc_enabled)
7103 		linetime_wm /= 2;
7104 
7105 	return min(linetime_wm, 0x1ff);
7106 }
7107 
7108 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
7109 				   struct intel_crtc *crtc)
7110 {
7111 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7112 	struct intel_crtc_state *crtc_state =
7113 		intel_atomic_get_new_crtc_state(state, crtc);
7114 	const struct intel_cdclk_state *cdclk_state;
7115 
7116 	if (DISPLAY_VER(dev_priv) >= 9)
7117 		crtc_state->linetime = skl_linetime_wm(crtc_state);
7118 	else
7119 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
7120 
7121 	if (!hsw_crtc_supports_ips(crtc))
7122 		return 0;
7123 
7124 	cdclk_state = intel_atomic_get_cdclk_state(state);
7125 	if (IS_ERR(cdclk_state))
7126 		return PTR_ERR(cdclk_state);
7127 
7128 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
7129 						       cdclk_state);
7130 
7131 	return 0;
7132 }
7133 
7134 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
7135 				   struct intel_crtc *crtc)
7136 {
7137 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7138 	struct intel_crtc_state *crtc_state =
7139 		intel_atomic_get_new_crtc_state(state, crtc);
7140 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
7141 	int ret;
7142 
7143 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
7144 	    mode_changed && !crtc_state->hw.active)
7145 		crtc_state->update_wm_post = true;
7146 
7147 	if (mode_changed && crtc_state->hw.enable &&
7148 	    dev_priv->display.crtc_compute_clock &&
7149 	    !crtc_state->bigjoiner_slave &&
7150 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
7151 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
7152 		if (ret)
7153 			return ret;
7154 	}
7155 
7156 	/*
7157 	 * May need to update pipe gamma enable bits
7158 	 * when C8 planes are getting enabled/disabled.
7159 	 */
7160 	if (c8_planes_changed(crtc_state))
7161 		crtc_state->uapi.color_mgmt_changed = true;
7162 
7163 	if (mode_changed || crtc_state->update_pipe ||
7164 	    crtc_state->uapi.color_mgmt_changed) {
7165 		ret = intel_color_check(crtc_state);
7166 		if (ret)
7167 			return ret;
7168 	}
7169 
7170 	if (dev_priv->display.compute_pipe_wm) {
7171 		ret = dev_priv->display.compute_pipe_wm(state, crtc);
7172 		if (ret) {
7173 			drm_dbg_kms(&dev_priv->drm,
7174 				    "Target pipe watermarks are invalid\n");
7175 			return ret;
7176 		}
7177 
7179 
7180 	if (dev_priv->display.compute_intermediate_wm) {
7181 		if (drm_WARN_ON(&dev_priv->drm,
7182 				!dev_priv->display.compute_pipe_wm))
7183 			return 0;
7184 
7185 		/*
7186 		 * Calculate 'intermediate' watermarks that satisfy both the
7187 		 * old state and the new state.  We can program these
7188 		 * immediately.
7189 		 */
7190 		ret = dev_priv->display.compute_intermediate_wm(state, crtc);
7191 		if (ret) {
7192 			drm_dbg_kms(&dev_priv->drm,
7193 				    "No valid intermediate pipe watermarks are possible\n");
7194 			return ret;
7195 		}
7196 	}
7197 
7198 	if (DISPLAY_VER(dev_priv) >= 9) {
7199 		if (mode_changed || crtc_state->update_pipe) {
7200 			ret = skl_update_scaler_crtc(crtc_state);
7201 			if (ret)
7202 				return ret;
7203 		}
7204 
7205 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
7206 		if (ret)
7207 			return ret;
7208 	}
7209 
7210 	if (HAS_IPS(dev_priv)) {
7211 		ret = hsw_compute_ips_config(crtc_state);
7212 		if (ret)
7213 			return ret;
7214 	}
7215 
7216 	if (DISPLAY_VER(dev_priv) >= 9 ||
7217 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
7218 		ret = hsw_compute_linetime_wm(state, crtc);
7219 		if (ret)
7220 			return ret;
7222 	}
7223 
7224 	if (!mode_changed) {
7225 		ret = intel_psr2_sel_fetch_update(state, crtc);
7226 		if (ret)
7227 			return ret;
7228 	}
7229 
7230 	return 0;
7231 }
7232 
7233 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7234 {
7235 	struct intel_connector *connector;
7236 	struct drm_connector_list_iter conn_iter;
7237 
7238 	drm_connector_list_iter_begin(dev, &conn_iter);
7239 	for_each_intel_connector_iter(connector, &conn_iter) {
7240 		struct drm_connector_state *conn_state = connector->base.state;
7241 		struct intel_encoder *encoder =
7242 			to_intel_encoder(connector->base.encoder);
7243 
7244 		if (conn_state->crtc)
7245 			drm_connector_put(&connector->base);
7246 
7247 		if (encoder) {
7248 			struct intel_crtc *crtc =
7249 				to_intel_crtc(encoder->base.crtc);
7250 			const struct intel_crtc_state *crtc_state =
7251 				to_intel_crtc_state(crtc->base.state);
7252 
7253 			conn_state->best_encoder = &encoder->base;
7254 			conn_state->crtc = &crtc->base;
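			/*
			 * pipe_bpp counts all three components; divide
			 * by 3 for the per-component max_bpc, assuming
			 * 8 bpc (24 bpp) when pipe_bpp is unset.
			 */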
7255 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7256 
7257 			drm_connector_get(&connector->base);
7258 		} else {
7259 			conn_state->best_encoder = NULL;
7260 			conn_state->crtc = NULL;
7261 		}
7262 	}
7263 	drm_connector_list_iter_end(&conn_iter);
7264 }
7265 
7266 static int
7267 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7268 		      struct intel_crtc_state *pipe_config)
7269 {
7270 	struct drm_connector *connector = conn_state->connector;
7271 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7272 	const struct drm_display_info *info = &connector->display_info;
7273 	int bpp;
7274 
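	/* Round max_bpc down to a component depth the pipe supports. */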
7275 	switch (conn_state->max_bpc) {
7276 	case 6 ... 7:
7277 		bpp = 6 * 3;
7278 		break;
7279 	case 8 ... 9:
7280 		bpp = 8 * 3;
7281 		break;
7282 	case 10 ... 11:
7283 		bpp = 10 * 3;
7284 		break;
7285 	case 12 ... 16:
7286 		bpp = 12 * 3;
7287 		break;
7288 	default:
7289 		MISSING_CASE(conn_state->max_bpc);
7290 		return -EINVAL;
7291 	}
7292 
7293 	if (bpp < pipe_config->pipe_bpp) {
7294 		drm_dbg_kms(&i915->drm,
7295 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7296 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7297 			    connector->base.id, connector->name,
7298 			    bpp, 3 * info->bpc,
7299 			    3 * conn_state->max_requested_bpc,
7300 			    pipe_config->pipe_bpp);
7301 
7302 		pipe_config->pipe_bpp = bpp;
7303 	}
7304 
7305 	return 0;
7306 }
7307 
7308 static int
7309 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7310 			  struct intel_crtc_state *pipe_config)
7311 {
7312 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7313 	struct drm_atomic_state *state = pipe_config->uapi.state;
7314 	struct drm_connector *connector;
7315 	struct drm_connector_state *connector_state;
7316 	int bpp, i;
7317 
7318 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7319 	    IS_CHERRYVIEW(dev_priv))
7320 		bpp = 10*3;
7321 	else if (DISPLAY_VER(dev_priv) >= 5)
7322 		bpp = 12*3;
7323 	else
7324 		bpp = 8*3;
7325 
7326 	pipe_config->pipe_bpp = bpp;
7327 
7328 	/* Clamp display bpp to connector max bpp */
7329 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7330 		int ret;
7331 
7332 		if (connector_state->crtc != &crtc->base)
7333 			continue;
7334 
7335 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7336 		if (ret)
7337 			return ret;
7338 	}
7339 
7340 	return 0;
7341 }
7342 
7343 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7344 				    const struct drm_display_mode *mode)
7345 {
7346 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7347 		    "type: 0x%x flags: 0x%x\n",
7348 		    mode->crtc_clock,
7349 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7350 		    mode->crtc_hsync_end, mode->crtc_htotal,
7351 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7352 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7353 		    mode->type, mode->flags);
7354 }
7355 
7356 static void
7357 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7358 		      const char *id, unsigned int lane_count,
7359 		      const struct intel_link_m_n *m_n)
7360 {
7361 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7362 
7363 	drm_dbg_kms(&i915->drm,
7364 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7365 		    id, lane_count,
7366 		    m_n->gmch_m, m_n->gmch_n,
7367 		    m_n->link_m, m_n->link_n, m_n->tu);
7368 }
7369 
7370 static void
7371 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7372 		     const union hdmi_infoframe *frame)
7373 {
7374 	if (!drm_debug_enabled(DRM_UT_KMS))
7375 		return;
7376 
7377 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7378 }
7379 
7380 static void
7381 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7382 		      const struct drm_dp_vsc_sdp *vsc)
7383 {
7384 	if (!drm_debug_enabled(DRM_UT_KMS))
7385 		return;
7386 
7387 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7388 }
7389 
7390 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7391 
7392 static const char * const output_type_str[] = {
7393 	OUTPUT_TYPE(UNUSED),
7394 	OUTPUT_TYPE(ANALOG),
7395 	OUTPUT_TYPE(DVO),
7396 	OUTPUT_TYPE(SDVO),
7397 	OUTPUT_TYPE(LVDS),
7398 	OUTPUT_TYPE(TVOUT),
7399 	OUTPUT_TYPE(HDMI),
7400 	OUTPUT_TYPE(DP),
7401 	OUTPUT_TYPE(EDP),
7402 	OUTPUT_TYPE(DSI),
7403 	OUTPUT_TYPE(DDI),
7404 	OUTPUT_TYPE(DP_MST),
7405 };
7406 
7407 #undef OUTPUT_TYPE
7408 
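/*
 * Format the output_types bitmask as a comma separated list of names,
 * e.g. BIT(INTEL_OUTPUT_DP) | BIT(INTEL_OUTPUT_HDMI) becomes "HDMI,DP"
 * (names are emitted in bit order).
 */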
7409 static void snprintf_output_types(char *buf, size_t len,
7410 				  unsigned int output_types)
7411 {
7412 	char *str = buf;
7413 	int i;
7414 
7415 	str[0] = '\0';
7416 
7417 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7418 		int r;
7419 
7420 		if ((output_types & BIT(i)) == 0)
7421 			continue;
7422 
7423 		r = snprintf(str, len, "%s%s",
7424 			     str != buf ? "," : "", output_type_str[i]);
7425 		if (r >= len)
7426 			break;
7427 		str += r;
7428 		len -= r;
7429 
7430 		output_types &= ~BIT(i);
7431 	}
7432 
7433 	WARN_ON_ONCE(output_types != 0);
7434 }
7435 
7436 static const char * const output_format_str[] = {
7437 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7438 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7439 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7440 };
7441 
7442 static const char *output_formats(enum intel_output_format format)
7443 {
7444 	if (format >= ARRAY_SIZE(output_format_str))
7445 		return "invalid";
7446 	return output_format_str[format];
7447 }
7448 
7449 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7450 {
7451 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7452 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7453 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7454 
7455 	if (!fb) {
7456 		drm_dbg_kms(&i915->drm,
7457 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7458 			    plane->base.base.id, plane->base.name,
7459 			    yesno(plane_state->uapi.visible));
7460 		return;
7461 	}
7462 
7463 	drm_dbg_kms(&i915->drm,
7464 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7465 		    plane->base.base.id, plane->base.name,
7466 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7467 		    fb->modifier, yesno(plane_state->uapi.visible));
7468 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7469 		    plane_state->hw.rotation, plane_state->scaler_id);
7470 	if (plane_state->uapi.visible)
7471 		drm_dbg_kms(&i915->drm,
7472 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7473 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7474 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7475 }
7476 
7477 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7478 				   struct intel_atomic_state *state,
7479 				   const char *context)
7480 {
7481 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7482 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7483 	const struct intel_plane_state *plane_state;
7484 	struct intel_plane *plane;
7485 	char buf[64];
7486 	int i;
7487 
7488 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7489 		    crtc->base.base.id, crtc->base.name,
7490 		    yesno(pipe_config->hw.enable), context);
7491 
7492 	if (!pipe_config->hw.enable)
7493 		goto dump_planes;
7494 
7495 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7496 	drm_dbg_kms(&dev_priv->drm,
7497 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7498 		    yesno(pipe_config->hw.active),
7499 		    buf, pipe_config->output_types,
7500 		    output_formats(pipe_config->output_format));
7501 
7502 	drm_dbg_kms(&dev_priv->drm,
7503 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7504 		    transcoder_name(pipe_config->cpu_transcoder),
7505 		    pipe_config->pipe_bpp, pipe_config->dither);
7506 
7507 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7508 		    transcoder_name(pipe_config->mst_master_transcoder));
7509 
7510 	drm_dbg_kms(&dev_priv->drm,
7511 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7512 		    transcoder_name(pipe_config->master_transcoder),
7513 		    pipe_config->sync_mode_slaves_mask);
7514 
7515 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7516 		    pipe_config->bigjoiner_slave ? "slave" :
7517 		    pipe_config->bigjoiner ? "master" : "no");
7518 
7519 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7520 		    enableddisabled(pipe_config->splitter.enable),
7521 		    pipe_config->splitter.link_count,
7522 		    pipe_config->splitter.pixel_overlap);
7523 
7524 	if (pipe_config->has_pch_encoder)
7525 		intel_dump_m_n_config(pipe_config, "fdi",
7526 				      pipe_config->fdi_lanes,
7527 				      &pipe_config->fdi_m_n);
7528 
7529 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7530 		intel_dump_m_n_config(pipe_config, "dp m_n",
7531 				pipe_config->lane_count, &pipe_config->dp_m_n);
7532 		if (pipe_config->has_drrs)
7533 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7534 					      pipe_config->lane_count,
7535 					      &pipe_config->dp_m2_n2);
7536 	}
7537 
7538 	drm_dbg_kms(&dev_priv->drm,
7539 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7540 		    pipe_config->has_audio, pipe_config->has_infoframe,
7541 		    pipe_config->infoframes.enable);
7542 
7543 	if (pipe_config->infoframes.enable &
7544 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7545 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7546 			    pipe_config->infoframes.gcp);
7547 	if (pipe_config->infoframes.enable &
7548 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7549 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7550 	if (pipe_config->infoframes.enable &
7551 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7552 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7553 	if (pipe_config->infoframes.enable &
7554 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7555 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7556 	if (pipe_config->infoframes.enable &
7557 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7558 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7559 	if (pipe_config->infoframes.enable &
7560 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7561 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7562 	if (pipe_config->infoframes.enable &
7563 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7564 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7565 
7566 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7567 		    yesno(pipe_config->vrr.enable),
7568 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7569 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband,
7570 		    pipe_config->vrr.flipline,
7571 		    intel_vrr_vmin_vblank_start(pipe_config),
7572 		    intel_vrr_vmax_vblank_start(pipe_config));
7573 
7574 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7575 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7576 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7577 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7578 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7579 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7580 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7581 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7582 	drm_dbg_kms(&dev_priv->drm,
7583 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7584 		    pipe_config->port_clock,
7585 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7586 		    pipe_config->pixel_rate);
7587 
7588 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7589 		    pipe_config->linetime, pipe_config->ips_linetime);
7590 
7591 	if (DISPLAY_VER(dev_priv) >= 9)
7592 		drm_dbg_kms(&dev_priv->drm,
7593 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7594 			    crtc->num_scalers,
7595 			    pipe_config->scaler_state.scaler_users,
7596 			    pipe_config->scaler_state.scaler_id);
7597 
7598 	if (HAS_GMCH(dev_priv))
7599 		drm_dbg_kms(&dev_priv->drm,
7600 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7601 			    pipe_config->gmch_pfit.control,
7602 			    pipe_config->gmch_pfit.pgm_ratios,
7603 			    pipe_config->gmch_pfit.lvds_border_bits);
7604 	else
7605 		drm_dbg_kms(&dev_priv->drm,
7606 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7607 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7608 			    enableddisabled(pipe_config->pch_pfit.enabled),
7609 			    yesno(pipe_config->pch_pfit.force_thru));
7610 
7611 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7612 		    pipe_config->ips_enabled, pipe_config->double_wide);
7613 
7614 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7615 
7616 	if (IS_CHERRYVIEW(dev_priv))
7617 		drm_dbg_kms(&dev_priv->drm,
7618 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7619 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7620 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7621 	else
7622 		drm_dbg_kms(&dev_priv->drm,
7623 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7624 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7625 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7626 
7627 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7628 		    pipe_config->hw.degamma_lut ?
7629 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7630 		    pipe_config->hw.gamma_lut ?
7631 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7632 
7633 dump_planes:
7634 	if (!state)
7635 		return;
7636 
7637 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7638 		if (plane->pipe == crtc->pipe)
7639 			intel_dump_plane_state(plane_state);
7640 	}
7641 }
7642 
7643 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7644 {
7645 	struct drm_device *dev = state->base.dev;
7646 	struct drm_connector *connector;
7647 	struct drm_connector_list_iter conn_iter;
7648 	unsigned int used_ports = 0;
7649 	unsigned int used_mst_ports = 0;
7650 	bool ret = true;
7651 
7652 	/*
7653 	 * We're going to peek into connector->state,
7654 	 * hence connection_mutex must be held.
7655 	 */
7656 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7657 
7658 	/*
7659 	 * Walk the connector list instead of the encoder
7660 	 * list to detect the problem on ddi platforms
7661 	 * where there's just one encoder per digital port.
7662 	 */
7663 	drm_connector_list_iter_begin(dev, &conn_iter);
7664 	drm_for_each_connector_iter(connector, &conn_iter) {
7665 		struct drm_connector_state *connector_state;
7666 		struct intel_encoder *encoder;
7667 
7668 		connector_state =
7669 			drm_atomic_get_new_connector_state(&state->base,
7670 							   connector);
7671 		if (!connector_state)
7672 			connector_state = connector->state;
7673 
7674 		if (!connector_state->best_encoder)
7675 			continue;
7676 
7677 		encoder = to_intel_encoder(connector_state->best_encoder);
7678 
7679 		drm_WARN_ON(dev, !connector_state->crtc);
7680 
7681 		switch (encoder->type) {
7682 		case INTEL_OUTPUT_DDI:
7683 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7684 				break;
7685 			fallthrough;
7686 		case INTEL_OUTPUT_DP:
7687 		case INTEL_OUTPUT_HDMI:
7688 		case INTEL_OUTPUT_EDP:
7689 			/* the same port mustn't appear more than once */
7690 			if (used_ports & BIT(encoder->port))
7691 				ret = false;
7692 
7693 			used_ports |= BIT(encoder->port);
7694 			break;
7695 		case INTEL_OUTPUT_DP_MST:
7696 			used_mst_ports |=
7697 				1 << encoder->port;
7698 			break;
7699 		default:
7700 			break;
7701 		}
7702 	}
7703 	drm_connector_list_iter_end(&conn_iter);
7704 
7705 	/* can't mix MST and SST/HDMI on the same port */
7706 	if (used_ports & used_mst_ports)
7707 		return false;
7708 
7709 	return ret;
7710 }
7711 
7712 static void
7713 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7714 					   struct intel_crtc_state *crtc_state)
7715 {
7716 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7717 
7718 	if (crtc_state->bigjoiner_slave) {
7719 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7720 								  crtc_state->bigjoiner_linked_crtc);
7721 
7722 		/* No need to copy state if the master state is unchanged */
7723 		if (!from_crtc_state)
7724 			return;
7725 	}
7726 
7727 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7728 }
7729 
7730 static void
7731 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7732 				 struct intel_crtc_state *crtc_state)
7733 {
7734 	crtc_state->hw.enable = crtc_state->uapi.enable;
7735 	crtc_state->hw.active = crtc_state->uapi.active;
7736 	crtc_state->hw.mode = crtc_state->uapi.mode;
7737 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7738 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7739 
7740 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7741 }
7742 
7743 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7744 {
7745 	if (crtc_state->bigjoiner_slave)
7746 		return;
7747 
7748 	crtc_state->uapi.enable = crtc_state->hw.enable;
7749 	crtc_state->uapi.active = crtc_state->hw.active;
7750 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7751 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7752 
7753 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7754 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7755 
7756 	/* copy color blobs to uapi */
7757 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7758 				  crtc_state->hw.degamma_lut);
7759 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7760 				  crtc_state->hw.gamma_lut);
7761 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7762 				  crtc_state->hw.ctm);
7763 }
7764 
7765 static int
7766 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7767 			  const struct intel_crtc_state *from_crtc_state)
7768 {
7769 	struct intel_crtc_state *saved_state;
7770 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7771 
7772 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7773 	if (!saved_state)
7774 		return -ENOMEM;
7775 
7776 	saved_state->uapi = crtc_state->uapi;
7777 	saved_state->scaler_state = crtc_state->scaler_state;
7778 	saved_state->shared_dpll = crtc_state->shared_dpll;
7779 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7780 	saved_state->crc_enabled = crtc_state->crc_enabled;
7781 
7782 	intel_crtc_free_hw_state(crtc_state);
7783 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7784 	kfree(saved_state);
7785 
7786 	/* Re-init hw state */
7787 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7788 	crtc_state->hw.enable = from_crtc_state->hw.enable;
7789 	crtc_state->hw.active = from_crtc_state->hw.active;
7790 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7791 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7792 
7793 	/* Some fixups */
7794 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7795 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
7796 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
7797 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
7798 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
7799 	crtc_state->bigjoiner_slave = true;
7800 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
7801 	crtc_state->has_audio = false;
7802 
7803 	return 0;
7804 }
7805 
7806 static int
7807 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
7808 				 struct intel_crtc_state *crtc_state)
7809 {
7810 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7811 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7812 	struct intel_crtc_state *saved_state;
7813 
7814 	saved_state = intel_crtc_state_alloc(crtc);
7815 	if (!saved_state)
7816 		return -ENOMEM;
7817 
7818 	/* free the old crtc_state->hw members */
7819 	intel_crtc_free_hw_state(crtc_state);
7820 
7821 	/* FIXME: before the switch to atomic started, a new pipe_config was
7822 	 * kzalloc'd. Code that depends on any field being zero should be
7823 	 * fixed, so that the crtc_state can be safely duplicated. For now,
7824 	 * only fields that are known to not cause problems are preserved. */
7825 
7826 	saved_state->uapi = crtc_state->uapi;
7827 	saved_state->scaler_state = crtc_state->scaler_state;
7828 	saved_state->shared_dpll = crtc_state->shared_dpll;
7829 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7830 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
7831 	       sizeof(saved_state->icl_port_dplls));
7832 	saved_state->crc_enabled = crtc_state->crc_enabled;
7833 	if (IS_G4X(dev_priv) ||
7834 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7835 		saved_state->wm = crtc_state->wm;
7836 
7837 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7838 	kfree(saved_state);
7839 
7840 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
7841 
7842 	return 0;
7843 }
7844 
7845 static int
7846 intel_modeset_pipe_config(struct intel_atomic_state *state,
7847 			  struct intel_crtc_state *pipe_config)
7848 {
7849 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
7850 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7851 	struct drm_connector *connector;
7852 	struct drm_connector_state *connector_state;
7853 	int base_bpp, ret, i;
7854 	bool retry = true;
7855 
7856 	pipe_config->cpu_transcoder =
7857 		(enum transcoder) to_intel_crtc(crtc)->pipe;
7858 
7859 	/*
7860 	 * Sanitize sync polarity flags based on requested ones. If neither
7861 	 * positive or negative polarity is requested, treat this as meaning
7862 	 * positive nor negative polarity is requested, treat this as meaning
7863 	 */
7864 	if (!(pipe_config->hw.adjusted_mode.flags &
7865 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
7866 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
7867 
7868 	if (!(pipe_config->hw.adjusted_mode.flags &
7869 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
7870 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
7871 
7872 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7873 					pipe_config);
7874 	if (ret)
7875 		return ret;
7876 
7877 	base_bpp = pipe_config->pipe_bpp;
7878 
7879 	/*
7880 	 * Determine the real pipe dimensions. Note that stereo modes can
7881 	 * increase the actual pipe size due to the frame doubling and
7882 	 * insertion of additional space for blanks between the frame. This
7883 	 * is stored in the crtc timings. We use the requested mode to do this
7884 	 * computation to clearly distinguish it from the adjusted mode, which
7885 	 * can be changed by the connectors in the below retry loop.
7886 	 */
7887 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
7888 			       &pipe_config->pipe_src_w,
7889 			       &pipe_config->pipe_src_h);
7890 
7891 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7892 		struct intel_encoder *encoder =
7893 			to_intel_encoder(connector_state->best_encoder);
7894 
7895 		if (connector_state->crtc != crtc)
7896 			continue;
7897 
7898 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
7899 			drm_dbg_kms(&i915->drm,
7900 				    "rejecting invalid cloning configuration\n");
7901 			return -EINVAL;
7902 		}
7903 
7904 		/*
7905 		 * Determine output_types before calling the .compute_config()
7906 		 * hooks so that the hooks can use this information safely.
7907 		 */
7908 		if (encoder->compute_output_type)
7909 			pipe_config->output_types |=
7910 				BIT(encoder->compute_output_type(encoder, pipe_config,
7911 								 connector_state));
7912 		else
7913 			pipe_config->output_types |= BIT(encoder->type);
7914 	}
7915 
7916 encoder_retry:
7917 	/* Ensure the port clock defaults are reset when retrying. */
7918 	pipe_config->port_clock = 0;
7919 	pipe_config->pixel_multiplier = 1;
7920 
7921 	/* Fill in default crtc timings, allow encoders to overwrite them. */
7922 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
7923 			      CRTC_STEREO_DOUBLE);
7924 
7925 	/* Pass our mode to the connectors and the CRTC to give them a chance to
7926 	 * adjust it according to limitations or connector properties, and also
7927 	 * a chance to reject the mode entirely.
7928 	 */
7929 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7930 		struct intel_encoder *encoder =
7931 			to_intel_encoder(connector_state->best_encoder);
7932 
7933 		if (connector_state->crtc != crtc)
7934 			continue;
7935 
7936 		ret = encoder->compute_config(encoder, pipe_config,
7937 					      connector_state);
7938 		if (ret < 0) {
7939 			if (ret != -EDEADLK)
7940 				drm_dbg_kms(&i915->drm,
7941 					    "Encoder config failure: %d\n",
7942 					    ret);
7943 			return ret;
7944 		}
7945 	}
7946 
7947 	/* Set default port clock if not overwritten by the encoder. Needs to be
7948 	 * done afterwards in case the encoder adjusts the mode. */
7949 	if (!pipe_config->port_clock)
7950 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
7951 			* pipe_config->pixel_multiplier;
7952 
7953 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
7954 	if (ret == -EDEADLK)
7955 		return ret;
7956 	if (ret < 0) {
7957 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
7958 		return ret;
7959 	}
7960 
7961 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
7962 		if (drm_WARN(&i915->drm, !retry,
7963 			     "loop in pipe configuration computation\n"))
7964 			return -EINVAL;
7965 
7966 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
7967 		retry = false;
7968 		goto encoder_retry;
7969 	}
7970 
7971 	/* Dithering seems not to pass bits through correctly when it should, so
7972 	 * only enable it on 6bpc panels and when it's not a compliance
7973 	 * test requesting 6bpc video pattern.
7974 	 */
7975 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
7976 		!pipe_config->dither_force_disable;
7977 	drm_dbg_kms(&i915->drm,
7978 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
7979 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7980 
7981 	return 0;
7982 }
7983 
7984 static int
7985 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7986 {
7987 	struct intel_atomic_state *state =
7988 		to_intel_atomic_state(crtc_state->uapi.state);
7989 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7990 	struct drm_connector_state *conn_state;
7991 	struct drm_connector *connector;
7992 	int i;
7993 
7994 	for_each_new_connector_in_state(&state->base, connector,
7995 					conn_state, i) {
7996 		struct intel_encoder *encoder =
7997 			to_intel_encoder(conn_state->best_encoder);
7998 		int ret;
7999 
8000 		if (conn_state->crtc != &crtc->base ||
8001 		    !encoder->compute_config_late)
8002 			continue;
8003 
8004 		ret = encoder->compute_config_late(encoder, crtc_state,
8005 						   conn_state);
8006 		if (ret)
8007 			return ret;
8008 	}
8009 
8010 	return 0;
8011 }
8012 
8013 bool intel_fuzzy_clock_check(int clock1, int clock2)
8014 {
8015 	int diff;
8016 
8017 	if (clock1 == clock2)
8018 		return true;
8019 
8020 	if (!clock1 || !clock2)
8021 		return false;
8022 
8023 	diff = abs(clock1 - clock2);
8024 
8025 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
8026 		return true;
8027 
8028 	return false;
8029 }
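/*
 * Worked example (illustrative numbers): clock1 = 100000, clock2 = 108000
 * kHz gives (8000 + 208000) * 100 / 208000 = 103 < 105, so the clocks are
 * considered equal; the check allows a difference of about 5% of
 * (clock1 + clock2), i.e. roughly a 10% relative mismatch.
 */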
8030 
8031 static bool
8032 intel_compare_m_n(unsigned int m, unsigned int n,
8033 		  unsigned int m2, unsigned int n2,
8034 		  bool exact)
8035 {
8036 	if (m == m2 && n == n2)
8037 		return true;
8038 
8039 	if (exact || !m || !n || !m2 || !n2)
8040 		return false;
8041 
8042 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
8043 
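	/*
	 * Bring both ratios to a common denominator by doubling M and N
	 * of whichever pair has the smaller N; doubling both together
	 * leaves the M/N ratio unchanged.
	 */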
8044 	if (n > n2) {
8045 		while (n > n2) {
8046 			m2 <<= 1;
8047 			n2 <<= 1;
8048 		}
8049 	} else if (n < n2) {
8050 		while (n < n2) {
8051 			m <<= 1;
8052 			n <<= 1;
8053 		}
8054 	}
8055 
8056 	if (n != n2)
8057 		return false;
8058 
8059 	return intel_fuzzy_clock_check(m, m2);
8060 }
8061 
8062 static bool
8063 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
8064 		       const struct intel_link_m_n *m2_n2,
8065 		       bool exact)
8066 {
8067 	return m_n->tu == m2_n2->tu &&
8068 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
8069 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
8070 		intel_compare_m_n(m_n->link_m, m_n->link_n,
8071 				  m2_n2->link_m, m2_n2->link_n, exact);
8072 }
8073 
8074 static bool
8075 intel_compare_infoframe(const union hdmi_infoframe *a,
8076 			const union hdmi_infoframe *b)
8077 {
8078 	return memcmp(a, b, sizeof(*a)) == 0;
8079 }
8080 
8081 static bool
8082 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
8083 			 const struct drm_dp_vsc_sdp *b)
8084 {
8085 	return memcmp(a, b, sizeof(*a)) == 0;
8086 }
8087 
8088 static void
8089 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
8090 			       bool fastset, const char *name,
8091 			       const union hdmi_infoframe *a,
8092 			       const union hdmi_infoframe *b)
8093 {
8094 	if (fastset) {
8095 		if (!drm_debug_enabled(DRM_UT_KMS))
8096 			return;
8097 
8098 		drm_dbg_kms(&dev_priv->drm,
8099 			    "fastset mismatch in %s infoframe\n", name);
8100 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8101 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
8102 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8103 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
8104 	} else {
8105 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
8106 		drm_err(&dev_priv->drm, "expected:\n");
8107 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
8108 		drm_err(&dev_priv->drm, "found:\n");
8109 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
8110 	}
8111 }
8112 
8113 static void
8114 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
8115 				bool fastset, const char *name,
8116 				const struct drm_dp_vsc_sdp *a,
8117 				const struct drm_dp_vsc_sdp *b)
8118 {
8119 	if (fastset) {
8120 		if (!drm_debug_enabled(DRM_UT_KMS))
8121 			return;
8122 
8123 		drm_dbg_kms(&dev_priv->drm,
8124 			    "fastset mismatch in %s dp sdp\n", name);
8125 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
8126 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
8127 		drm_dbg_kms(&dev_priv->drm, "found:\n");
8128 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
8129 	} else {
8130 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
8131 		drm_err(&dev_priv->drm, "expected:\n");
8132 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
8133 		drm_err(&dev_priv->drm, "found:\n");
8134 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
8135 	}
8136 }
8137 
8138 static void __printf(4, 5)
8139 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
8140 		     const char *name, const char *format, ...)
8141 {
8142 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
8143 	struct va_format vaf;
8144 	va_list args;
8145 
8146 	va_start(args, format);
8147 	vaf.fmt = format;
8148 	vaf.va = &args;
8149 
8150 	if (fastset)
8151 		drm_dbg_kms(&i915->drm,
8152 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
8153 			    crtc->base.base.id, crtc->base.name, name, &vaf);
8154 	else
8155 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
8156 			crtc->base.base.id, crtc->base.name, name, &vaf);
8157 
8158 	va_end(args);
8159 }
8160 
8161 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
8162 {
8163 	if (dev_priv->params.fastboot != -1)
8164 		return dev_priv->params.fastboot;
8165 
8166 	/* Enable fastboot by default on Skylake and newer */
8167 	if (DISPLAY_VER(dev_priv) >= 9)
8168 		return true;
8169 
8170 	/* Enable fastboot by default on VLV and CHV */
8171 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8172 		return true;
8173 
8174 	/* Disabled by default on all others */
8175 	return false;
8176 }
8177 
8178 static bool
8179 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
8180 			  const struct intel_crtc_state *pipe_config,
8181 			  bool fastset)
8182 {
8183 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
8184 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
8185 	bool ret = true;
8186 	u32 bp_gamma = 0;
8187 	bool fixup_inherited = fastset &&
8188 		current_config->inherited && !pipe_config->inherited;
8189 
8190 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
8191 		drm_dbg_kms(&dev_priv->drm,
8192 			    "initial modeset and fastboot not set\n");
8193 		ret = false;
8194 	}
8195 
8196 #define PIPE_CONF_CHECK_X(name) do { \
8197 	if (current_config->name != pipe_config->name) { \
8198 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8199 				     "(expected 0x%08x, found 0x%08x)", \
8200 				     current_config->name, \
8201 				     pipe_config->name); \
8202 		ret = false; \
8203 	} \
8204 } while (0)
8205 
8206 #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
8207 	if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
8208 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8209 				     "(expected 0x%08x, found 0x%08x)", \
8210 				     current_config->name & (mask), \
8211 				     pipe_config->name & (mask)); \
8212 		ret = false; \
8213 	} \
8214 } while (0)
8215 
8216 #define PIPE_CONF_CHECK_I(name) do { \
8217 	if (current_config->name != pipe_config->name) { \
8218 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8219 				     "(expected %i, found %i)", \
8220 				     current_config->name, \
8221 				     pipe_config->name); \
8222 		ret = false; \
8223 	} \
8224 } while (0)
8225 
8226 #define PIPE_CONF_CHECK_BOOL(name) do { \
8227 	if (current_config->name != pipe_config->name) { \
8228 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
8229 				     "(expected %s, found %s)", \
8230 				     yesno(current_config->name), \
8231 				     yesno(pipe_config->name)); \
8232 		ret = false; \
8233 	} \
8234 } while (0)
8235 
8236 /*
8237  * Checks state where we only read out the enabling, but not the entire
8238  * state itself (like full infoframes or ELD for audio). These states
8239  * require a full modeset on bootup to fix up.
8240  */
8241 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8242 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8243 		PIPE_CONF_CHECK_BOOL(name); \
8244 	} else { \
8245 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8246 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8247 				     yesno(current_config->name), \
8248 				     yesno(pipe_config->name)); \
8249 		ret = false; \
8250 	} \
8251 } while (0)
8252 
8253 #define PIPE_CONF_CHECK_P(name) do { \
8254 	if (current_config->name != pipe_config->name) { \
8255 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8256 				     "(expected %p, found %p)", \
8257 				     current_config->name, \
8258 				     pipe_config->name); \
8259 		ret = false; \
8260 	} \
8261 } while (0)
8262 
8263 #define PIPE_CONF_CHECK_M_N(name) do { \
8264 	if (!intel_compare_link_m_n(&current_config->name, \
8265 				    &pipe_config->name,\
8266 				    !fastset)) { \
8267 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8268 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8269 				     "found tu %i, gmch %i/%i link %i/%i)", \
8270 				     current_config->name.tu, \
8271 				     current_config->name.gmch_m, \
8272 				     current_config->name.gmch_n, \
8273 				     current_config->name.link_m, \
8274 				     current_config->name.link_n, \
8275 				     pipe_config->name.tu, \
8276 				     pipe_config->name.gmch_m, \
8277 				     pipe_config->name.gmch_n, \
8278 				     pipe_config->name.link_m, \
8279 				     pipe_config->name.link_n); \
8280 		ret = false; \
8281 	} \
8282 } while (0)
8283 
8284 /* This is required for BDW+ where there is only one set of registers for
8285  * switching between high and low RR.
8286  * This macro can be used whenever a comparison has to be made between one
8287  * hw state and multiple sw state variables.
8288  */
8289 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8290 	if (!intel_compare_link_m_n(&current_config->name, \
8291 				    &pipe_config->name, !fastset) && \
8292 	    !intel_compare_link_m_n(&current_config->alt_name, \
8293 				    &pipe_config->name, !fastset)) { \
8294 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8295 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8296 				     "or tu %i gmch %i/%i link %i/%i, " \
8297 				     "found tu %i, gmch %i/%i link %i/%i)", \
8298 				     current_config->name.tu, \
8299 				     current_config->name.gmch_m, \
8300 				     current_config->name.gmch_n, \
8301 				     current_config->name.link_m, \
8302 				     current_config->name.link_n, \
8303 				     current_config->alt_name.tu, \
8304 				     current_config->alt_name.gmch_m, \
8305 				     current_config->alt_name.gmch_n, \
8306 				     current_config->alt_name.link_m, \
8307 				     current_config->alt_name.link_n, \
8308 				     pipe_config->name.tu, \
8309 				     pipe_config->name.gmch_m, \
8310 				     pipe_config->name.gmch_n, \
8311 				     pipe_config->name.link_m, \
8312 				     pipe_config->name.link_n); \
8313 		ret = false; \
8314 	} \
8315 } while (0)
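/*
 * E.g. for dp_m_n below on BDW+, the hw readout is accepted if it matches
 * either the sw dp_m_n or the sw dp_m2_n2 (downclocked DRRS) values.
 */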
8316 
8317 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8318 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8319 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8320 				     "(%x) (expected %i, found %i)", \
8321 				     (mask), \
8322 				     current_config->name & (mask), \
8323 				     pipe_config->name & (mask)); \
8324 		ret = false; \
8325 	} \
8326 } while (0)
8327 
8328 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8329 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8330 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8331 				     "(expected %i, found %i)", \
8332 				     current_config->name, \
8333 				     pipe_config->name); \
8334 		ret = false; \
8335 	} \
8336 } while (0)
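/*
 * Note: intel_fuzzy_clock_check() accepts a small relative difference
 * between the two clocks (a few percent in the current code) instead of
 * requiring exact equality.
 */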
8337 
8338 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8339 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8340 				     &pipe_config->infoframes.name)) { \
8341 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8342 					       &current_config->infoframes.name, \
8343 					       &pipe_config->infoframes.name); \
8344 		ret = false; \
8345 	} \
8346 } while (0)
8347 
8348 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8349 	if (!current_config->has_psr && !pipe_config->has_psr && \
8350 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8351 				      &pipe_config->infoframes.name)) { \
8352 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8353 						&current_config->infoframes.name, \
8354 						&pipe_config->infoframes.name); \
8355 		ret = false; \
8356 	} \
8357 } while (0)
8358 
8359 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8360 	if (current_config->name1 != pipe_config->name1) { \
8361 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8362 				"(expected %i, found %i, won't compare lut values)", \
8363 				current_config->name1, \
8364 				pipe_config->name1); \
8365 		ret = false; \
8366 	} else { \
8367 		if (!intel_color_lut_equal(current_config->name2, \
8368 					pipe_config->name2, pipe_config->name1, \
8369 					bit_precision)) { \
8370 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8371 					"hw_state doesn't match sw_state"); \
8372 			ret = false; \
8373 		} \
8374 	} \
8375 } while (0)
8376 
8377 #define PIPE_CONF_QUIRK(quirk) \
8378 	((current_config->quirks | pipe_config->quirks) & (quirk))
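/*
 * A quirk present in either the sw or the hw state suppresses the
 * corresponding checks below; e.g. the BIGJOINER_SLAVE quirk skips the
 * timing, clock and PLL comparisons that cannot be read out properly yet.
 */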
8379 
8380 	PIPE_CONF_CHECK_I(cpu_transcoder);
8381 
8382 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8383 	PIPE_CONF_CHECK_I(fdi_lanes);
8384 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8385 
8386 	PIPE_CONF_CHECK_I(lane_count);
8387 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8388 
8389 	if (DISPLAY_VER(dev_priv) < 8) {
8390 		PIPE_CONF_CHECK_M_N(dp_m_n);
8391 
8392 		if (current_config->has_drrs)
8393 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8394 	} else
8395 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8396 
8397 	PIPE_CONF_CHECK_X(output_types);
8398 
8399 	/* FIXME do the readout properly and get rid of this quirk */
8400 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8401 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8402 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8403 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8404 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8405 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8406 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8407 
8408 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8409 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8410 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8411 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8412 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8413 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8414 
8415 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8416 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8417 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8418 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8419 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8420 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8421 
8422 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8423 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8424 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8425 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8426 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8427 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8428 
8429 		PIPE_CONF_CHECK_I(pixel_multiplier);
8430 
8431 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8432 				      DRM_MODE_FLAG_INTERLACE);
8433 
8434 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8435 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8436 					      DRM_MODE_FLAG_PHSYNC);
8437 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8438 					      DRM_MODE_FLAG_NHSYNC);
8439 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8440 					      DRM_MODE_FLAG_PVSYNC);
8441 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8442 					      DRM_MODE_FLAG_NVSYNC);
8443 		}
8444 	}
8445 
8446 	PIPE_CONF_CHECK_I(output_format);
8447 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8448 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8449 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8450 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8451 
8452 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8453 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8454 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8455 	/* FIXME do the readout properly and get rid of this quirk */
8456 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8457 		PIPE_CONF_CHECK_BOOL(fec_enable);
8458 
8459 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8460 
8461 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8462 	/* pfit ratios are autocomputed by the hw on gen4+ */
8463 	if (DISPLAY_VER(dev_priv) < 4)
8464 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8465 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8466 
8467 	/*
8468 	 * Changing the EDP transcoder input mux
8469 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8470 	 */
8471 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8472 
8473 	if (!fastset) {
8474 		PIPE_CONF_CHECK_I(pipe_src_w);
8475 		PIPE_CONF_CHECK_I(pipe_src_h);
8476 
8477 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8478 		if (current_config->pch_pfit.enabled) {
8479 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8480 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8481 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8482 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8483 		}
8484 
8485 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8486 		/* FIXME do the readout properly and get rid of this quirk */
8487 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8488 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8489 
8490 		PIPE_CONF_CHECK_X(gamma_mode);
8491 		if (IS_CHERRYVIEW(dev_priv))
8492 			PIPE_CONF_CHECK_X(cgm_mode);
8493 		else
8494 			PIPE_CONF_CHECK_X(csc_mode);
8495 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8496 		PIPE_CONF_CHECK_BOOL(csc_enable);
8497 
8498 		PIPE_CONF_CHECK_I(linetime);
8499 		PIPE_CONF_CHECK_I(ips_linetime);
8500 
8501 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8502 		if (bp_gamma)
8503 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8504 
8505 		PIPE_CONF_CHECK_BOOL(has_psr);
8506 		PIPE_CONF_CHECK_BOOL(has_psr2);
8507 		PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
8508 		PIPE_CONF_CHECK_I(dc3co_exitline);
8509 	}
8510 
8511 	PIPE_CONF_CHECK_BOOL(double_wide);
8512 
8513 	if (dev_priv->dpll.mgr)
8514 		PIPE_CONF_CHECK_P(shared_dpll);
8515 
8516 	/* FIXME do the readout properly and get rid of this quirk */
8517 	if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8518 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8519 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8520 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8521 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8522 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8523 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8524 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8525 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8526 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8527 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8528 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8529 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8530 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8531 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8532 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8533 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8534 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8535 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8536 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8537 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8538 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8539 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8540 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8541 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8542 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8543 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8544 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8545 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8546 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8547 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8548 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8549 	}
8550 
8551 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8552 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8553 		PIPE_CONF_CHECK_X(dsi_pll.div);
8554 
8555 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8556 			PIPE_CONF_CHECK_I(pipe_bpp);
8557 
8558 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8559 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8560 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8561 
8562 		PIPE_CONF_CHECK_I(min_voltage_level);
8563 	}
8564 
8565 	if (fastset && (current_config->has_psr || pipe_config->has_psr))
8566 		PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
8567 					    ~intel_hdmi_infoframe_enable(DP_SDP_VSC));
8568 	else
8569 		PIPE_CONF_CHECK_X(infoframes.enable);
8570 
8571 	PIPE_CONF_CHECK_X(infoframes.gcp);
8572 	PIPE_CONF_CHECK_INFOFRAME(avi);
8573 	PIPE_CONF_CHECK_INFOFRAME(spd);
8574 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8575 	PIPE_CONF_CHECK_INFOFRAME(drm);
8576 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8577 
8578 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8579 	PIPE_CONF_CHECK_I(master_transcoder);
8580 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8581 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8582 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8583 
8584 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8585 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8586 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8587 
8588 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8589 	PIPE_CONF_CHECK_I(splitter.link_count);
8590 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8591 
8592 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8593 
8594 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8595 	PIPE_CONF_CHECK_I(vrr.vmin);
8596 	PIPE_CONF_CHECK_I(vrr.vmax);
8597 	PIPE_CONF_CHECK_I(vrr.flipline);
8598 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8599 	PIPE_CONF_CHECK_I(vrr.guardband);
8600 
8601 #undef PIPE_CONF_CHECK_X
8602 #undef PIPE_CONF_CHECK_I
8603 #undef PIPE_CONF_CHECK_BOOL
8604 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8605 #undef PIPE_CONF_CHECK_P
8606 #undef PIPE_CONF_CHECK_FLAGS
8607 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8608 #undef PIPE_CONF_CHECK_COLOR_LUT
8609 #undef PIPE_CONF_QUIRK
8610 
8611 	return ret;
8612 }
8613 
8614 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8615 					   const struct intel_crtc_state *pipe_config)
8616 {
8617 	if (pipe_config->has_pch_encoder) {
8618 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8619 							    &pipe_config->fdi_m_n);
8620 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8621 
8622 		/*
8623 		 * FDI already provided one idea for the dotclock.
8624 		 * Yell if the encoder disagrees.
8625 		 */
8626 		drm_WARN(&dev_priv->drm,
8627 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8628 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8629 			 fdi_dotclock, dotclock);
8630 	}
8631 }
8632 
8633 static void verify_wm_state(struct intel_crtc *crtc,
8634 			    struct intel_crtc_state *new_crtc_state)
8635 {
8636 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8637 	struct skl_hw_state {
8638 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8639 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8640 		struct skl_pipe_wm wm;
8641 	} *hw;
8642 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8643 	int level, max_level = ilk_wm_max_level(dev_priv);
8644 	struct intel_plane *plane;
8645 	u8 hw_enabled_slices;
8646 
8647 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8648 		return;
8649 
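	/* The hw state is too big for the stack; read it into a heap copy. */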
8650 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8651 	if (!hw)
8652 		return;
8653 
8654 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8655 
8656 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8657 
8658 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8659 
8660 	if (DISPLAY_VER(dev_priv) >= 11 &&
8661 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8662 		drm_err(&dev_priv->drm,
8663 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8664 			dev_priv->dbuf.enabled_slices,
8665 			hw_enabled_slices);
8666 
8667 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8668 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8669 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8670 
8671 		/* Watermarks */
8672 		for (level = 0; level <= max_level; level++) {
8673 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8674 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8675 
8676 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8677 				continue;
8678 
8679 			drm_err(&dev_priv->drm,
8680 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8681 				plane->base.base.id, plane->base.name, level,
8682 				sw_wm_level->enable,
8683 				sw_wm_level->blocks,
8684 				sw_wm_level->lines,
8685 				hw_wm_level->enable,
8686 				hw_wm_level->blocks,
8687 				hw_wm_level->lines);
8688 		}
8689 
8690 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8691 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8692 
8693 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8694 			drm_err(&dev_priv->drm,
8695 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8696 				plane->base.base.id, plane->base.name,
8697 				sw_wm_level->enable,
8698 				sw_wm_level->blocks,
8699 				sw_wm_level->lines,
8700 				hw_wm_level->enable,
8701 				hw_wm_level->blocks,
8702 				hw_wm_level->lines);
8703 		}
8704 
8705 		hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
8706 		sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
8707 
8708 		if (HAS_HW_SAGV_WM(dev_priv) &&
8709 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8710 			drm_err(&dev_priv->drm,
8711 				"[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8712 				plane->base.base.id, plane->base.name,
8713 				sw_wm_level->enable,
8714 				sw_wm_level->blocks,
8715 				sw_wm_level->lines,
8716 				hw_wm_level->enable,
8717 				hw_wm_level->blocks,
8718 				hw_wm_level->lines);
8719 		}
8720 
8721 		hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
8722 		sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
8723 
8724 		if (HAS_HW_SAGV_WM(dev_priv) &&
8725 		    !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8726 			drm_err(&dev_priv->drm,
8727 				"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8728 				plane->base.base.id, plane->base.name,
8729 				sw_wm_level->enable,
8730 				sw_wm_level->blocks,
8731 				sw_wm_level->lines,
8732 				hw_wm_level->enable,
8733 				hw_wm_level->blocks,
8734 				hw_wm_level->lines);
8735 		}
8736 
8737 		/* DDB */
8738 		hw_ddb_entry = &hw->ddb_y[plane->id];
8739 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8740 
8741 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8742 			drm_err(&dev_priv->drm,
8743 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8744 				plane->base.base.id, plane->base.name,
8745 				sw_ddb_entry->start, sw_ddb_entry->end,
8746 				hw_ddb_entry->start, hw_ddb_entry->end);
8747 		}
8748 	}
8749 
8750 	kfree(hw);
8751 }
8752 
8753 static void
8754 verify_connector_state(struct intel_atomic_state *state,
8755 		       struct intel_crtc *crtc)
8756 {
8757 	struct drm_connector *connector;
8758 	struct drm_connector_state *new_conn_state;
8759 	int i;
8760 
8761 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8762 		struct drm_encoder *encoder = connector->encoder;
8763 		struct intel_crtc_state *crtc_state = NULL;
8764 
8765 		if (new_conn_state->crtc != &crtc->base)
8766 			continue;
8767 
8768 		if (crtc)
8769 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8770 
8771 		intel_connector_verify_state(crtc_state, new_conn_state);
8772 
8773 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8774 		     "connector's atomic encoder doesn't match legacy encoder\n");
8775 	}
8776 }
8777 
8778 static void
8779 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8780 {
8781 	struct intel_encoder *encoder;
8782 	struct drm_connector *connector;
8783 	struct drm_connector_state *old_conn_state, *new_conn_state;
8784 	int i;
8785 
8786 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8787 		bool enabled = false, found = false;
8788 		enum pipe pipe;
8789 
8790 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8791 			    encoder->base.base.id,
8792 			    encoder->base.name);
8793 
8794 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8795 						   new_conn_state, i) {
8796 			if (old_conn_state->best_encoder == &encoder->base)
8797 				found = true;
8798 
8799 			if (new_conn_state->best_encoder != &encoder->base)
8800 				continue;
8801 			found = enabled = true;
8802 
8803 			I915_STATE_WARN(new_conn_state->crtc !=
8804 					encoder->base.crtc,
8805 			     "connector's crtc doesn't match encoder crtc\n");
8806 		}
8807 
8808 		if (!found)
8809 			continue;
8810 
8811 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
8812 		     "encoder's enabled state mismatch "
8813 		     "(expected %i, found %i)\n",
8814 		     !!encoder->base.crtc, enabled);
8815 
8816 		if (!encoder->base.crtc) {
8817 			bool active;
8818 
8819 			active = encoder->get_hw_state(encoder, &pipe);
8820 			I915_STATE_WARN(active,
8821 			     "encoder detached but still enabled on pipe %c.\n",
8822 			     pipe_name(pipe));
8823 		}
8824 	}
8825 }
8826 
8827 static void
8828 verify_crtc_state(struct intel_crtc *crtc,
8829 		  struct intel_crtc_state *old_crtc_state,
8830 		  struct intel_crtc_state *new_crtc_state)
8831 {
8832 	struct drm_device *dev = crtc->base.dev;
8833 	struct drm_i915_private *dev_priv = to_i915(dev);
8834 	struct intel_encoder *encoder;
8835 	struct intel_crtc_state *pipe_config = old_crtc_state;
8836 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
8837 	struct intel_crtc *master = crtc;
8838 
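	/*
	 * old_crtc_state is reset and reused (as pipe_config) as scratch
	 * space for the hw state readout below.
	 */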
8839 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
8840 	intel_crtc_free_hw_state(old_crtc_state);
8841 	intel_crtc_state_reset(old_crtc_state, crtc);
8842 	old_crtc_state->uapi.state = state;
8843 
8844 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
8845 		    crtc->base.name);
8846 
8847 	pipe_config->hw.enable = new_crtc_state->hw.enable;
8848 
8849 	intel_crtc_get_pipe_config(pipe_config);
8850 
8851 	/* we keep both pipes enabled on 830 */
8852 	if (IS_I830(dev_priv) && pipe_config->hw.active)
8853 		pipe_config->hw.active = new_crtc_state->hw.active;
8854 
8855 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
8856 			"crtc active state doesn't match with hw state "
8857 			"(expected %i, found %i)\n",
8858 			new_crtc_state->hw.active, pipe_config->hw.active);
8859 
8860 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
8861 			"transitional active state does not match atomic hw state "
8862 			"(expected %i, found %i)\n",
8863 			new_crtc_state->hw.active, crtc->active);
8864 
8865 	if (new_crtc_state->bigjoiner_slave)
8866 		master = new_crtc_state->bigjoiner_linked_crtc;
8867 
8868 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
8869 		enum pipe pipe;
8870 		bool active;
8871 
8872 		active = encoder->get_hw_state(encoder, &pipe);
8873 		I915_STATE_WARN(active != new_crtc_state->hw.active,
8874 				"[ENCODER:%i] active %i with crtc active %i\n",
8875 				encoder->base.base.id, active,
8876 				new_crtc_state->hw.active);
8877 
8878 		I915_STATE_WARN(active && master->pipe != pipe,
8879 				"Encoder connected to wrong pipe %c\n",
8880 				pipe_name(pipe));
8881 
8882 		if (active)
8883 			intel_encoder_get_config(encoder, pipe_config);
8884 	}
8885 
8886 	if (!new_crtc_state->hw.active)
8887 		return;
8888 
8889 	if (new_crtc_state->bigjoiner_slave)
8890 		/* No PLLs set for slave */
8891 		pipe_config->shared_dpll = NULL;
8892 
8893 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
8894 
8895 	if (!intel_pipe_config_compare(new_crtc_state,
8896 				       pipe_config, false)) {
8897 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
8898 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
8899 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
8900 	}
8901 }
8902 
8903 static void
8904 intel_verify_planes(struct intel_atomic_state *state)
8905 {
8906 	struct intel_plane *plane;
8907 	const struct intel_plane_state *plane_state;
8908 	int i;
8909 
8910 	for_each_new_intel_plane_in_state(state, plane,
8911 					  plane_state, i)
8912 		assert_plane(plane, plane_state->planar_slave ||
8913 			     plane_state->uapi.visible);
8914 }
8915 
8916 static void
8917 verify_single_dpll_state(struct drm_i915_private *dev_priv,
8918 			 struct intel_shared_dpll *pll,
8919 			 struct intel_crtc *crtc,
8920 			 struct intel_crtc_state *new_crtc_state)
8921 {
8922 	struct intel_dpll_hw_state dpll_hw_state;
8923 	u8 pipe_mask;
8924 	bool active;
8925 
8926 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8927 
8928 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
8929 
8930 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
8931 
8932 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
8933 		I915_STATE_WARN(!pll->on && pll->active_mask,
8934 		     "pll in active use but not on in sw tracking\n");
8935 		I915_STATE_WARN(pll->on && !pll->active_mask,
8936 		     "pll is on but not used by any active pipe\n");
8937 		I915_STATE_WARN(pll->on != active,
8938 		     "pll on state mismatch (expected %i, found %i)\n",
8939 		     pll->on, active);
8940 	}
8941 
8942 	if (!crtc) {
8943 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
8944 				"more active pll users than references: 0x%x vs 0x%x\n",
8945 				pll->active_mask, pll->state.pipe_mask);
8946 
8947 		return;
8948 	}
8949 
8950 	pipe_mask = BIT(crtc->pipe);
8951 
8952 	if (new_crtc_state->hw.active)
8953 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
8954 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
8955 				pipe_name(crtc->pipe), pll->active_mask);
8956 	else
8957 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8958 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
8959 				pipe_name(crtc->pipe), pll->active_mask);
8960 
8961 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
8962 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
8963 			pipe_mask, pll->state.pipe_mask);
8964 
8965 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
8966 					  &dpll_hw_state,
8967 					  sizeof(dpll_hw_state)),
8968 			"pll hw state mismatch\n");
8969 }
8970 
8971 static void
8972 verify_shared_dpll_state(struct intel_crtc *crtc,
8973 			 struct intel_crtc_state *old_crtc_state,
8974 			 struct intel_crtc_state *new_crtc_state)
8975 {
8976 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8977 
8978 	if (new_crtc_state->shared_dpll)
8979 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8980 
8981 	if (old_crtc_state->shared_dpll &&
8982 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8983 		u8 pipe_mask = BIT(crtc->pipe);
8984 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8985 
8986 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8987 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8988 				pipe_name(crtc->pipe), pll->active_mask);
8989 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8990 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8991 				pipe_name(crtc->pipe), pll->state.pipe_mask);
8992 	}
8993 }
8994 
8995 static void
8996 verify_mpllb_state(struct intel_atomic_state *state,
8997 		   struct intel_crtc_state *new_crtc_state)
8998 {
8999 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9000 	struct intel_mpllb_state mpllb_hw_state = { 0 };
9001 	struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
9002 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9003 	struct intel_encoder *encoder;
9004 
9005 	if (!IS_DG2(i915))
9006 		return;
9007 
9008 	if (!new_crtc_state->hw.active)
9009 		return;
9010 
9011 	if (new_crtc_state->bigjoiner_slave)
9012 		return;
9013 
9014 	encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
9015 	intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
9016 
9017 #define MPLLB_CHECK(name) do { \
9018 	if (mpllb_sw_state->name != mpllb_hw_state.name) { \
9019 		pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
9020 				     "(expected 0x%08x, found 0x%08x)", \
9021 				     mpllb_sw_state->name, \
9022 				     mpllb_hw_state.name); \
9023 	} \
9024 } while (0)
9025 
9026 	MPLLB_CHECK(mpllb_cp);
9027 	MPLLB_CHECK(mpllb_div);
9028 	MPLLB_CHECK(mpllb_div2);
9029 	MPLLB_CHECK(mpllb_fracn1);
9030 	MPLLB_CHECK(mpllb_fracn2);
9031 	MPLLB_CHECK(mpllb_sscen);
9032 	MPLLB_CHECK(mpllb_sscstep);
9033 
9034 	/*
9035 	 * ref_control is handled by the hardware/firmware and never
9036 	 * programmed by the software, but the proper values are supplied
9037 	 * in the bspec for verification purposes.
9038 	 */
9039 	MPLLB_CHECK(ref_control);
9040 
9041 #undef MPLLB_CHECK
9042 }
9043 
9044 static void
9045 intel_modeset_verify_crtc(struct intel_crtc *crtc,
9046 			  struct intel_atomic_state *state,
9047 			  struct intel_crtc_state *old_crtc_state,
9048 			  struct intel_crtc_state *new_crtc_state)
9049 {
9050 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
9051 		return;
9052 
9053 	verify_wm_state(crtc, new_crtc_state);
9054 	verify_connector_state(state, crtc);
9055 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
9056 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
9057 	verify_mpllb_state(state, new_crtc_state);
9058 }
9059 
9060 static void
9061 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
9062 {
9063 	int i;
9064 
9065 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
9066 		verify_single_dpll_state(dev_priv,
9067 					 &dev_priv->dpll.shared_dplls[i],
9068 					 NULL, NULL);
9069 }
9070 
9071 static void
9072 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
9073 			      struct intel_atomic_state *state)
9074 {
9075 	verify_encoder_state(dev_priv, state);
9076 	verify_connector_state(state, NULL);
9077 	verify_disabled_dpll_state(dev_priv);
9078 }
9079 
9080 int intel_modeset_all_pipes(struct intel_atomic_state *state)
9081 {
9082 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9083 	struct intel_crtc *crtc;
9084 
9085 	/*
9086 	 * Add all pipes to the state, and force
9087 	 * a modeset on all the active ones.
9088 	 */
9089 	for_each_intel_crtc(&dev_priv->drm, crtc) {
9090 		struct intel_crtc_state *crtc_state;
9091 		int ret;
9092 
9093 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9094 		if (IS_ERR(crtc_state))
9095 			return PTR_ERR(crtc_state);
9096 
9097 		if (!crtc_state->hw.active ||
9098 		    drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
9099 			continue;
9100 
9101 		crtc_state->uapi.mode_changed = true;
9102 
9103 		ret = drm_atomic_add_affected_connectors(&state->base,
9104 							 &crtc->base);
9105 		if (ret)
9106 			return ret;
9107 
9108 		ret = intel_atomic_add_affected_planes(state, crtc);
9109 		if (ret)
9110 			return ret;
9111 
9112 		crtc_state->update_planes |= crtc_state->active_planes;
9113 	}
9114 
9115 	return 0;
9116 }
9117 
9118 static void
9119 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
9120 {
9121 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9122 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9123 	struct drm_display_mode adjusted_mode =
9124 		crtc_state->hw.adjusted_mode;
9125 
9126 	if (crtc_state->vrr.enable) {
9127 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
9128 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
9129 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
9130 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
9131 	}
9132 
9133 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
9134 
9135 	crtc->mode_flags = crtc_state->mode_flags;
9136 
9137 	/*
9138 	 * The scanline counter increments at the leading edge of hsync.
9139 	 *
9140 	 * On most platforms it starts counting from vtotal-1 on the
9141 	 * first active line. That means the scanline counter value is
9142 	 * always one less than what we would expect. Ie. just after
9143 	 * start of vblank, which also occurs at start of hsync (on the
9144 	 * last active line), the scanline counter will read vblank_start-1.
9145 	 *
9146 	 * On gen2 the scanline counter starts counting from 1 instead
9147 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
9148 	 * to keep the value positive), instead of adding one.
9149 	 *
9150 	 * On HSW+ the behaviour of the scanline counter depends on the output
9151 	 * type. For DP ports it behaves like most other platforms, but on HDMI
9152 	 * there's an extra 1 line difference. So we need to add two instead of
9153 	 * one to the value.
9154 	 *
9155 	 * On VLV/CHV DSI the scanline counter would appear to increment
9156 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
9157 	 * that means we can't tell whether we're in vblank or not while
9158 	 * we're on that particular line. We must still set scanline_offset
9159 	 * to 1 so that the vblank timestamps come out correct when we query
9160 	 * the scanline counter from within the vblank interrupt handler.
9161 	 * However if queried just before the start of vblank we'll get an
9162 	 * answer that's slightly in the future.
9163 	 */
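	/*
	 * Worked example: for crtc_vtotal == 525 this yields a
	 * scanline_offset of 524 on gen2 (261 if interlaced), 2 for
	 * HDMI on HSW+, and 1 everywhere else.
	 */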
9164 	if (DISPLAY_VER(dev_priv) == 2) {
9165 		int vtotal;
9166 
9167 		vtotal = adjusted_mode.crtc_vtotal;
9168 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9169 			vtotal /= 2;
9170 
9171 		crtc->scanline_offset = vtotal - 1;
9172 	} else if (HAS_DDI(dev_priv) &&
9173 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
9174 		crtc->scanline_offset = 2;
9175 	} else {
9176 		crtc->scanline_offset = 1;
9177 	}
9178 }
9179 
9180 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
9181 {
9182 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9183 	struct intel_crtc_state *new_crtc_state;
9184 	struct intel_crtc *crtc;
9185 	int i;
9186 
9187 	if (!dev_priv->display.crtc_compute_clock)
9188 		return;
9189 
9190 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9191 		if (!intel_crtc_needs_modeset(new_crtc_state))
9192 			continue;
9193 
9194 		intel_release_shared_dplls(state, crtc);
9195 	}
9196 }
9197 
9198 /*
9199  * This implements the workaround described in the "notes" section of the mode
9200  * set sequence documentation. When going from no pipes or single pipe to
9201  * multiple pipes, and planes are enabled after the pipe, we need to wait at
9202  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
9203  */
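/*
 * The chosen dependency is recorded in hsw_workaround_pipe: a crtc being
 * enabled waits on an already enabled pipe when one exists, otherwise
 * the second crtc enabled in this modeset waits on the first.
 */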
9204 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
9205 {
9206 	struct intel_crtc_state *crtc_state;
9207 	struct intel_crtc *crtc;
9208 	struct intel_crtc_state *first_crtc_state = NULL;
9209 	struct intel_crtc_state *other_crtc_state = NULL;
9210 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
9211 	int i;
9212 
9213 	/* look at all crtcs that are going to be enabled during the modeset */
9214 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9215 		if (!crtc_state->hw.active ||
9216 		    !intel_crtc_needs_modeset(crtc_state))
9217 			continue;
9218 
9219 		if (first_crtc_state) {
9220 			other_crtc_state = crtc_state;
9221 			break;
9222 		} else {
9223 			first_crtc_state = crtc_state;
9224 			first_pipe = crtc->pipe;
9225 		}
9226 	}
9227 
9228 	/* No workaround needed? */
9229 	if (!first_crtc_state)
9230 		return 0;
9231 
9232 	/* w/a possibly needed, check how many crtcs are already enabled. */
9233 	for_each_intel_crtc(state->base.dev, crtc) {
9234 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
9235 		if (IS_ERR(crtc_state))
9236 			return PTR_ERR(crtc_state);
9237 
9238 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
9239 
9240 		if (!crtc_state->hw.active ||
9241 		    intel_crtc_needs_modeset(crtc_state))
9242 			continue;
9243 
9244 		/* 2 or more enabled crtcs means no need for w/a */
9245 		if (enabled_pipe != INVALID_PIPE)
9246 			return 0;
9247 
9248 		enabled_pipe = crtc->pipe;
9249 	}
9250 
9251 	if (enabled_pipe != INVALID_PIPE)
9252 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
9253 	else if (other_crtc_state)
9254 		other_crtc_state->hsw_workaround_pipe = first_pipe;
9255 
9256 	return 0;
9257 }
9258 
9259 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
9260 			   u8 active_pipes)
9261 {
9262 	const struct intel_crtc_state *crtc_state;
9263 	struct intel_crtc *crtc;
9264 	int i;
9265 
9266 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9267 		if (crtc_state->hw.active)
9268 			active_pipes |= BIT(crtc->pipe);
9269 		else
9270 			active_pipes &= ~BIT(crtc->pipe);
9271 	}
9272 
9273 	return active_pipes;
9274 }
9275 
9276 static int intel_modeset_checks(struct intel_atomic_state *state)
9277 {
9278 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9279 
9280 	state->modeset = true;
9281 
9282 	if (IS_HASWELL(dev_priv))
9283 		return hsw_mode_set_planes_workaround(state);
9284 
9285 	return 0;
9286 }
9287 
9288 /*
9289  * Handle calculation of various watermark data at the end of the atomic check
9290  * phase.  The code here should be run after the per-crtc and per-plane 'check'
9291  * handlers to ensure that all derived state has been updated.
9292  */
9293 static int calc_watermark_data(struct intel_atomic_state *state)
9294 {
9295 	struct drm_device *dev = state->base.dev;
9296 	struct drm_i915_private *dev_priv = to_i915(dev);
9297 
9298 	/* Is there platform-specific watermark information to calculate? */
9299 	if (dev_priv->display.compute_global_watermarks)
9300 		return dev_priv->display.compute_global_watermarks(state);
9301 
9302 	return 0;
9303 }
9304 
9305 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
9306 				     struct intel_crtc_state *new_crtc_state)
9307 {
9308 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
9309 		return;
9310 
9311 	new_crtc_state->uapi.mode_changed = false;
9312 	new_crtc_state->update_pipe = true;
9313 }
9314 
9315 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
9316 				    struct intel_crtc_state *new_crtc_state)
9317 {
9318 	/*
9319 	 * If we're not doing the full modeset we want to
9320 	 * keep the current M/N values as they may be
9321 	 * sufficiently different to the computed values
9322 	 * to cause problems.
9323 	 *
9324 	 * FIXME: should really copy more fuzzy state here
9325 	 */
9326 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
9327 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
9328 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
9329 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
9330 }
9331 
9332 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
9333 					  struct intel_crtc *crtc,
9334 					  u8 plane_ids_mask)
9335 {
9336 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9337 	struct intel_plane *plane;
9338 
9339 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
9340 		struct intel_plane_state *plane_state;
9341 
9342 		if ((plane_ids_mask & BIT(plane->id)) == 0)
9343 			continue;
9344 
9345 		plane_state = intel_atomic_get_plane_state(state, plane);
9346 		if (IS_ERR(plane_state))
9347 			return PTR_ERR(plane_state);
9348 	}
9349 
9350 	return 0;
9351 }
9352 
9353 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
9354 				     struct intel_crtc *crtc)
9355 {
9356 	const struct intel_crtc_state *old_crtc_state =
9357 		intel_atomic_get_old_crtc_state(state, crtc);
9358 	const struct intel_crtc_state *new_crtc_state =
9359 		intel_atomic_get_new_crtc_state(state, crtc);
9360 
9361 	return intel_crtc_add_planes_to_state(state, crtc,
9362 					      old_crtc_state->enabled_planes |
9363 					      new_crtc_state->enabled_planes);
9364 }
9365 
9366 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
9367 {
9368 	/* See {hsw,vlv,ivb}_plane_ratio() */
9369 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
9370 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
9371 		IS_IVYBRIDGE(dev_priv);
9372 }
9373 
9374 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
9375 					   struct intel_crtc *crtc,
9376 					   struct intel_crtc *other)
9377 {
9378 	const struct intel_plane_state *plane_state;
9379 	struct intel_plane *plane;
9380 	u8 plane_ids = 0;
9381 	int i;
9382 
9383 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9384 		if (plane->pipe == crtc->pipe)
9385 			plane_ids |= BIT(plane->id);
9386 	}
9387 
9388 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9389 }
9390 
9391 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9392 {
9393 	const struct intel_crtc_state *crtc_state;
9394 	struct intel_crtc *crtc;
9395 	int i;
9396 
9397 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9398 		int ret;
9399 
9400 		if (!crtc_state->bigjoiner)
9401 			continue;
9402 
9403 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9404 						      crtc_state->bigjoiner_linked_crtc);
9405 		if (ret)
9406 			return ret;
9407 	}
9408 
9409 	return 0;
9410 }
9411 
9412 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9413 {
9414 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9415 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9416 	struct intel_plane_state *plane_state;
9417 	struct intel_plane *plane;
9418 	struct intel_crtc *crtc;
9419 	int i, ret;
9420 
9421 	ret = icl_add_linked_planes(state);
9422 	if (ret)
9423 		return ret;
9424 
9425 	ret = intel_bigjoiner_add_affected_planes(state);
9426 	if (ret)
9427 		return ret;
9428 
9429 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9430 		ret = intel_plane_atomic_check(state, plane);
9431 		if (ret) {
9432 			drm_dbg_atomic(&dev_priv->drm,
9433 				       "[PLANE:%d:%s] atomic driver check failed\n",
9434 				       plane->base.base.id, plane->base.name);
9435 			return ret;
9436 		}
9437 	}
9438 
9439 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9440 					    new_crtc_state, i) {
9441 		u8 old_active_planes, new_active_planes;
9442 
9443 		ret = icl_check_nv12_planes(new_crtc_state);
9444 		if (ret)
9445 			return ret;
9446 
9447 		/*
9448 		 * On some platforms the number of active planes affects
9449 		 * the planes' minimum cdclk calculation. Add such planes
9450 		 * to the state before we compute the minimum cdclk.
9451 		 */
9452 		if (!active_planes_affects_min_cdclk(dev_priv))
9453 			continue;
9454 
9455 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9456 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9457 
9458 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9459 			continue;
9460 
9461 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9462 		if (ret)
9463 			return ret;
9464 	}
9465 
9466 	return 0;
9467 }
9468 
9469 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9470 				    bool *need_cdclk_calc)
9471 {
9472 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9473 	const struct intel_cdclk_state *old_cdclk_state;
9474 	const struct intel_cdclk_state *new_cdclk_state;
9475 	struct intel_plane_state *plane_state;
9476 	struct intel_bw_state *new_bw_state;
9477 	struct intel_plane *plane;
9478 	int min_cdclk = 0;
9479 	enum pipe pipe;
9480 	int ret;
9481 	int i;
9482 	/*
9483 	 * active_planes bitmask has been updated, and potentially
9484 	 * affected planes are part of the state. We can now
9485 	 * compute the minimum cdclk for each plane.
9486 	 */
9487 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9488 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9489 		if (ret)
9490 			return ret;
9491 	}
9492 
9493 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9494 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9495 
9496 	if (new_cdclk_state &&
9497 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9498 		*need_cdclk_calc = true;
9499 
9500 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9501 	if (ret)
9502 		return ret;
9503 
9504 	new_bw_state = intel_atomic_get_new_bw_state(state);
9505 
9506 	if (!new_cdclk_state || !new_bw_state)
9507 		return 0;
9508 
9509 	for_each_pipe(dev_priv, pipe) {
9510 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9511 
9512 		/*
9513 		 * Currently we only do this if the cdclk needs to increase.
9514 		 */
9515 		if (new_bw_state->min_cdclk > min_cdclk)
9516 			*need_cdclk_calc = true;
9517 	}
9518 
9519 	return 0;
9520 }
9521 
9522 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9523 {
9524 	struct intel_crtc_state *crtc_state;
9525 	struct intel_crtc *crtc;
9526 	int i;
9527 
9528 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9529 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9530 		int ret;
9531 
9532 		ret = intel_crtc_atomic_check(state, crtc);
9533 		if (ret) {
9534 			drm_dbg_atomic(&i915->drm,
9535 				       "[CRTC:%d:%s] atomic driver check failed\n",
9536 				       crtc->base.base.id, crtc->base.name);
9537 			return ret;
9538 		}
9539 	}
9540 
9541 	return 0;
9542 }
9543 
9544 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9545 					       u8 transcoders)
9546 {
9547 	const struct intel_crtc_state *new_crtc_state;
9548 	struct intel_crtc *crtc;
9549 	int i;
9550 
9551 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9552 		if (new_crtc_state->hw.enable &&
9553 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9554 		    intel_crtc_needs_modeset(new_crtc_state))
9555 			return true;
9556 	}
9557 
9558 	return false;
9559 }
9560 
9561 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9562 					struct intel_crtc *crtc,
9563 					struct intel_crtc_state *old_crtc_state,
9564 					struct intel_crtc_state *new_crtc_state)
9565 {
9566 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9567 	struct intel_crtc *slave, *master;
9568 
9569 	/* slave being enabled, is the master still claiming this crtc? */
9570 	if (old_crtc_state->bigjoiner_slave) {
9571 		slave = crtc;
9572 		master = old_crtc_state->bigjoiner_linked_crtc;
9573 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9574 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9575 			goto claimed;
9576 	}
9577 
9578 	if (!new_crtc_state->bigjoiner)
9579 		return 0;
9580 
9581 	slave = intel_dsc_get_bigjoiner_secondary(crtc);
9582 	if (!slave) {
9583 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9584 			      "CRTC + 1 to be used, doesn't exist\n",
9585 			      crtc->base.base.id, crtc->base.name);
9586 		return -EINVAL;
9587 	}
9588 
9589 	new_crtc_state->bigjoiner_linked_crtc = slave;
9590 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9591 	master = crtc;
9592 	if (IS_ERR(slave_crtc_state))
9593 		return PTR_ERR(slave_crtc_state);
9594 
9595 	/* master being enabled, slave was already configured? */
9596 	if (slave_crtc_state->uapi.enable)
9597 		goto claimed;
9598 
9599 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9600 		      slave->base.base.id, slave->base.name);
9601 
9602 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9603 
9604 claimed:
9605 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9606 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9607 		      slave->base.base.id, slave->base.name,
9608 		      master->base.base.id, master->base.name);
9609 	return -EINVAL;
9610 }
9611 
9612 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9613 				 struct intel_crtc_state *master_crtc_state)
9614 {
9615 	struct intel_crtc_state *slave_crtc_state =
9616 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9617 
9618 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9619 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9620 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9621 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9622 }
9623 
9624 /**
9625  * DOC: asynchronous flip implementation
9626  *
9627  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9628  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9629  * Correspondingly, support is currently added for primary plane only.
9630  *
9631  * Async flip can only change the plane surface address, so anything else
9632  * changing is rejected from the intel_atomic_check_async() function.
9633  * Once this check is cleared, flip done interrupt is enabled using
9634  * the intel_crtc_enable_flip_done() function.
9635  *
9636  * As soon as the surface address register is written, flip done interrupt is
9637  * generated and the requested events are sent to userspace in the interrupt
9638  * handler itself. The timestamp and sequence sent during the flip done event
9639  * correspond to the last vblank and have no relation to the actual time when
9640  * the flip done event was sent.
9641  */
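/*
 * Illustrative userspace trigger (an assumption, not part of this file):
 * the async path is requested through the legacy page flip ioctl, e.g.
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);
 *
 * Anything beyond a plane surface address change makes the check below
 * fail with -EINVAL.
 */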
9642 static int intel_atomic_check_async(struct intel_atomic_state *state)
9643 {
9644 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9645 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9646 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9647 	struct intel_crtc *crtc;
9648 	struct intel_plane *plane;
9649 	int i;
9650 
9651 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9652 					    new_crtc_state, i) {
9653 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9654 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9655 			return -EINVAL;
9656 		}
9657 
9658 		if (!new_crtc_state->hw.active) {
9659 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9660 			return -EINVAL;
9661 		}
9662 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9663 			drm_dbg_kms(&i915->drm,
9664 				    "Active planes cannot be changed during async flip\n");
9665 			return -EINVAL;
9666 		}
9667 	}
9668 
9669 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9670 					     new_plane_state, i) {
9671 		/*
9672 		 * TODO: Async flip is only supported through the page flip IOCTL
9673 		 * as of now, so support is currently added for the primary plane only.
9674 		 * Support for other planes on platforms that support this
9675 		 * (vlv/chv and icl+) should be added when async flip is
9676 		 * enabled in the atomic IOCTL path.
9677 		 */
9678 		if (!plane->async_flip)
9679 			return -EINVAL;
9680 
9681 		/*
9682 		 * FIXME: This check is kept generic for all platforms.
9683 		 * Need to verify this for all gen9 platforms to enable
9684 		 * this selectively if required.
9685 		 */
9686 		switch (new_plane_state->hw.fb->modifier) {
9687 		case I915_FORMAT_MOD_X_TILED:
9688 		case I915_FORMAT_MOD_Y_TILED:
9689 		case I915_FORMAT_MOD_Yf_TILED:
9690 			break;
9691 		default:
9692 			drm_dbg_kms(&i915->drm,
9693 				    "Linear memory/CCS does not support async flips\n");
9694 			return -EINVAL;
9695 		}
9696 
9697 		if (old_plane_state->view.color_plane[0].stride !=
9698 		    new_plane_state->view.color_plane[0].stride) {
9699 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9700 			return -EINVAL;
9701 		}
9702 
9703 		if (old_plane_state->hw.fb->modifier !=
9704 		    new_plane_state->hw.fb->modifier) {
9705 			drm_dbg_kms(&i915->drm,
9706 				    "Framebuffer modifiers cannot be changed in async flip\n");
9707 			return -EINVAL;
9708 		}
9709 
9710 		if (old_plane_state->hw.fb->format !=
9711 		    new_plane_state->hw.fb->format) {
9712 			drm_dbg_kms(&i915->drm,
9713 				    "Framebuffer format cannot be changed in async flip\n");
9714 			return -EINVAL;
9715 		}
9716 
9717 		if (old_plane_state->hw.rotation !=
9718 		    new_plane_state->hw.rotation) {
9719 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9720 			return -EINVAL;
9721 		}
9722 
9723 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9724 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9725 			drm_dbg_kms(&i915->drm,
9726 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9727 			return -EINVAL;
9728 		}
9729 
9730 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9731 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9732 			return -EINVAL;
9733 		}
9734 
9735 		if (old_plane_state->hw.pixel_blend_mode !=
9736 		    new_plane_state->hw.pixel_blend_mode) {
9737 			drm_dbg_kms(&i915->drm,
9738 				    "Pixel blend mode cannot be changed in async flip\n");
9739 			return -EINVAL;
9740 		}
9741 
9742 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9743 			drm_dbg_kms(&i915->drm,
9744 				    "Color encoding cannot be changed in async flip\n");
9745 			return -EINVAL;
9746 		}
9747 
9748 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9749 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9750 			return -EINVAL;
9751 		}
9752 	}
9753 
9754 	return 0;
9755 }
9756 
9757 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9758 {
9759 	struct intel_crtc_state *crtc_state;
9760 	struct intel_crtc *crtc;
9761 	int i;
9762 
9763 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9764 		struct intel_crtc_state *linked_crtc_state;
9765 		struct intel_crtc *linked_crtc;
9766 		int ret;
9767 
9768 		if (!crtc_state->bigjoiner)
9769 			continue;
9770 
9771 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9772 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9773 		if (IS_ERR(linked_crtc_state))
9774 			return PTR_ERR(linked_crtc_state);
9775 
9776 		if (!intel_crtc_needs_modeset(crtc_state))
9777 			continue;
9778 
9779 		linked_crtc_state->uapi.mode_changed = true;
9780 
9781 		ret = drm_atomic_add_affected_connectors(&state->base,
9782 							 &linked_crtc->base);
9783 		if (ret)
9784 			return ret;
9785 
9786 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
9787 		if (ret)
9788 			return ret;
9789 	}
9790 
9791 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9792 		/* Kill old bigjoiner link, we may re-establish afterwards */
9793 		if (intel_crtc_needs_modeset(crtc_state) &&
9794 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9795 			kill_bigjoiner_slave(state, crtc_state);
9796 	}
9797 
9798 	return 0;
9799 }
9800 
9801 /**
9802  * intel_atomic_check - validate state object
9803  * @dev: drm device
9804  * @_state: state to validate
9805  */
9806 static int intel_atomic_check(struct drm_device *dev,
9807 			      struct drm_atomic_state *_state)
9808 {
9809 	struct drm_i915_private *dev_priv = to_i915(dev);
9810 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
9811 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9812 	struct intel_crtc *crtc;
9813 	int ret, i;
9814 	bool any_ms = false;
9815 
9816 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9817 					    new_crtc_state, i) {
9818 		if (new_crtc_state->inherited != old_crtc_state->inherited)
9819 			new_crtc_state->uapi.mode_changed = true;
9820 	}
9821 
9822 	intel_vrr_check_modeset(state);
9823 
9824 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
9825 	if (ret)
9826 		goto fail;
9827 
9828 	ret = intel_bigjoiner_add_affected_crtcs(state);
9829 	if (ret)
9830 		goto fail;
9831 
9832 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9833 					    new_crtc_state, i) {
9834 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9835 			/* Light copy */
9836 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9837 
9838 			continue;
9839 		}
9840 
9841 		if (!new_crtc_state->uapi.enable) {
9842 			if (!new_crtc_state->bigjoiner_slave) {
9843 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9844 				any_ms = true;
9845 			}
9846 			continue;
9847 		}
9848 
9849 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9850 		if (ret)
9851 			goto fail;
9852 
9853 		ret = intel_modeset_pipe_config(state, new_crtc_state);
9854 		if (ret)
9855 			goto fail;
9856 
9857 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9858 						   new_crtc_state);
9859 		if (ret)
9860 			goto fail;
9861 	}
9862 
9863 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9864 					    new_crtc_state, i) {
9865 		if (!intel_crtc_needs_modeset(new_crtc_state))
9866 			continue;
9867 
9868 		ret = intel_modeset_pipe_config_late(new_crtc_state);
9869 		if (ret)
9870 			goto fail;
9871 
9872 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9873 	}
9874 
9875 	/*
9876 	 * Check if fastset is allowed by external dependencies like other
9877 	 * pipes and transcoders.
9878 	 *
9879 	 * Right now it only forces a full modeset when the MST master
9880 	 * transcoder did not change but the pipe of the master transcoder
9881 	 * needs a full modeset, in which case all slaves also need a full
9882 	 * modeset. And in the case of port synced crtcs, if one of the
9883 	 * synced crtcs needs a full modeset, all the other synced crtcs
9884 	 * are forced to do a full modeset as well.
9885 	 */
9886 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9887 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9888 			continue;
9889 
9890 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9891 			enum transcoder master = new_crtc_state->mst_master_transcoder;
9892 
9893 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9894 				new_crtc_state->uapi.mode_changed = true;
9895 				new_crtc_state->update_pipe = false;
9896 			}
9897 		}
9898 
9899 		if (is_trans_port_sync_mode(new_crtc_state)) {
9900 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
9901 
9902 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9903 				trans |= BIT(new_crtc_state->master_transcoder);
9904 
9905 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
9906 				new_crtc_state->uapi.mode_changed = true;
9907 				new_crtc_state->update_pipe = false;
9908 			}
9909 		}
9910 
9911 		if (new_crtc_state->bigjoiner) {
9912 			struct intel_crtc_state *linked_crtc_state =
9913 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9914 
9915 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
9916 				new_crtc_state->uapi.mode_changed = true;
9917 				new_crtc_state->update_pipe = false;
9918 			}
9919 		}
9920 	}
9921 
9922 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9923 					    new_crtc_state, i) {
9924 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9925 			any_ms = true;
9926 			continue;
9927 		}
9928 
9929 		if (!new_crtc_state->update_pipe)
9930 			continue;
9931 
9932 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9933 	}
9934 
9935 	if (any_ms && !check_digital_port_conflicts(state)) {
9936 		drm_dbg_kms(&dev_priv->drm,
9937 			    "rejecting conflicting digital port configuration\n");
9938 		ret = -EINVAL;
9939 		goto fail;
9940 	}
9941 
9942 	ret = drm_dp_mst_atomic_check(&state->base);
9943 	if (ret)
9944 		goto fail;
9945 
9946 	ret = intel_atomic_check_planes(state);
9947 	if (ret)
9948 		goto fail;
9949 
9950 	intel_fbc_choose_crtc(dev_priv, state);
9951 	ret = calc_watermark_data(state);
9952 	if (ret)
9953 		goto fail;
9954 
9955 	ret = intel_bw_atomic_check(state);
9956 	if (ret)
9957 		goto fail;
9958 
9959 	ret = intel_atomic_check_cdclk(state, &any_ms);
9960 	if (ret)
9961 		goto fail;
9962 
9963 	if (intel_any_crtc_needs_modeset(state))
9964 		any_ms = true;
9965 
9966 	if (any_ms) {
9967 		ret = intel_modeset_checks(state);
9968 		if (ret)
9969 			goto fail;
9970 
9971 		ret = intel_modeset_calc_cdclk(state);
9972 		if (ret)
9973 			return ret;
9974 
9975 		intel_modeset_clear_plls(state);
9976 	}
9977 
9978 	ret = intel_atomic_check_crtcs(state);
9979 	if (ret)
9980 		goto fail;
9981 
9982 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9983 					    new_crtc_state, i) {
9984 		if (new_crtc_state->uapi.async_flip) {
9985 			ret = intel_atomic_check_async(state);
9986 			if (ret)
9987 				goto fail;
9988 		}
9989 
9990 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
9991 		    !new_crtc_state->update_pipe)
9992 			continue;
9993 
9994 		intel_dump_pipe_config(new_crtc_state, state,
9995 				       intel_crtc_needs_modeset(new_crtc_state) ?
9996 				       "[modeset]" : "[fastset]");
9997 	}
9998 
9999 	return 0;
10000 
10001  fail:
10002 	if (ret == -EDEADLK)
10003 		return ret;
10004 
10005 	/*
10006 	 * FIXME would probably be nice to know which crtc specifically
10007 	 * caused the failure, in cases where we can pinpoint it.
10008 	 */
10009 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10010 					    new_crtc_state, i)
10011 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
10012 
10013 	return ret;
10014 }
10015 
10016 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
10017 {
10018 	struct intel_crtc_state *crtc_state;
10019 	struct intel_crtc *crtc;
10020 	int i, ret;
10021 
10022 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
10023 	if (ret < 0)
10024 		return ret;
10025 
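	/*
	 * Pre-allocate a DSB (Display State Buffer) for every pipe that will
	 * update its color management or pipe state, so the relevant register
	 * writes can be batched later in the commit path.
	 */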
10026 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
10027 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
10028 
10029 		if (mode_changed || crtc_state->update_pipe ||
10030 		    crtc_state->uapi.color_mgmt_changed) {
10031 			intel_dsb_prepare(crtc_state);
10032 		}
10033 	}
10034 
10035 	return 0;
10036 }
10037 
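/*
 * (Re)enable FIFO underrun reporting for the pipe and, if present, its PCH
 * transcoder. On gen2 this is skipped while no planes are active, since such
 * pipes report spurious underruns.
 */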
10038 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
10039 				  struct intel_crtc_state *crtc_state)
10040 {
10041 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10042 
10043 	if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes)
10044 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10045 
10046 	if (crtc_state->has_pch_encoder) {
10047 		enum pipe pch_transcoder =
10048 			intel_crtc_pch_transcoder(crtc);
10049 
10050 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
10051 	}
10052 }
10053 
10054 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
10055 			       const struct intel_crtc_state *new_crtc_state)
10056 {
10057 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
10058 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10059 
10060 	/*
10061 	 * Update pipe size and adjust fitter if needed: the reason for this is
10062 	 * that in compute_mode_changes we check the native mode (not the pfit
10063 	 * mode) to see if we can flip rather than do a full mode set. In the
10064 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
10065 	 * pfit state, we'll end up with a big fb scanned out into the wrong
10066 	 * sized surface.
10067 	 */
10068 	intel_set_pipe_src_size(new_crtc_state);
10069 
10070 	/* on skylake this is done by detaching scalers */
10071 	if (DISPLAY_VER(dev_priv) >= 9) {
10072 		if (new_crtc_state->pch_pfit.enabled)
10073 			skl_pfit_enable(new_crtc_state);
10074 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10075 		if (new_crtc_state->pch_pfit.enabled)
10076 			ilk_pfit_enable(new_crtc_state);
10077 		else if (old_crtc_state->pch_pfit.enabled)
10078 			ilk_pfit_disable(old_crtc_state);
10079 	}
10080 
10081 	/*
10082 	 * The register is supposedly single buffered so perhaps
10083 	 * not 100% correct to do this here. But SKL+ calculate
10084 	 * this based on the adjusted pixel rate, so pfit changes do
10085 	 * affect it and so it must be updated for fastsets.
10086 	 * HSW/BDW only really need this here for fastboot, after
10087 	 * that the value should not change without a full modeset.
10088 	 */
10089 	if (DISPLAY_VER(dev_priv) >= 9 ||
10090 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
10091 		hsw_set_linetime_wm(new_crtc_state);
10092 
10093 	if (DISPLAY_VER(dev_priv) >= 11)
10094 		icl_set_pipe_chicken(new_crtc_state);
10095 }
10096 
10097 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
10098 				   struct intel_crtc *crtc)
10099 {
10100 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10101 	const struct intel_crtc_state *old_crtc_state =
10102 		intel_atomic_get_old_crtc_state(state, crtc);
10103 	const struct intel_crtc_state *new_crtc_state =
10104 		intel_atomic_get_new_crtc_state(state, crtc);
10105 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10106 
10107 	/*
10108 	 * During modesets, the pipe configuration was programmed as the
10109 	 * CRTC was enabled.
10110 	 */
10111 	if (!modeset) {
10112 		if (new_crtc_state->uapi.color_mgmt_changed ||
10113 		    new_crtc_state->update_pipe)
10114 			intel_color_commit(new_crtc_state);
10115 
10116 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
10117 			bdw_set_pipemisc(new_crtc_state);
10118 
10119 		if (new_crtc_state->update_pipe)
10120 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
10121 
10122 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
10123 	}
10124 
10125 	if (dev_priv->display.atomic_update_watermarks)
10126 		dev_priv->display.atomic_update_watermarks(state, crtc);
10127 }
10128 
10129 static void commit_pipe_post_planes(struct intel_atomic_state *state,
10130 				    struct intel_crtc *crtc)
10131 {
10132 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10133 	const struct intel_crtc_state *new_crtc_state =
10134 		intel_atomic_get_new_crtc_state(state, crtc);
10135 
10136 	/*
10137 	 * Disable the scaler(s) after the plane(s) so that we don't
10138 	 * get a catastrophic underrun even if the two operations
10139 	 * end up happening in two different frames.
10140 	 */
10141 	if (DISPLAY_VER(dev_priv) >= 9 &&
10142 	    !intel_crtc_needs_modeset(new_crtc_state))
10143 		skl_detach_scalers(new_crtc_state);
10144 }
10145 
10146 static void intel_enable_crtc(struct intel_atomic_state *state,
10147 			      struct intel_crtc *crtc)
10148 {
10149 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10150 	const struct intel_crtc_state *new_crtc_state =
10151 		intel_atomic_get_new_crtc_state(state, crtc);
10152 
10153 	if (!intel_crtc_needs_modeset(new_crtc_state))
10154 		return;
10155 
10156 	intel_crtc_update_active_timings(new_crtc_state);
10157 
10158 	dev_priv->display.crtc_enable(state, crtc);
10159 
10160 	if (new_crtc_state->bigjoiner_slave)
10161 		return;
10162 
10163 	/* vblanks work again, re-enable pipe CRC. */
10164 	intel_crtc_enable_pipe_crc(crtc);
10165 }
10166 
10167 static void intel_update_crtc(struct intel_atomic_state *state,
10168 			      struct intel_crtc *crtc)
10169 {
10170 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10171 	const struct intel_crtc_state *old_crtc_state =
10172 		intel_atomic_get_old_crtc_state(state, crtc);
10173 	struct intel_crtc_state *new_crtc_state =
10174 		intel_atomic_get_new_crtc_state(state, crtc);
10175 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10176 
10177 	if (!modeset) {
10178 		if (new_crtc_state->preload_luts &&
10179 		    (new_crtc_state->uapi.color_mgmt_changed ||
10180 		     new_crtc_state->update_pipe))
10181 			intel_color_load_luts(new_crtc_state);
10182 
10183 		intel_pre_plane_update(state, crtc);
10184 
10185 		if (new_crtc_state->update_pipe)
10186 			intel_encoders_update_pipe(state, crtc);
10187 	}
10188 
10189 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
10190 		intel_fbc_disable(crtc);
10191 	else
10192 		intel_fbc_enable(state, crtc);
10193 
10194 	/* Perform vblank evasion around commit operation */
10195 	intel_pipe_update_start(new_crtc_state);
10196 
10197 	commit_pipe_pre_planes(state, crtc);
10198 
10199 	if (DISPLAY_VER(dev_priv) >= 9)
10200 		skl_update_planes_on_crtc(state, crtc);
10201 	else
10202 		i9xx_update_planes_on_crtc(state, crtc);
10203 
10204 	commit_pipe_post_planes(state, crtc);
10205 
10206 	intel_pipe_update_end(new_crtc_state);
10207 
10208 	/*
10209 	 * We usually enable FIFO underrun interrupts as part of the
10210 	 * CRTC enable sequence during modesets.  But when we inherit a
10211 	 * valid pipe configuration from the BIOS we need to take care
10212 	 * of enabling them on the CRTC's first fastset.
10213 	 */
10214 	if (new_crtc_state->update_pipe && !modeset &&
10215 	    old_crtc_state->inherited)
10216 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
10217 }
10218 
10219 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
10220 					  struct intel_crtc_state *old_crtc_state,
10221 					  struct intel_crtc_state *new_crtc_state,
10222 					  struct intel_crtc *crtc)
10223 {
10224 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10225 
10226 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
10227 
10228 	intel_encoders_pre_disable(state, crtc);
10229 
10230 	intel_crtc_disable_planes(state, crtc);
10231 
10232 	/*
10233 	 * We still need special handling for disabling bigjoiner master
10234 	 * and slaves, since a slave has no encoder or PLLs of its own,
10235 	 * so we don't need to disable those for it.
10236 	 */
10237 	if (old_crtc_state->bigjoiner) {
10238 		intel_crtc_disable_planes(state,
10239 					  old_crtc_state->bigjoiner_linked_crtc);
10240 		old_crtc_state->bigjoiner_linked_crtc->active = false;
10241 	}
10242 
10243 	/*
10244 	 * We need to disable pipe CRC before disabling the pipe,
10245 	 * or we race against vblank off.
10246 	 */
10247 	intel_crtc_disable_pipe_crc(crtc);
10248 
10249 	dev_priv->display.crtc_disable(state, crtc);
10250 	crtc->active = false;
10251 	intel_fbc_disable(crtc);
10252 	intel_disable_shared_dpll(old_crtc_state);
10253 
10254 	/* FIXME unify this for all platforms */
10255 	if (!new_crtc_state->hw.active &&
10256 	    !HAS_GMCH(dev_priv) &&
10257 	    dev_priv->display.initial_watermarks)
10258 		dev_priv->display.initial_watermarks(state, crtc);
10259 }
10260 
10261 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
10262 {
10263 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10264 	struct intel_crtc *crtc;
10265 	u32 handled = 0;
10266 	int i;
10267 
10268 	/* Only disable port sync and MST slaves */
10269 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10270 					    new_crtc_state, i) {
10271 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
10272 			continue;
10273 
10274 		if (!old_crtc_state->hw.active)
10275 			continue;
10276 
10277 		/* In case of Transcoder Port Sync, master and slave CRTCs can
10278 		 * be assigned in any order, and we need to make sure that the
10279 		 * slave CRTCs are disabled first and the master CRTC after,
10280 		 * since slave vblanks are masked until the master's vblank.
10281 		 */
10282 		if (!is_trans_port_sync_slave(old_crtc_state) &&
10283 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
10284 			continue;
10285 
10286 		intel_pre_plane_update(state, crtc);
10287 		intel_old_crtc_state_disables(state, old_crtc_state,
10288 					      new_crtc_state, crtc);
10289 		handled |= BIT(crtc->pipe);
10290 	}
10291 
10292 	/* Disable everything else left on */
10293 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10294 					    new_crtc_state, i) {
10295 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
10296 		    (handled & BIT(crtc->pipe)) ||
10297 		    old_crtc_state->bigjoiner_slave)
10298 			continue;
10299 
10300 		intel_pre_plane_update(state, crtc);
10301 		if (old_crtc_state->bigjoiner) {
10302 			struct intel_crtc *slave =
10303 				old_crtc_state->bigjoiner_linked_crtc;
10304 
10305 			intel_pre_plane_update(state, slave);
10306 		}
10307 
10308 		if (old_crtc_state->hw.active)
10309 			intel_old_crtc_state_disables(state, old_crtc_state,
10310 						      new_crtc_state, crtc);
10311 	}
10312 }
10313 
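/*
 * Enable and update every pipe that is active in the new state; pipe
 * ordering does not matter here. skl+ use the DDB-aware
 * skl_commit_modeset_enables() below instead.
 */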
10314 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
10315 {
10316 	struct intel_crtc_state *new_crtc_state;
10317 	struct intel_crtc *crtc;
10318 	int i;
10319 
10320 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10321 		if (!new_crtc_state->hw.active)
10322 			continue;
10323 
10324 		intel_enable_crtc(state, crtc);
10325 		intel_update_crtc(state, crtc);
10326 	}
10327 }
10328 
10329 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
10330 {
10331 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
10332 	struct intel_crtc *crtc;
10333 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10334 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
10335 	u8 update_pipes = 0, modeset_pipes = 0;
10336 	int i;
10337 
10338 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10339 		enum pipe pipe = crtc->pipe;
10340 
10341 		if (!new_crtc_state->hw.active)
10342 			continue;
10343 
10344 		/* Ignore allocations for CRTCs that have been turned off. */
10345 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
10346 			entries[pipe] = old_crtc_state->wm.skl.ddb;
10347 			update_pipes |= BIT(pipe);
10348 		} else {
10349 			modeset_pipes |= BIT(pipe);
10350 		}
10351 	}
10352 
10353 	/*
10354 	 * Whenever the number of active pipes changes, we need to make sure we
10355 	 * update the pipes in the right order so that their ddb allocations
10356 	 * never overlap with each other between CRTC updates. Otherwise we'll
10357 	 * cause pipe underruns and other bad stuff.
10358 	 *
10359 	 * So first let's enable all pipes that do not need a full modeset as
10360 	 * those don't have any external dependency.
10361 	 */
10362 	while (update_pipes) {
10363 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10364 						    new_crtc_state, i) {
10365 			enum pipe pipe = crtc->pipe;
10366 
10367 			if ((update_pipes & BIT(pipe)) == 0)
10368 				continue;
10369 
10370 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10371 							entries, I915_MAX_PIPES, pipe))
10372 				continue;
10373 
10374 			entries[pipe] = new_crtc_state->wm.skl.ddb;
10375 			update_pipes &= ~BIT(pipe);
10376 
10377 			intel_update_crtc(state, crtc);
10378 
10379 			/*
10380 			 * If this is an already active pipe whose DDB changed,
10381 			 * and this isn't the last pipe that needs updating,
10382 			 * then we need to wait for a vblank to pass for the
10383 			 * new ddb allocation to take effect.
10384 			 */
10385 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
10386 						 &old_crtc_state->wm.skl.ddb) &&
10387 			    (update_pipes | modeset_pipes))
10388 				intel_wait_for_vblank(dev_priv, pipe);
10389 		}
10390 	}
10391 
10392 	update_pipes = modeset_pipes;
10393 
10394 	/*
10395 	 * Enable all pipes that need a modeset and do not depend on other
10396 	 * pipes.
10397 	 */
10398 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10399 		enum pipe pipe = crtc->pipe;
10400 
10401 		if ((modeset_pipes & BIT(pipe)) == 0)
10402 			continue;
10403 
10404 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10405 		    is_trans_port_sync_master(new_crtc_state) ||
10406 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10407 			continue;
10408 
10409 		modeset_pipes &= ~BIT(pipe);
10410 
10411 		intel_enable_crtc(state, crtc);
10412 	}
10413 
10414 	/*
10415 	 * Then we enable all remaining pipes that depend on other
10416 	 * pipes: MST slaves, port sync masters and bigjoiner masters.
10417 	 */
10418 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10419 		enum pipe pipe = crtc->pipe;
10420 
10421 		if ((modeset_pipes & BIT(pipe)) == 0)
10422 			continue;
10423 
10424 		modeset_pipes &= ~BIT(pipe);
10425 
10426 		intel_enable_crtc(state, crtc);
10427 	}
10428 
10429 	/*
10430 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10431 	 */
10432 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10433 		enum pipe pipe = crtc->pipe;
10434 
10435 		if ((update_pipes & BIT(pipe)) == 0)
10436 			continue;
10437 
10438 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10439 									entries, I915_MAX_PIPES, pipe));
10440 
10441 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10442 		update_pipes &= ~BIT(pipe);
10443 
10444 		intel_update_crtc(state, crtc);
10445 	}
10446 
10447 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10448 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10449 }
10450 
10451 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10452 {
10453 	struct intel_atomic_state *state, *next;
10454 	struct llist_node *freed;
10455 
10456 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10457 	llist_for_each_entry_safe(state, next, freed, freed)
10458 		drm_atomic_state_put(&state->base);
10459 }
10460 
10461 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10462 {
10463 	struct drm_i915_private *dev_priv =
10464 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10465 
10466 	intel_atomic_helper_free_state(dev_priv);
10467 }
10468 
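/*
 * Wait for the commit_ready fence to signal, also waking up when a GPU
 * reset requiring a modeset is flagged so the commit cannot deadlock
 * against the reset path.
 */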
10469 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10470 {
10471 	struct wait_queue_entry wait_fence, wait_reset;
10472 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10473 
10474 #ifdef notyet
10475 	init_wait_entry(&wait_fence, 0);
10476 	init_wait_entry(&wait_reset, 0);
10477 	for (;;) {
10478 		prepare_to_wait(&intel_state->commit_ready.wait,
10479 				&wait_fence, TASK_UNINTERRUPTIBLE);
10480 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10481 					      I915_RESET_MODESET),
10482 				&wait_reset, TASK_UNINTERRUPTIBLE);
10483 
10484 
10485 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10486 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10487 			break;
10488 
10489 		schedule();
10490 	}
10491 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10492 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10493 				  I915_RESET_MODESET),
10494 		    &wait_reset);
10495 #else
10496 	/* XXX above recurses sch_mtx */
10497 	init_wait_entry(&wait_fence, 0);
10498 	for (;;) {
10499 		prepare_to_wait(&intel_state->commit_ready.wait,
10500 				&wait_fence, TASK_UNINTERRUPTIBLE);
10501 
10502 
10503 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10504 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10505 			break;
10506 
10507 		schedule();
10508 	}
10509 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10510 #endif
10511 }
10512 
10513 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10514 {
10515 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10516 	struct intel_crtc *crtc;
10517 	int i;
10518 
10519 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10520 					    new_crtc_state, i)
10521 		intel_dsb_cleanup(old_crtc_state);
10522 }
10523 
10524 static void intel_atomic_cleanup_work(struct work_struct *work)
10525 {
10526 	struct intel_atomic_state *state =
10527 		container_of(work, struct intel_atomic_state, base.commit_work);
10528 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10529 
10530 	intel_cleanup_dsbs(state);
10531 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10532 	drm_atomic_helper_commit_cleanup_done(&state->base);
10533 	drm_atomic_state_put(&state->base);
10534 
10535 	intel_atomic_helper_free_state(i915);
10536 }
10537 
10538 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10539 {
10540 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10541 	struct intel_plane *plane;
10542 	struct intel_plane_state *plane_state;
10543 	int i;
10544 
10545 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10546 		struct drm_framebuffer *fb = plane_state->hw.fb;
10547 		int ret;
10548 
10549 		if (!fb ||
10550 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10551 			continue;
10552 
10553 		/*
10554 		 * The layout of the fast clear color value expected by HW
10555 		 * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
10556 		 * - 4 x 4 bytes per-channel value
10557 		 *   (in surface type specific float/int format provided by the fb user)
10558 		 * - 8 bytes native color value used by the display
10559 		 *   (converted/written by GPU during a fast clear operation using the
10560 		 *    above per-channel values)
10561 		 *
10562 		 * The commit's FB prepare hook already ensured that the FB obj is pinned
10563 		 * and the caller made sure that the object is synced wrt. the related color
10564 		 * clear value GPU write on it.
10565 		 */
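		/*
		 * Read the 8 byte native color value that follows the
		 * 4 x 4 byte per-channel values at the start of plane #2.
		 */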
10566 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10567 						     fb->offsets[2] + 16,
10568 						     &plane_state->ccval,
10569 						     sizeof(plane_state->ccval));
10570 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10571 		drm_WARN_ON(&i915->drm, ret);
10572 	}
10573 }
10574 
10575 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10576 {
10577 	struct drm_device *dev = state->base.dev;
10578 	struct drm_i915_private *dev_priv = to_i915(dev);
10579 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10580 	struct intel_crtc *crtc;
10581 	u64 put_domains[I915_MAX_PIPES] = {};
10582 	intel_wakeref_t wakeref = 0;
10583 	int i;
10584 
10585 	intel_atomic_commit_fence_wait(state);
10586 
10587 	drm_atomic_helper_wait_for_dependencies(&state->base);
10588 
10589 	if (state->modeset)
10590 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10591 
10592 	intel_atomic_prepare_plane_clear_colors(state);
10593 
10594 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10595 					    new_crtc_state, i) {
10596 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10597 		    new_crtc_state->update_pipe) {
10598 
10599 			put_domains[crtc->pipe] =
10600 				modeset_get_crtc_power_domains(new_crtc_state);
10601 		}
10602 	}
10603 
10604 	intel_commit_modeset_disables(state);
10605 
10606 	/* FIXME: Eventually get rid of our crtc->config pointer */
10607 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10608 		crtc->config = new_crtc_state;
10609 
10610 	if (state->modeset) {
10611 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10612 
10613 		intel_set_cdclk_pre_plane_update(state);
10614 
10615 		intel_modeset_verify_disabled(dev_priv, state);
10616 	}
10617 
10618 	intel_sagv_pre_plane_update(state);
10619 
10620 	/* Complete the events for pipes that have now been disabled */
10621 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10622 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10623 
10624 		/* Complete events for the now disabled pipes here. */
10625 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10626 			spin_lock_irq(&dev->event_lock);
10627 			drm_crtc_send_vblank_event(&crtc->base,
10628 						   new_crtc_state->uapi.event);
10629 			spin_unlock_irq(&dev->event_lock);
10630 
10631 			new_crtc_state->uapi.event = NULL;
10632 		}
10633 	}
10634 
10635 	if (state->modeset)
10636 		intel_encoders_update_prepare(state);
10637 
10638 	intel_dbuf_pre_plane_update(state);
10639 
10640 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10641 		if (new_crtc_state->uapi.async_flip)
10642 			intel_crtc_enable_flip_done(state, crtc);
10643 	}
10644 
10645 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10646 	dev_priv->display.commit_modeset_enables(state);
10647 
10648 	if (state->modeset) {
10649 		intel_encoders_update_complete(state);
10650 
10651 		intel_set_cdclk_post_plane_update(state);
10652 	}
10653 
10654 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10655 	 * already, but still need the state for the delayed optimization. To
10656 	 * fix this:
10657 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10658 	 * - schedule that vblank worker _before_ calling hw_done
10659 	 * - at the start of commit_tail, cancel it _synchronously_
10660 	 * - switch over to the vblank wait helper in the core after that since
10661 	 *   we don't need our special handling any more.
10662 	 */
10663 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10664 
10665 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10666 		if (new_crtc_state->uapi.async_flip)
10667 			intel_crtc_disable_flip_done(state, crtc);
10668 
10669 		if (new_crtc_state->hw.active &&
10670 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10671 		    !new_crtc_state->preload_luts &&
10672 		    (new_crtc_state->uapi.color_mgmt_changed ||
10673 		     new_crtc_state->update_pipe))
10674 			intel_color_load_luts(new_crtc_state);
10675 	}
10676 
10677 	/*
10678 	 * Now that the vblank has passed, we can go ahead and program the
10679 	 * optimal watermarks on platforms that need two-step watermark
10680 	 * programming.
10681 	 *
10682 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10683 	 */
10684 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10685 					    new_crtc_state, i) {
10686 		/*
10687 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10688 		 * So re-enable underrun reporting after some planes get enabled.
10689 		 *
10690 		 * We do this before .optimize_watermarks() so that we have a
10691 		 * chance of catching underruns with the intermediate watermarks
10692 		 * vs. the new plane configuration.
10693 		 */
10694 		if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
10695 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10696 
10697 		if (dev_priv->display.optimize_watermarks)
10698 			dev_priv->display.optimize_watermarks(state, crtc);
10699 	}
10700 
10701 	intel_dbuf_post_plane_update(state);
10702 
10703 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10704 		intel_post_plane_update(state, crtc);
10705 
10706 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10707 
10708 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10709 
10710 		/*
10711 		 * DSB cleanup is done in cleanup_work, aligned with framebuffer
10712 		 * cleanup. So copy and reset the dsb structure to stay in sync
10713 		 * with commit_done, and do the dsb cleanup later in cleanup_work.
10714 		 */
10715 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10716 	}
10717 
10718 	/* Underruns don't always raise interrupts, so check manually */
10719 	intel_check_cpu_fifo_underruns(dev_priv);
10720 	intel_check_pch_fifo_underruns(dev_priv);
10721 
10722 	if (state->modeset)
10723 		intel_verify_planes(state);
10724 
10725 	intel_sagv_post_plane_update(state);
10726 
10727 	drm_atomic_helper_commit_hw_done(&state->base);
10728 
10729 	if (state->modeset) {
10730 		/* As one of the primary mmio accessors, KMS has a high
10731 		 * likelihood of triggering bugs in unclaimed access. After we
10732 		 * finish modesetting, see if an error has been flagged, and if
10733 		 * so enable debugging for the next modeset - and hope we catch
10734 		 * the culprit.
10735 		 */
10736 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10737 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10738 	}
10739 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10740 
10741 	/*
10742 	 * Defer the cleanup of the old state to a separate worker to not
10743 	 * impede the current tasks (userspace for blocking modesets) that
10744 	 * are executed inline. For out-of-line asynchronous modesets/flips,
10745 	 * deferring to a new worker seems overkill, but we would place a
10746 	 * schedule point (cond_resched()) here anyway to keep latencies
10747 	 * down.
10748 	 */
10749 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10750 	queue_work(system_highpri_wq, &state->base.commit_work);
10751 }
10752 
10753 static void intel_atomic_commit_work(struct work_struct *work)
10754 {
10755 	struct intel_atomic_state *state =
10756 		container_of(work, struct intel_atomic_state, base.commit_work);
10757 
10758 	intel_atomic_commit_tail(state);
10759 }
10760 
10761 static int __i915_sw_fence_call
10762 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10763 			  enum i915_sw_fence_notify notify)
10764 {
10765 	struct intel_atomic_state *state =
10766 		container_of(fence, struct intel_atomic_state, commit_ready);
10767 
10768 	switch (notify) {
10769 	case FENCE_COMPLETE:
10770 		/* we do blocking waits in the worker, nothing to do here */
10771 		break;
10772 	case FENCE_FREE:
10773 		{
10774 			struct intel_atomic_helper *helper =
10775 				&to_i915(state->base.dev)->atomic_helper;
10776 
10777 			if (llist_add(&state->freed, &helper->free_list))
10778 				schedule_work(&helper->free_work);
10779 			break;
10780 		}
10781 	}
10782 
10783 	return NOTIFY_DONE;
10784 }
10785 
10786 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10787 {
10788 	struct intel_plane_state *old_plane_state, *new_plane_state;
10789 	struct intel_plane *plane;
10790 	int i;
10791 
10792 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10793 					     new_plane_state, i)
10794 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10795 					to_intel_frontbuffer(new_plane_state->hw.fb),
10796 					plane->frontbuffer_bit);
10797 }
10798 
10799 static int intel_atomic_commit(struct drm_device *dev,
10800 			       struct drm_atomic_state *_state,
10801 			       bool nonblock)
10802 {
10803 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10804 	struct drm_i915_private *dev_priv = to_i915(dev);
10805 	int ret = 0;
10806 
10807 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10808 
10809 	drm_atomic_state_get(&state->base);
10810 	i915_sw_fence_init(&state->commit_ready,
10811 			   intel_atomic_commit_ready);
10812 
10813 	/*
10814 	 * The intel_legacy_cursor_update() fast path takes care
10815 	 * of avoiding the vblank waits for simple cursor
10816 	 * movement and flips. For cursor on/off and size changes,
10817 	 * we want to perform the vblank waits so that watermark
10818 	 * updates happen during the correct frames. Gen9+ have
10819 	 * double buffered watermarks and so shouldn't need this.
10820 	 *
10821 	 * Unset state->legacy_cursor_update before the call to
10822 	 * drm_atomic_helper_setup_commit() because otherwise
10823 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
10824 	 * we get FIFO underruns because we didn't wait
10825 	 * for vblank.
10826 	 *
10827 	 * FIXME doing watermarks and fb cleanup from a vblank worker
10828 	 * (assuming we had any) would solve these problems.
10829 	 */
10830 	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
10831 		struct intel_crtc_state *new_crtc_state;
10832 		struct intel_crtc *crtc;
10833 		int i;
10834 
10835 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10836 			if (new_crtc_state->wm.need_postvbl_update ||
10837 			    new_crtc_state->update_wm_post)
10838 				state->base.legacy_cursor_update = false;
10839 	}
10840 
10841 	ret = intel_atomic_prepare_commit(state);
10842 	if (ret) {
10843 		drm_dbg_atomic(&dev_priv->drm,
10844 			       "Preparing state failed with %i\n", ret);
10845 		i915_sw_fence_commit(&state->commit_ready);
10846 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10847 		return ret;
10848 	}
10849 
10850 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
10851 	if (!ret)
10852 		ret = drm_atomic_helper_swap_state(&state->base, true);
10853 	if (!ret)
10854 		intel_atomic_swap_global_state(state);
10855 
10856 	if (ret) {
10857 		struct intel_crtc_state *new_crtc_state;
10858 		struct intel_crtc *crtc;
10859 		int i;
10860 
10861 		i915_sw_fence_commit(&state->commit_ready);
10862 
10863 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10864 			intel_dsb_cleanup(new_crtc_state);
10865 
10866 		drm_atomic_helper_cleanup_planes(dev, &state->base);
10867 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10868 		return ret;
10869 	}
10870 	intel_shared_dpll_swap_state(state);
10871 	intel_atomic_track_fbs(state);
10872 
10873 	drm_atomic_state_get(&state->base);
10874 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
10875 
10876 	i915_sw_fence_commit(&state->commit_ready);
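	/*
	 * Nonblocking modesets are queued on the dedicated modeset workqueue,
	 * plain nonblocking flips on the flip workqueue, while blocking
	 * commits run the commit tail inline after flushing pending modesets.
	 */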
10877 	if (nonblock && state->modeset) {
10878 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
10879 	} else if (nonblock) {
10880 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
10881 	} else {
10882 		if (state->modeset)
10883 			flush_workqueue(dev_priv->modeset_wq);
10884 		intel_atomic_commit_tail(state);
10885 	}
10886 
10887 	return 0;
10888 }
10889 
10890 struct wait_rps_boost {
10891 	struct wait_queue_entry wait;
10892 
10893 	struct drm_crtc *crtc;
10894 	struct i915_request *request;
10895 };
10896 
10897 static int do_rps_boost(struct wait_queue_entry *_wait,
10898 			unsigned mode, int sync, void *key)
10899 {
10900 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10901 	struct i915_request *rq = wait->request;
10902 
10903 	/*
10904 	 * If we missed the vblank, but the request is already running, it
10905 	 * is reasonable to assume that it will complete before the next
10906 	 * vblank without our intervention, so leave RPS alone.
10907 	 */
10908 	if (!i915_request_started(rq))
10909 		intel_rps_boost(rq);
10910 	i915_request_put(rq);
10911 
10912 	drm_crtc_vblank_put(wait->crtc);
10913 
10914 	list_del(&wait->wait.entry);
10915 	kfree(wait);
10916 	return 1;
10917 }
10918 
10919 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10920 				       struct dma_fence *fence)
10921 {
10922 	struct wait_rps_boost *wait;
10923 
10924 	if (!dma_fence_is_i915(fence))
10925 		return;
10926 
10927 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10928 		return;
10929 
10930 	if (drm_crtc_vblank_get(crtc))
10931 		return;
10932 
10933 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10934 	if (!wait) {
10935 		drm_crtc_vblank_put(crtc);
10936 		return;
10937 	}
10938 
10939 	wait->request = to_request(dma_fence_get(fence));
10940 	wait->crtc = crtc;
10941 
10942 	wait->wait.func = do_rps_boost;
10943 	wait->wait.flags = 0;
10944 
10945 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10946 }
10947 
10948 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10949 {
10950 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10951 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10952 	struct drm_framebuffer *fb = plane_state->hw.fb;
10953 	struct i915_vma *vma;
10954 	bool phys_cursor =
10955 		plane->id == PLANE_CURSOR &&
10956 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10957 
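	/*
	 * Framebuffers using a display page table (DPT) need the DPT itself
	 * pinned into the GGTT first and the fb then pinned into the DPT;
	 * everything else is pinned (and optionally fenced) directly in the
	 * GGTT.
	 */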
10958 	if (!intel_fb_uses_dpt(fb)) {
10959 		vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10960 						 &plane_state->view.gtt,
10961 						 intel_plane_uses_fence(plane_state),
10962 						 &plane_state->flags);
10963 		if (IS_ERR(vma))
10964 			return PTR_ERR(vma);
10965 
10966 		plane_state->ggtt_vma = vma;
10967 	} else {
10968 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10969 
10970 		vma = intel_dpt_pin(intel_fb->dpt_vm);
10971 		if (IS_ERR(vma))
10972 			return PTR_ERR(vma);
10973 
10974 		plane_state->ggtt_vma = vma;
10975 
10976 		vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
10977 					   &plane_state->flags, intel_fb->dpt_vm);
10978 		if (IS_ERR(vma)) {
10979 			intel_dpt_unpin(intel_fb->dpt_vm);
10980 			plane_state->ggtt_vma = NULL;
10981 			return PTR_ERR(vma);
10982 		}
10983 
10984 		plane_state->dpt_vma = vma;
10985 
10986 		WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
10987 	}
10988 
10989 	return 0;
10990 }
10991 
10992 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10993 {
10994 	struct drm_framebuffer *fb = old_plane_state->hw.fb;
10995 	struct i915_vma *vma;
10996 
10997 	if (!intel_fb_uses_dpt(fb)) {
10998 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
10999 		if (vma)
11000 			intel_unpin_fb_vma(vma, old_plane_state->flags);
11001 	} else {
11002 		struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11003 
11004 		vma = fetch_and_zero(&old_plane_state->dpt_vma);
11005 		if (vma)
11006 			intel_unpin_fb_vma(vma, old_plane_state->flags);
11007 
11008 		vma = fetch_and_zero(&old_plane_state->ggtt_vma);
11009 		if (vma)
11010 			intel_dpt_unpin(intel_fb->dpt_vm);
11011 	}
11012 }
11013 
11014 /**
11015  * intel_prepare_plane_fb - Prepare fb for usage on plane
11016  * @_plane: drm plane to prepare for
11017  * @_new_plane_state: the plane state being prepared
11018  *
11019  * Prepares a framebuffer for usage on a display plane.  Generally this
11020  * involves pinning the underlying object and updating the frontbuffer tracking
11021  * bits.  Some older platforms need special physical address handling for
11022  * cursor planes.
11023  *
11024  * Returns 0 on success, negative error code on failure.
11025  */
11026 int
11027 intel_prepare_plane_fb(struct drm_plane *_plane,
11028 		       struct drm_plane_state *_new_plane_state)
11029 {
11030 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
11031 	struct intel_plane *plane = to_intel_plane(_plane);
11032 	struct intel_plane_state *new_plane_state =
11033 		to_intel_plane_state(_new_plane_state);
11034 	struct intel_atomic_state *state =
11035 		to_intel_atomic_state(new_plane_state->uapi.state);
11036 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11037 	const struct intel_plane_state *old_plane_state =
11038 		intel_atomic_get_old_plane_state(state, plane);
11039 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
11040 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
11041 	int ret;
11042 
11043 	if (old_obj) {
11044 		const struct intel_crtc_state *crtc_state =
11045 			intel_atomic_get_new_crtc_state(state,
11046 							to_intel_crtc(old_plane_state->hw.crtc));
11047 
11048 		/* Big Hammer, we also need to ensure that any pending
11049 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
11050 		 * current scanout is retired before unpinning the old
11051 		 * framebuffer. Note that we rely on userspace rendering
11052 		 * into the buffer attached to the pipe they are waiting
11053 		 * on. If not, userspace generates a GPU hang with IPEHR
11054 		 * pointing to the MI_WAIT_FOR_EVENT.
11055 		 *
11056 		 * This should only fail upon a hung GPU, in which case we
11057 		 * can safely continue.
11058 		 */
11059 		if (intel_crtc_needs_modeset(crtc_state)) {
11060 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
11061 							      old_obj->base.resv, NULL,
11062 							      false, 0,
11063 							      GFP_KERNEL);
11064 			if (ret < 0)
11065 				return ret;
11066 		}
11067 	}
11068 
11069 	if (new_plane_state->uapi.fence) { /* explicit fencing */
11070 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
11071 					     &attr);
11072 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
11073 						    new_plane_state->uapi.fence,
11074 						    i915_fence_timeout(dev_priv),
11075 						    GFP_KERNEL);
11076 		if (ret < 0)
11077 			return ret;
11078 	}
11079 
11080 	if (!obj)
11081 		return 0;
11082 
11083 
11084 	ret = intel_plane_pin_fb(new_plane_state);
11085 	if (ret)
11086 		return ret;
11087 
11088 	i915_gem_object_wait_priority(obj, 0, &attr);
11089 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
11090 
11091 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
11092 		struct dma_fence *fence;
11093 
11094 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
11095 						      obj->base.resv, NULL,
11096 						      false,
11097 						      i915_fence_timeout(dev_priv),
11098 						      GFP_KERNEL);
11099 		if (ret < 0)
11100 			goto unpin_fb;
11101 
11102 		fence = dma_resv_get_excl_unlocked(obj->base.resv);
11103 		if (fence) {
11104 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11105 						   fence);
11106 			dma_fence_put(fence);
11107 		}
11108 	} else {
11109 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
11110 					   new_plane_state->uapi.fence);
11111 	}
11112 
11113 	/*
11114 	 * We declare pageflips to be interactive and so merit a small bias
11115 	 * towards upclocking to deliver the frame on time. By only changing
11116 	 * the RPS thresholds to sample more regularly and aim for higher
11117 	 * clocks we can hopefully deliver low power workloads (like kodi)
11118 	 * that are not quite steady state without resorting to forcing
11119 	 * maximum clocks following a vblank miss (see do_rps_boost()).
11120 	 */
11121 	if (!state->rps_interactive) {
11122 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
11123 		state->rps_interactive = true;
11124 	}
11125 
11126 	return 0;
11127 
11128 unpin_fb:
11129 	intel_plane_unpin_fb(new_plane_state);
11130 
11131 	return ret;
11132 }
11133 
11134 /**
11135  * intel_cleanup_plane_fb - Cleans up an fb after plane use
11136  * @plane: drm plane to clean up for
11137  * @_old_plane_state: the state from the previous modeset
11138  *
11139  * Cleans up a framebuffer that has just been removed from a plane.
11140  */
11141 void
11142 intel_cleanup_plane_fb(struct drm_plane *plane,
11143 		       struct drm_plane_state *_old_plane_state)
11144 {
11145 	struct intel_plane_state *old_plane_state =
11146 		to_intel_plane_state(_old_plane_state);
11147 	struct intel_atomic_state *state =
11148 		to_intel_atomic_state(old_plane_state->uapi.state);
11149 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
11150 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
11151 
11152 	if (!obj)
11153 		return;
11154 
11155 	if (state->rps_interactive) {
11156 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
11157 		state->rps_interactive = false;
11158 	}
11159 
11160 	/* Should only be called after a successful intel_prepare_plane_fb()! */
11161 	intel_plane_unpin_fb(old_plane_state);
11162 }
11163 
11164 /**
11165  * intel_plane_destroy - destroy a plane
11166  * @plane: plane to destroy
11167  *
11168  * Common destruction function for all types of planes (primary, cursor,
11169  * sprite).
11170  */
11171 void intel_plane_destroy(struct drm_plane *plane)
11172 {
11173 	drm_plane_cleanup(plane);
11174 	kfree(to_intel_plane(plane));
11175 }
11176 
11177 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
11178 {
11179 	struct intel_plane *plane;
11180 
11181 	for_each_intel_plane(&dev_priv->drm, plane) {
11182 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
11183 								  plane->pipe);
11184 
11185 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
11186 	}
11187 }
11188 
11189 
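/*
 * i915-specific ioctl: look up which hardware pipe backs the given KMS
 * CRTC id.
 */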
11190 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
11191 				      struct drm_file *file)
11192 {
11193 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
11194 	struct drm_crtc *drmmode_crtc;
11195 	struct intel_crtc *crtc;
11196 
11197 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
11198 	if (!drmmode_crtc)
11199 		return -ENOENT;
11200 
11201 	crtc = to_intel_crtc(drmmode_crtc);
11202 	pipe_from_crtc_id->pipe = crtc->pipe;
11203 
11204 	return 0;
11205 }
11206 
11207 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
11208 {
11209 	struct drm_device *dev = encoder->base.dev;
11210 	struct intel_encoder *source_encoder;
11211 	u32 possible_clones = 0;
11212 
11213 	for_each_intel_encoder(dev, source_encoder) {
11214 		if (encoders_cloneable(encoder, source_encoder))
11215 			possible_clones |= drm_encoder_mask(&source_encoder->base);
11216 	}
11217 
11218 	return possible_clones;
11219 }
11220 
11221 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
11222 {
11223 	struct drm_device *dev = encoder->base.dev;
11224 	struct intel_crtc *crtc;
11225 	u32 possible_crtcs = 0;
11226 
11227 	for_each_intel_crtc(dev, crtc) {
11228 		if (encoder->pipe_mask & BIT(crtc->pipe))
11229 			possible_crtcs |= drm_crtc_mask(&crtc->base);
11230 	}
11231 
11232 	return possible_crtcs;
11233 }
11234 
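/*
 * eDP on port A is only present on mobile ILK parts that have the DP_A
 * detect strap set and do not have it fused off via FUSE_STRAP.
 */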
11235 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
11236 {
11237 	if (!IS_MOBILE(dev_priv))
11238 		return false;
11239 
11240 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
11241 		return false;
11242 
11243 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
11244 		return false;
11245 
11246 	return true;
11247 }
11248 
11249 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
11250 {
11251 	if (DISPLAY_VER(dev_priv) >= 9)
11252 		return false;
11253 
11254 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
11255 		return false;
11256 
11257 	if (HAS_PCH_LPT_H(dev_priv) &&
11258 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
11259 		return false;
11260 
11261 	/* DDI E can't be used if DDI A requires 4 lanes */
11262 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
11263 		return false;
11264 
11265 	if (!dev_priv->vbt.int_crt_support)
11266 		return false;
11267 
11268 	return true;
11269 }
11270 
11271 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
11272 {
11273 	struct intel_encoder *encoder;
11274 	bool dpd_is_edp = false;
11275 
11276 	intel_pps_unlock_regs_wa(dev_priv);
11277 
11278 	if (!HAS_DISPLAY(dev_priv))
11279 		return;
11280 
11281 	if (IS_DG2(dev_priv)) {
11282 		intel_ddi_init(dev_priv, PORT_A);
11283 		intel_ddi_init(dev_priv, PORT_B);
11284 		intel_ddi_init(dev_priv, PORT_C);
11285 		intel_ddi_init(dev_priv, PORT_D_XELPD);
11286 	} else if (IS_ALDERLAKE_P(dev_priv)) {
11287 		intel_ddi_init(dev_priv, PORT_A);
11288 		intel_ddi_init(dev_priv, PORT_B);
11289 		intel_ddi_init(dev_priv, PORT_TC1);
11290 		intel_ddi_init(dev_priv, PORT_TC2);
11291 		intel_ddi_init(dev_priv, PORT_TC3);
11292 		intel_ddi_init(dev_priv, PORT_TC4);
11293 	} else if (IS_ALDERLAKE_S(dev_priv)) {
11294 		intel_ddi_init(dev_priv, PORT_A);
11295 		intel_ddi_init(dev_priv, PORT_TC1);
11296 		intel_ddi_init(dev_priv, PORT_TC2);
11297 		intel_ddi_init(dev_priv, PORT_TC3);
11298 		intel_ddi_init(dev_priv, PORT_TC4);
11299 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
11300 		intel_ddi_init(dev_priv, PORT_A);
11301 		intel_ddi_init(dev_priv, PORT_B);
11302 		intel_ddi_init(dev_priv, PORT_TC1);
11303 		intel_ddi_init(dev_priv, PORT_TC2);
11304 	} else if (DISPLAY_VER(dev_priv) >= 12) {
11305 		intel_ddi_init(dev_priv, PORT_A);
11306 		intel_ddi_init(dev_priv, PORT_B);
11307 		intel_ddi_init(dev_priv, PORT_TC1);
11308 		intel_ddi_init(dev_priv, PORT_TC2);
11309 		intel_ddi_init(dev_priv, PORT_TC3);
11310 		intel_ddi_init(dev_priv, PORT_TC4);
11311 		intel_ddi_init(dev_priv, PORT_TC5);
11312 		intel_ddi_init(dev_priv, PORT_TC6);
11313 		icl_dsi_init(dev_priv);
11314 	} else if (IS_JSL_EHL(dev_priv)) {
11315 		intel_ddi_init(dev_priv, PORT_A);
11316 		intel_ddi_init(dev_priv, PORT_B);
11317 		intel_ddi_init(dev_priv, PORT_C);
11318 		intel_ddi_init(dev_priv, PORT_D);
11319 		icl_dsi_init(dev_priv);
11320 	} else if (DISPLAY_VER(dev_priv) == 11) {
11321 		intel_ddi_init(dev_priv, PORT_A);
11322 		intel_ddi_init(dev_priv, PORT_B);
11323 		intel_ddi_init(dev_priv, PORT_C);
11324 		intel_ddi_init(dev_priv, PORT_D);
11325 		intel_ddi_init(dev_priv, PORT_E);
11326 		intel_ddi_init(dev_priv, PORT_F);
11327 		icl_dsi_init(dev_priv);
11328 	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
11329 		intel_ddi_init(dev_priv, PORT_A);
11330 		intel_ddi_init(dev_priv, PORT_B);
11331 		intel_ddi_init(dev_priv, PORT_C);
11332 		vlv_dsi_init(dev_priv);
11333 	} else if (DISPLAY_VER(dev_priv) >= 9) {
11334 		intel_ddi_init(dev_priv, PORT_A);
11335 		intel_ddi_init(dev_priv, PORT_B);
11336 		intel_ddi_init(dev_priv, PORT_C);
11337 		intel_ddi_init(dev_priv, PORT_D);
11338 		intel_ddi_init(dev_priv, PORT_E);
11339 	} else if (HAS_DDI(dev_priv)) {
11340 		u32 found;
11341 
11342 		if (intel_ddi_crt_present(dev_priv))
11343 			intel_crt_init(dev_priv);
11344 
11345 		/* Haswell uses DDI functions to detect digital outputs. */
11346 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
11347 		if (found)
11348 			intel_ddi_init(dev_priv, PORT_A);
11349 
11350 		found = intel_de_read(dev_priv, SFUSE_STRAP);
11351 		if (found & SFUSE_STRAP_DDIB_DETECTED)
11352 			intel_ddi_init(dev_priv, PORT_B);
11353 		if (found & SFUSE_STRAP_DDIC_DETECTED)
11354 			intel_ddi_init(dev_priv, PORT_C);
11355 		if (found & SFUSE_STRAP_DDID_DETECTED)
11356 			intel_ddi_init(dev_priv, PORT_D);
11357 		if (found & SFUSE_STRAP_DDIF_DETECTED)
11358 			intel_ddi_init(dev_priv, PORT_F);
11359 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11360 		int found;
11361 
11362 		/*
11363 		 * intel_edp_init_connector() depends on this completing first,
11364 		 * to prevent the registration of both eDP and LVDS and the
11365 		 * incorrect sharing of the PPS.
11366 		 */
11367 		intel_lvds_init(dev_priv);
11368 		intel_crt_init(dev_priv);
11369 
11370 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
11371 
11372 		if (ilk_has_edp_a(dev_priv))
11373 			g4x_dp_init(dev_priv, DP_A, PORT_A);
11374 
11375 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
11376 			/* PCH SDVOB multiplex with HDMIB */
11377 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
11378 			if (!found)
11379 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
11380 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
11381 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
11382 		}
11383 
11384 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
11385 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
11386 
11387 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
11388 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
11389 
11390 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
11391 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
11392 
11393 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
11394 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
11395 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
11396 		bool has_edp, has_port;
11397 
11398 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
11399 			intel_crt_init(dev_priv);
11400 
11401 		/*
11402 		 * The DP_DETECTED bit is the latched state of the DDC
11403 		 * SDA pin at boot. However since eDP doesn't require DDC
11404 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
11405 		 * eDP ports may have been muxed to an alternate function.
11406 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
11407 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
11408 		 * detect eDP ports.
11409 		 *
11410 		 * Sadly the straps seem to be missing sometimes even for HDMI
11411 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
11412 		 * and VBT for the presence of the port. Additionally we can't
11413 		 * trust the port type the VBT declares as we've seen at least
11414 		 * HDMI ports that the VBT claims are DP or eDP.
11415 		 */
11416 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
11417 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
11418 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
11419 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
11420 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
11421 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
11422 
11423 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
11424 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
11425 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
11426 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
11427 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
11428 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
11429 
11430 		if (IS_CHERRYVIEW(dev_priv)) {
11431 			/*
11432 			 * eDP not supported on port D,
11433 			 * so no need to worry about it
11434 			 */
11435 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11436 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11437 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11438 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11439 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11440 		}
11441 
11442 		vlv_dsi_init(dev_priv);
11443 	} else if (IS_PINEVIEW(dev_priv)) {
11444 		intel_lvds_init(dev_priv);
11445 		intel_crt_init(dev_priv);
11446 	} else if (IS_DISPLAY_VER(dev_priv, 3, 4)) {
11447 		bool found = false;
11448 
11449 		if (IS_MOBILE(dev_priv))
11450 			intel_lvds_init(dev_priv);
11451 
11452 		intel_crt_init(dev_priv);
11453 
11454 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11455 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11456 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11457 			if (!found && IS_G4X(dev_priv)) {
11458 				drm_dbg_kms(&dev_priv->drm,
11459 					    "probing HDMI on SDVOB\n");
11460 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11461 			}
11462 
11463 			if (!found && IS_G4X(dev_priv))
11464 				g4x_dp_init(dev_priv, DP_B, PORT_B);
11465 		}
11466 
11467 		/* Before G4X, SDVOC doesn't have its own detect register */
11468 
11469 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11470 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11471 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11472 		}
11473 
11474 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11475 			if (IS_G4X(dev_priv)) {
11476 				drm_dbg_kms(&dev_priv->drm,
11477 					    "probing HDMI on SDVOC\n");
11478 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11479 				g4x_dp_init(dev_priv, DP_C, PORT_C);
11480 			}
11483 		}
11484 
11485 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11486 			g4x_dp_init(dev_priv, DP_D, PORT_D);
11487 
11488 		if (SUPPORTS_TV(dev_priv))
11489 			intel_tv_init(dev_priv);
11490 	} else if (DISPLAY_VER(dev_priv) == 2) {
11491 		if (IS_I85X(dev_priv))
11492 			intel_lvds_init(dev_priv);
11493 
11494 		intel_crt_init(dev_priv);
11495 		intel_dvo_init(dev_priv);
11496 	}
11497 
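	/*
	 * Now that all the encoders are registered, compute which crtcs
	 * each encoder can be driven by, and which encoders can be
	 * cloned with each other.
	 */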
11498 	for_each_intel_encoder(&dev_priv->drm, encoder) {
11499 		encoder->base.possible_crtcs =
11500 			intel_encoder_possible_crtcs(encoder);
11501 		encoder->base.possible_clones =
11502 			intel_encoder_possible_clones(encoder);
11503 	}
11504 
11505 	intel_init_pch_refclk(dev_priv);
11506 
11507 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11508 }
11509 
11510 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11511 {
11512 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11513 
11514 	drm_framebuffer_cleanup(fb);
11515 
11516 	if (intel_fb_uses_dpt(fb))
11517 		intel_dpt_destroy(intel_fb->dpt_vm);
11518 
11519 	intel_frontbuffer_put(intel_fb->frontbuffer);
11520 
11521 	kfree(intel_fb);
11522 }
11523 
11524 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11525 						struct drm_file *file,
11526 						unsigned int *handle)
11527 {
11528 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11529 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11530 
11531 	if (i915_gem_object_is_userptr(obj)) {
11532 		drm_dbg(&i915->drm,
11533 			"attempting to use a userptr for a framebuffer, denied\n");
11534 		return -EINVAL;
11535 	}
11536 
11537 	return drm_gem_handle_create(file, &obj->base, handle);
11538 }
11539 
11540 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11541 					struct drm_file *file,
11542 					unsigned int flags, unsigned int color,
11543 					struct drm_clip_rect *clips,
11544 					unsigned int num_clips)
11545 {
11546 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11547 
11548 	i915_gem_object_flush_if_display(obj);
11549 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11550 
11551 	return 0;
11552 }
11553 
11554 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11555 	.destroy = intel_user_framebuffer_destroy,
11556 	.create_handle = intel_user_framebuffer_create_handle,
11557 	.dirty = intel_user_framebuffer_dirty,
11558 };
11559 
11560 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11561 				  struct drm_i915_gem_object *obj,
11562 				  struct drm_mode_fb_cmd2 *mode_cmd)
11563 {
11564 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11565 	struct drm_framebuffer *fb = &intel_fb->base;
11566 	u32 max_stride;
11567 	unsigned int tiling, stride;
11568 	int ret = -EINVAL;
11569 	int i;
11570 
11571 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11572 	if (!intel_fb->frontbuffer)
11573 		return -ENOMEM;
11574 
11575 	i915_gem_object_lock(obj, NULL);
11576 	tiling = i915_gem_object_get_tiling(obj);
11577 	stride = i915_gem_object_get_stride(obj);
11578 	i915_gem_object_unlock(obj);
11579 
11580 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11581 		/*
11582 		 * If there's a fence, enforce that
11583 		 * the fb modifier and tiling mode match.
11584 		 */
11585 		if (tiling != I915_TILING_NONE &&
11586 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11587 			drm_dbg_kms(&dev_priv->drm,
11588 				    "tiling_mode doesn't match fb modifier\n");
11589 			goto err;
11590 		}
11591 	} else {
11592 		if (tiling == I915_TILING_X) {
11593 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11594 		} else if (tiling == I915_TILING_Y) {
11595 			drm_dbg_kms(&dev_priv->drm,
11596 				    "No Y tiling for legacy addfb\n");
11597 			goto err;
11598 		}
11599 	}
11600 
11601 	if (!drm_any_plane_has_format(&dev_priv->drm,
11602 				      mode_cmd->pixel_format,
11603 				      mode_cmd->modifier[0])) {
11604 		drm_dbg_kms(&dev_priv->drm,
11605 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
11606 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11607 		goto err;
11608 	}
11609 
11610 	/*
11611 	 * gen2/3 display engine uses the fence if present,
11612 	 * so the tiling mode must match the fb modifier exactly.
11613 	 */
11614 	if (DISPLAY_VER(dev_priv) < 4 &&
11615 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11616 		drm_dbg_kms(&dev_priv->drm,
11617 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
11618 		goto err;
11619 	}
11620 
11621 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11622 					 mode_cmd->modifier[0]);
11623 	if (mode_cmd->pitches[0] > max_stride) {
11624 		drm_dbg_kms(&dev_priv->drm,
11625 			    "%s pitch (%u) must be at most %d\n",
11626 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11627 			    "tiled" : "linear",
11628 			    mode_cmd->pitches[0], max_stride);
11629 		goto err;
11630 	}
11631 
11632 	/*
11633 	 * If there's a fence, enforce that
11634 	 * the fb pitch and fence stride match.
11635 	 */
11636 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11637 		drm_dbg_kms(&dev_priv->drm,
11638 			    "pitch (%d) must match tiling stride (%d)\n",
11639 			    mode_cmd->pitches[0], stride);
11640 		goto err;
11641 	}
11642 
11643 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11644 	if (mode_cmd->offsets[0] != 0) {
11645 		drm_dbg_kms(&dev_priv->drm,
11646 			    "plane 0 offset (0x%08x) must be 0\n",
11647 			    mode_cmd->offsets[0]);
11648 		goto err;
11649 	}
11650 
11651 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11652 
11653 	for (i = 0; i < fb->format->num_planes; i++) {
11654 		u32 stride_alignment;
11655 
11656 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11657 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11658 				    i);
11659 			goto err;
11660 		}
11661 
11662 		stride_alignment = intel_fb_stride_alignment(fb, i);
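		/*
		 * stride_alignment is a power of two, so the bitmask
		 * test below is equivalent to pitch % stride_alignment.
		 */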
11663 		if (fb->pitches[i] & (stride_alignment - 1)) {
11664 			drm_dbg_kms(&dev_priv->drm,
11665 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
11666 				    i, fb->pitches[i], stride_alignment);
11667 			goto err;
11668 		}
11669 
11670 		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11671 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11672 
11673 			if (fb->pitches[i] != ccs_aux_stride) {
11674 				drm_dbg_kms(&dev_priv->drm,
11675 					    "ccs aux plane %d pitch (%d) must be %d\n",
11676 					    i,
11677 					    fb->pitches[i], ccs_aux_stride);
11678 				goto err;
11679 			}
11680 		}
11681 
11682 		/* TODO: Add POT stride remapping support for CCS formats as well. */
11683 		if (IS_ALDERLAKE_P(dev_priv) &&
11684 		    mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
11685 		    !intel_fb_needs_pot_stride_remap(intel_fb) &&
11686 		    !is_power_of_2(mode_cmd->pitches[i])) {
11687 			drm_dbg_kms(&dev_priv->drm,
11688 				    "plane %d pitch (%d) must be power of two for tiled buffers\n",
11689 				    i, mode_cmd->pitches[i]);
11690 			goto err;
11691 		}
11692 
11693 		fb->obj[i] = &obj->base;
11694 	}
11695 
11696 	ret = intel_fill_fb_info(dev_priv, intel_fb);
11697 	if (ret)
11698 		goto err;
11699 
11700 	if (intel_fb_uses_dpt(fb)) {
11701 		struct i915_address_space *vm;
11702 
11703 		vm = intel_dpt_create(intel_fb);
11704 		if (IS_ERR(vm)) {
11705 			ret = PTR_ERR(vm);
11706 			goto err;
11707 		}
11708 
11709 		intel_fb->dpt_vm = vm;
11710 	}
11711 
11712 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
11713 	if (ret) {
11714 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
11715 		goto err;
11716 	}
11717 
11718 	return 0;
11719 
11720 err:
11721 	intel_frontbuffer_put(intel_fb->frontbuffer);
11722 	return ret;
11723 }
11724 
11725 static struct drm_framebuffer *
11726 intel_user_framebuffer_create(struct drm_device *dev,
11727 			      struct drm_file *filp,
11728 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11729 {
11730 	struct drm_framebuffer *fb;
11731 	struct drm_i915_gem_object *obj;
11732 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11733 	struct drm_i915_private *i915;
11734 
11735 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11736 	if (!obj)
11737 		return ERR_PTR(-ENOENT);
11738 
11739 	/* on discrete, fb objects must be backed by (or migratable to) LMEM */
11740 	i915 = to_i915(obj->base.dev);
11741 	if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
11742 		/* object is "remote", not in local memory */
11743 		i915_gem_object_put(obj);
11744 		return ERR_PTR(-EREMOTE);
11745 	}
11746 
11747 	fb = intel_framebuffer_create(obj, &mode_cmd);
11748 	i915_gem_object_put(obj);
11749 
11750 	return fb;
11751 }
11752 
11753 static enum drm_mode_status
11754 intel_mode_valid(struct drm_device *dev,
11755 		 const struct drm_display_mode *mode)
11756 {
11757 	struct drm_i915_private *dev_priv = to_i915(dev);
11758 	int hdisplay_max, htotal_max;
11759 	int vdisplay_max, vtotal_max;
11760 
11761 	/*
11762 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11763 	 * of DBLSCAN modes to the output's mode list when they detect
11764 	 * the scaling mode property on the connector. And they don't
11765 	 * ask the kernel to validate those modes in any way until
11766 	 * modeset time at which point the client gets a protocol error.
11767 	 * So in order to not upset those clients we silently ignore the
11768 	 * DBLSCAN flag on such connectors. For other connectors we will
11769 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
11770 	 * And we always reject DBLSCAN modes in connector->mode_valid()
11771 	 * as we never want such modes on the connector's mode list.
11772 	 */
11773 
11774 	if (mode->vscan > 1)
11775 		return MODE_NO_VSCAN;
11776 
11777 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
11778 		return MODE_H_ILLEGAL;
11779 
11780 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11781 			   DRM_MODE_FLAG_NCSYNC |
11782 			   DRM_MODE_FLAG_PCSYNC))
11783 		return MODE_HSYNC;
11784 
11785 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
11786 			   DRM_MODE_FLAG_PIXMUX |
11787 			   DRM_MODE_FLAG_CLKDIV2))
11788 		return MODE_BAD;
11789 
11790 	/* Transcoder timing limits */
11791 	if (DISPLAY_VER(dev_priv) >= 11) {
11792 		hdisplay_max = 16384;
11793 		vdisplay_max = 8192;
11794 		htotal_max = 16384;
11795 		vtotal_max = 8192;
11796 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
11797 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11798 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11799 		vdisplay_max = 4096;
11800 		htotal_max = 8192;
11801 		vtotal_max = 8192;
11802 	} else if (DISPLAY_VER(dev_priv) >= 3) {
11803 		hdisplay_max = 4096;
11804 		vdisplay_max = 4096;
11805 		htotal_max = 8192;
11806 		vtotal_max = 8192;
11807 	} else {
11808 		hdisplay_max = 2048;
11809 		vdisplay_max = 2048;
11810 		htotal_max = 4096;
11811 		vtotal_max = 4096;
11812 	}
11813 
11814 	if (mode->hdisplay > hdisplay_max ||
11815 	    mode->hsync_start > htotal_max ||
11816 	    mode->hsync_end > htotal_max ||
11817 	    mode->htotal > htotal_max)
11818 		return MODE_H_ILLEGAL;
11819 
11820 	if (mode->vdisplay > vdisplay_max ||
11821 	    mode->vsync_start > vtotal_max ||
11822 	    mode->vsync_end > vtotal_max ||
11823 	    mode->vtotal > vtotal_max)
11824 		return MODE_V_ILLEGAL;
11825 
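	/*
	 * The display engine needs a minimum amount of horizontal and
	 * vertical blanking to operate; the exact minimums grew a bit
	 * on gen5+.
	 */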
11826 	if (DISPLAY_VER(dev_priv) >= 5) {
11827 		if (mode->hdisplay < 64 ||
11828 		    mode->htotal - mode->hdisplay < 32)
11829 			return MODE_H_ILLEGAL;
11830 
11831 		if (mode->vtotal - mode->vdisplay < 5)
11832 			return MODE_V_ILLEGAL;
11833 	} else {
11834 		if (mode->htotal - mode->hdisplay < 32)
11835 			return MODE_H_ILLEGAL;
11836 
11837 		if (mode->vtotal - mode->vdisplay < 3)
11838 			return MODE_V_ILLEGAL;
11839 	}
11840 
11841 	return MODE_OK;
11842 }
11843 
11844 enum drm_mode_status
11845 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11846 				const struct drm_display_mode *mode,
11847 				bool bigjoiner)
11848 {
11849 	int plane_width_max, plane_height_max;
11850 
11851 	/*
11852 	 * intel_mode_valid() should be
11853 	 * sufficient on older platforms.
11854 	 */
11855 	if (DISPLAY_VER(dev_priv) < 9)
11856 		return MODE_OK;
11857 
11858 	/*
11859 	 * Most people will probably want a fullscreen
11860 	 * plane so let's not advertise modes that are
11861 	 * too big for that.
11862 	 */
11863 	if (DISPLAY_VER(dev_priv) >= 11) {
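		/* bigjoiner doubles the max plane width: 5120 -> 10240 */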
11864 		plane_width_max = 5120 << bigjoiner;
11865 		plane_height_max = 4320;
11866 	} else {
11867 		plane_width_max = 5120;
11868 		plane_height_max = 4096;
11869 	}
11870 
11871 	if (mode->hdisplay > plane_width_max)
11872 		return MODE_H_ILLEGAL;
11873 
11874 	if (mode->vdisplay > plane_height_max)
11875 		return MODE_V_ILLEGAL;
11876 
11877 	return MODE_OK;
11878 }
11879 
11880 static const struct drm_mode_config_funcs intel_mode_funcs = {
11881 	.fb_create = intel_user_framebuffer_create,
11882 	.get_format_info = intel_get_format_info,
11883 	.output_poll_changed = intel_fbdev_output_poll_changed,
11884 	.mode_valid = intel_mode_valid,
11885 	.atomic_check = intel_atomic_check,
11886 	.atomic_commit = intel_atomic_commit,
11887 	.atomic_state_alloc = intel_atomic_state_alloc,
11888 	.atomic_state_clear = intel_atomic_state_clear,
11889 	.atomic_state_free = intel_atomic_state_free,
11890 };
11891 
11892 /**
11893  * intel_init_display_hooks - initialize the display modesetting hooks
11894  * @dev_priv: device private
11895  */
11896 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11897 {
11898 	if (!HAS_DISPLAY(dev_priv))
11899 		return;
11900 
11901 	intel_init_cdclk_hooks(dev_priv);
11902 	intel_init_audio_hooks(dev_priv);
11903 
11904 	intel_dpll_init_clock_hook(dev_priv);
11905 
11906 	if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) {
11907 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11908 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11909 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11914 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11915 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11916 		dev_priv->display.crtc_enable = ilk_crtc_enable;
11917 		dev_priv->display.crtc_disable = ilk_crtc_disable;
11918 	} else if (IS_CHERRYVIEW(dev_priv) ||
11919 		   IS_VALLEYVIEW(dev_priv)) {
11920 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11921 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11922 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11923 	} else {
11924 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11925 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
11926 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11927 	}
11928 
11929 	intel_fdi_init_hook(dev_priv);
11930 
11931 	if (DISPLAY_VER(dev_priv) >= 9) {
11932 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11933 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11934 	} else {
11935 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11936 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11937 	}
11939 }
11940 
11941 void intel_modeset_init_hw(struct drm_i915_private *i915)
11942 {
11943 	struct intel_cdclk_state *cdclk_state;
11944 
11945 	if (!HAS_DISPLAY(i915))
11946 		return;
11947 
11948 	cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
11949 
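	/* read out the current hw cdclk config and seed the atomic cdclk state with it */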
11950 	intel_update_cdclk(i915);
11951 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11952 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11953 }
11954 
11955 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11956 {
11957 	struct drm_plane *plane;
11958 	struct intel_crtc *crtc;
11959 
11960 	for_each_intel_crtc(state->dev, crtc) {
11961 		struct intel_crtc_state *crtc_state;
11962 
11963 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
11964 		if (IS_ERR(crtc_state))
11965 			return PTR_ERR(crtc_state);
11966 
11967 		if (crtc_state->hw.active) {
11968 			/*
11969 			 * Preserve the inherited flag to avoid
11970 			 * taking the full modeset path.
11971 			 */
11972 			crtc_state->inherited = true;
11973 		}
11974 	}
11975 
11976 	drm_for_each_plane(plane, state->dev) {
11977 		struct drm_plane_state *plane_state;
11978 
11979 		plane_state = drm_atomic_get_plane_state(state, plane);
11980 		if (IS_ERR(plane_state))
11981 			return PTR_ERR(plane_state);
11982 	}
11983 
11984 	return 0;
11985 }
11986 
11987 /*
11988  * Calculate what we think the watermarks should be for the state we've read
11989  * out of the hardware and then immediately program those watermarks so that
11990  * we ensure the hardware settings match our internal state.
11991  *
11992  * We can calculate what we think WM's should be by creating a duplicate of the
11993  * current state (which was constructed during hardware readout) and running it
11994  * through the atomic check code to calculate new watermark values in the
11995  * state object.
11996  */
11997 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
11998 {
11999 	struct drm_atomic_state *state;
12000 	struct intel_atomic_state *intel_state;
12001 	struct intel_crtc *crtc;
12002 	struct intel_crtc_state *crtc_state;
12003 	struct drm_modeset_acquire_ctx ctx;
12004 	int ret;
12005 	int i;
12006 
12007 	/* Only supported on platforms that use atomic watermark design */
12008 	if (!dev_priv->display.optimize_watermarks)
12009 		return;
12010 
12011 	state = drm_atomic_state_alloc(&dev_priv->drm);
12012 	if (drm_WARN_ON(&dev_priv->drm, !state))
12013 		return;
12014 
12015 	intel_state = to_intel_atomic_state(state);
12016 
12017 	drm_modeset_acquire_init(&ctx, 0);
12018 
12019 retry:
12020 	state->acquire_ctx = &ctx;
12021 
12022 	/*
12023 	 * Hardware readout is the only time we don't want to calculate
12024 	 * intermediate watermarks (since we don't trust the current
12025 	 * watermarks).
12026 	 */
12027 	if (!HAS_GMCH(dev_priv))
12028 		intel_state->skip_intermediate_wm = true;
12029 
12030 	ret = sanitize_watermarks_add_affected(state);
12031 	if (ret)
12032 		goto fail;
12033 
12034 	ret = intel_atomic_check(&dev_priv->drm, state);
12035 	if (ret)
12036 		goto fail;
12037 
12038 	/* Write calculated watermark values back */
12039 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
12040 		crtc_state->wm.need_postvbl_update = true;
12041 		dev_priv->display.optimize_watermarks(intel_state, crtc);
12042 
12043 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
12044 	}
12045 
12046 fail:
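	/*
	 * -EDEADLK comes from the ww mutex deadlock avoidance; clear the
	 * state, back off all the locks and retry the whole sequence.
	 */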
12047 	if (ret == -EDEADLK) {
12048 		drm_atomic_state_clear(state);
12049 		drm_modeset_backoff(&ctx);
12050 		goto retry;
12051 	}
12052 
12053 	/*
12054 	 * If we fail here, it means that the hardware appears to be
12055 	 * programmed in a way that shouldn't be possible, given our
12056 	 * understanding of watermark requirements.  This might mean a
12057 	 * mistake in the hardware readout code or a mistake in the
12058 	 * watermark calculations for a given platform.  Raise a WARN
12059 	 * so that this is noticeable.
12060 	 *
12061 	 * If this actually happens, we'll have to just leave the
12062 	 * BIOS-programmed watermarks untouched and hope for the best.
12063 	 */
12064 	drm_WARN(&dev_priv->drm, ret,
12065 		 "Could not determine valid watermarks for inherited state\n");
12066 
12067 	drm_atomic_state_put(state);
12068 
12069 	drm_modeset_drop_locks(&ctx);
12070 	drm_modeset_acquire_fini(&ctx);
12071 }
12072 
12073 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
12074 {
12075 	if (IS_IRONLAKE(dev_priv)) {
12076 		u32 fdi_pll_clk =
12077 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
12078 
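		/* the divider encodes the FDI PLL frequency in 10 MHz units, offset by 2 */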
12079 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
12080 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
12081 		dev_priv->fdi_pll_freq = 270000;
12082 	} else {
12083 		return;
12084 	}
12085 
12086 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
12087 }
12088 
12089 static int intel_initial_commit(struct drm_device *dev)
12090 {
12091 	struct drm_atomic_state *state = NULL;
12092 	struct drm_modeset_acquire_ctx ctx;
12093 	struct intel_crtc *crtc;
12094 	int ret = 0;
12095 
12096 	state = drm_atomic_state_alloc(dev);
12097 	if (!state)
12098 		return -ENOMEM;
12099 
12100 	drm_modeset_acquire_init(&ctx, 0);
12101 
12102 retry:
12103 	state->acquire_ctx = &ctx;
12104 
12105 	for_each_intel_crtc(dev, crtc) {
12106 		struct intel_crtc_state *crtc_state =
12107 			intel_atomic_get_crtc_state(state, crtc);
12108 
12109 		if (IS_ERR(crtc_state)) {
12110 			ret = PTR_ERR(crtc_state);
12111 			goto out;
12112 		}
12113 
12114 		if (crtc_state->hw.active) {
12115 			struct intel_encoder *encoder;
12116 
12117 			/*
12118 			 * We've not yet detected sink capabilities
12119 			 * (audio, infoframes, etc.) and thus we don't want to
12120 			 * force a full state recomputation yet. We want that to
12121 			 * happen only for the first real commit from userspace.
12122 			 * So preserve the inherited flag for the time being.
12123 			 */
12124 			crtc_state->inherited = true;
12125 
12126 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
12127 			if (ret)
12128 				goto out;
12129 
12130 			/*
12131 			 * FIXME hack to force a LUT update to avoid the
12132 			 * plane update forcing the pipe gamma on without
12133 			 * having a proper LUT loaded. Remove once we
12134 			 * have readout for pipe gamma enable.
12135 			 */
12136 			crtc_state->uapi.color_mgmt_changed = true;
12137 
12138 			for_each_intel_encoder_mask(dev, encoder,
12139 						    crtc_state->uapi.encoder_mask) {
12140 				if (encoder->initial_fastset_check &&
12141 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
12142 					ret = drm_atomic_add_affected_connectors(state,
12143 										 &crtc->base);
12144 					if (ret)
12145 						goto out;
12146 				}
12147 			}
12148 		}
12149 	}
12150 
12151 	ret = drm_atomic_commit(state);
12152 
12153 out:
12154 	if (ret == -EDEADLK) {
12155 		drm_atomic_state_clear(state);
12156 		drm_modeset_backoff(&ctx);
12157 		goto retry;
12158 	}
12159 
12160 	drm_atomic_state_put(state);
12161 
12162 	drm_modeset_drop_locks(&ctx);
12163 	drm_modeset_acquire_fini(&ctx);
12164 
12165 	return ret;
12166 }
12167 
12168 static void intel_mode_config_init(struct drm_i915_private *i915)
12169 {
12170 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
12171 
12172 	drm_mode_config_init(&i915->drm);
12173 	INIT_LIST_HEAD(&i915->global_obj_list);
12174 
12175 	mode_config->min_width = 0;
12176 	mode_config->min_height = 0;
12177 
12178 	mode_config->preferred_depth = 24;
12179 	mode_config->prefer_shadow = 1;
12180 
12181 	mode_config->funcs = &intel_mode_funcs;
12182 
12183 	mode_config->async_page_flip = has_async_flips(i915);
12184 
12185 	/*
12186 	 * Maximum framebuffer dimensions, chosen to match
12187 	 * the maximum render engine surface size on gen4+.
12188 	 */
12189 	if (DISPLAY_VER(i915) >= 7) {
12190 		mode_config->max_width = 16384;
12191 		mode_config->max_height = 16384;
12192 	} else if (DISPLAY_VER(i915) >= 4) {
12193 		mode_config->max_width = 8192;
12194 		mode_config->max_height = 8192;
12195 	} else if (DISPLAY_VER(i915) == 3) {
12196 		mode_config->max_width = 4096;
12197 		mode_config->max_height = 4096;
12198 	} else {
12199 		mode_config->max_width = 2048;
12200 		mode_config->max_height = 2048;
12201 	}
12202 
12203 	if (IS_I845G(i915) || IS_I865G(i915)) {
12204 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
12205 		mode_config->cursor_height = 1023;
12206 	} else if (IS_I830(i915) || IS_I85X(i915) ||
12207 		   IS_I915G(i915) || IS_I915GM(i915)) {
12208 		mode_config->cursor_width = 64;
12209 		mode_config->cursor_height = 64;
12210 	} else {
12211 		mode_config->cursor_width = 256;
12212 		mode_config->cursor_height = 256;
12213 	}
12214 }
12215 
12216 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
12217 {
12218 	intel_atomic_global_obj_cleanup(i915);
12219 	drm_mode_config_cleanup(&i915->drm);
12220 }
12221 
12222 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
12223 {
12224 	if (plane_config->fb) {
12225 		struct drm_framebuffer *fb = &plane_config->fb->base;
12226 
12227 		/* We may only have the stub and not a full framebuffer */
12228 		if (drm_framebuffer_read_refcount(fb))
12229 			drm_framebuffer_put(fb);
12230 		else
12231 			kfree(fb);
12232 	}
12233 
12234 	if (plane_config->vma)
12235 		i915_vma_put(plane_config->vma);
12236 }
12237 
12238 /* part #1: call before irq install */
12239 int intel_modeset_init_noirq(struct drm_i915_private *i915)
12240 {
12241 	int ret;
12242 
12243 	if (i915_inject_probe_failure(i915))
12244 		return -ENODEV;
12245 
12246 	if (HAS_DISPLAY(i915)) {
12247 		ret = drm_vblank_init(&i915->drm,
12248 				      INTEL_NUM_PIPES(i915));
12249 		if (ret)
12250 			return ret;
12251 	}
12252 
12253 	intel_bios_init(i915);
12254 
12255 	ret = intel_vga_register(i915);
12256 	if (ret)
12257 		goto cleanup_bios;
12258 
12259 	/* FIXME: completely on the wrong abstraction layer */
12260 	intel_power_domains_init_hw(i915, false);
12261 
12262 	if (!HAS_DISPLAY(i915))
12263 		return 0;
12264 
12265 	intel_dmc_ucode_init(i915);
12266 
12267 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
12268 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
12269 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
12270 
12271 	i915->framestart_delay = 1; /* 1-4 */
12272 
12273 	i915->window2_delay = 0; /* No DSB so no window2 delay */
12274 
12275 	intel_mode_config_init(i915);
12276 
12277 	ret = intel_cdclk_init(i915);
12278 	if (ret)
12279 		goto cleanup_vga_client_pw_domain_dmc;
12280 
12281 	ret = intel_dbuf_init(i915);
12282 	if (ret)
12283 		goto cleanup_vga_client_pw_domain_dmc;
12284 
12285 	ret = intel_bw_init(i915);
12286 	if (ret)
12287 		goto cleanup_vga_client_pw_domain_dmc;
12288 
12289 	init_llist_head(&i915->atomic_helper.free_list);
12290 	INIT_WORK(&i915->atomic_helper.free_work,
12291 		  intel_atomic_helper_free_state_worker);
12292 
12293 	intel_init_quirks(i915);
12294 
12295 	intel_fbc_init(i915);
12296 
12297 	return 0;
12298 
12299 cleanup_vga_client_pw_domain_dmc:
12300 	intel_dmc_ucode_fini(i915);
12301 	intel_power_domains_driver_remove(i915);
12302 	intel_vga_unregister(i915);
12303 cleanup_bios:
12304 	intel_bios_driver_remove(i915);
12305 
12306 	return ret;
12307 }
12308 
12309 /* part #2: call after irq install, but before gem init */
12310 int intel_modeset_init_nogem(struct drm_i915_private *i915)
12311 {
12312 	struct drm_device *dev = &i915->drm;
12313 	enum pipe pipe;
12314 	struct intel_crtc *crtc;
12315 	int ret;
12316 
12317 	if (!HAS_DISPLAY(i915))
12318 		return 0;
12319 
12320 	intel_init_pm(i915);
12321 
12322 	intel_panel_sanitize_ssc(i915);
12323 
12324 	intel_pps_setup(i915);
12325 
12326 	intel_gmbus_setup(i915);
12327 
12328 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
12329 		    INTEL_NUM_PIPES(i915),
12330 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
12331 
12332 	for_each_pipe(i915, pipe) {
12333 		ret = intel_crtc_init(i915, pipe);
12334 		if (ret) {
12335 			intel_mode_config_cleanup(i915);
12336 			return ret;
12337 		}
12338 	}
12339 
12340 	intel_plane_possible_crtcs_init(i915);
12341 	intel_shared_dpll_init(dev);
12342 	intel_update_fdi_pll_freq(i915);
12343 
12344 	intel_update_czclk(i915);
12345 	intel_modeset_init_hw(i915);
12346 	intel_dpll_update_ref_clks(i915);
12347 
12348 	intel_hdcp_component_init(i915);
12349 
12350 	if (i915->max_cdclk_freq == 0)
12351 		intel_update_max_cdclk(i915);
12352 
12353 	/*
12354 	 * If the platform has HTI, we need to find out whether it has reserved
12355 	 * any display resources before we create our display outputs.
12356 	 */
12357 	if (INTEL_INFO(i915)->display.has_hti)
12358 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
12359 
12360 	/* Just disable it once at startup */
12361 	intel_vga_disable(i915);
12362 	intel_setup_outputs(i915);
12363 
12364 	drm_modeset_lock_all(dev);
12365 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
12366 	drm_modeset_unlock_all(dev);
12367 
12368 	for_each_intel_crtc(dev, crtc) {
12369 		struct intel_initial_plane_config plane_config = {};
12370 
12371 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
12372 			continue;
12373 
12374 		/*
12375 		 * Note that reserving the BIOS fb up front prevents us
12376 		 * from stuffing other stolen allocations like the ring
12377 		 * on top.  This prevents some ugliness at boot time, and
12378 		 * can even allow for smooth boot transitions if the BIOS
12379 		 * fb is large enough for the active pipe configuration.
12380 		 */
12381 		i915->display.get_initial_plane_config(crtc, &plane_config);
12382 
12383 		/*
12384 		 * If the fb is shared between multiple heads, we'll
12385 		 * just get the first one.
12386 		 */
12387 		intel_find_initial_plane_obj(crtc, &plane_config);
12388 
12389 		plane_config_fini(&plane_config);
12390 	}
12391 
12392 	/*
12393 	 * Make sure hardware watermarks really match the state we read out.
12394 	 * Note that we need to do this after reconstructing the BIOS fb's
12395 	 * since the watermark calculation done here will use pstate->fb.
12396 	 */
12397 	if (!HAS_GMCH(i915))
12398 		sanitize_watermarks(i915);
12399 
12400 	return 0;
12401 }
12402 
12403 /* part #3: call after gem init */
12404 int intel_modeset_init(struct drm_i915_private *i915)
12405 {
12406 	int ret;
12407 
12408 	if (!HAS_DISPLAY(i915))
12409 		return 0;
12410 
12411 	/*
12412 	 * Force all active planes to recompute their states. So that on
12413 	 * mode_setcrtc after probe, all the intel_plane_state variables
12414 	 * are already calculated and there is no assert_plane warnings
12415 	 * during bootup.
12416 	 */
12417 	ret = intel_initial_commit(&i915->drm);
12418 	if (ret)
12419 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
12420 
12421 	intel_overlay_setup(i915);
12422 
12423 	ret = intel_fbdev_init(&i915->drm);
12424 	if (ret)
12425 		return ret;
12426 
12427 	/* Only enable hotplug handling once the fbdev is fully set up. */
12428 	intel_hpd_init(i915);
12429 	intel_hpd_poll_disable(i915);
12430 
12431 	intel_init_ipc(i915);
12432 
12433 	return 0;
12434 }
12435 
12436 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12437 {
12438 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12439 	/* 640x480@60Hz, ~25175 kHz */
12440 	struct dpll clock = {
12441 		.m1 = 18,
12442 		.m2 = 7,
12443 		.p1 = 13,
12444 		.p2 = 4,
12445 		.n = 2,
12446 	};
12447 	u32 dpll, fp;
12448 	int i;
12449 
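	/* with the 48 MHz refclk these dividers give a ~25.154 MHz dot clock */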
12450 	drm_WARN_ON(&dev_priv->drm,
12451 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
12452 
12453 	drm_dbg_kms(&dev_priv->drm,
12454 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
12455 		    pipe_name(pipe), clock.vco, clock.dot);
12456 
12457 	fp = i9xx_dpll_compute_fp(&clock);
12458 	dpll = DPLL_DVO_2X_MODE |
12459 		DPLL_VGA_MODE_DIS |
12460 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
12461 		PLL_P2_DIVIDE_BY_4 |
12462 		PLL_REF_INPUT_DREFCLK |
12463 		DPLL_VCO_ENABLE;
12464 
12465 	intel_de_write(dev_priv, FP0(pipe), fp);
12466 	intel_de_write(dev_priv, FP1(pipe), fp);
12467 
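	/* standard VGA 640x480@60 timings: htotal 800, vtotal 525 */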
12468 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
12469 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
12470 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
12471 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
12472 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
12473 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
12474 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
12475 
12476 	/*
12477 	 * Apparently we need to have VGA mode enabled prior to changing
12478 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12479 	 * dividers, even though the register value does change.
12480 	 */
12481 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12482 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12483 
12484 	/* Wait for the clocks to stabilize. */
12485 	intel_de_posting_read(dev_priv, DPLL(pipe));
12486 	udelay(150);
12487 
12488 	/* The pixel multiplier can only be updated once the
12489 	 * DPLL is enabled and the clocks are stable.
12490 	 *
12491 	 * So write it again.
12492 	 */
12493 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12494 
12495 	/* We do this three times for luck */
12496 	for (i = 0; i < 3 ; i++) {
12497 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12498 		intel_de_posting_read(dev_priv, DPLL(pipe));
12499 		udelay(150); /* wait for warmup */
12500 	}
12501 
12502 	intel_de_write(dev_priv, PIPECONF(pipe),
12503 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12504 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12505 
12506 	intel_wait_for_pipe_scanline_moving(crtc);
12507 }
12508 
12509 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12510 {
12511 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12512 
12513 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12514 		    pipe_name(pipe));
12515 
12516 	drm_WARN_ON(&dev_priv->drm,
12517 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12518 		    DISPLAY_PLANE_ENABLE);
12519 	drm_WARN_ON(&dev_priv->drm,
12520 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12521 		    DISPLAY_PLANE_ENABLE);
12522 	drm_WARN_ON(&dev_priv->drm,
12523 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12524 		    DISPLAY_PLANE_ENABLE);
12525 	drm_WARN_ON(&dev_priv->drm,
12526 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12527 	drm_WARN_ON(&dev_priv->drm,
12528 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12529 
12530 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12531 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12532 
12533 	intel_wait_for_pipe_scanline_stopped(crtc);
12534 
12535 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12536 	intel_de_posting_read(dev_priv, DPLL(pipe));
12537 }
12538 
12539 static void
12540 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12541 {
12542 	struct intel_crtc *crtc;
12543 
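	/* the primary plane <-> pipe mapping only needs sanitizing on gen2/3 */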
12544 	if (DISPLAY_VER(dev_priv) >= 4)
12545 		return;
12546 
12547 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12548 		struct intel_plane *plane =
12549 			to_intel_plane(crtc->base.primary);
12550 		struct intel_crtc *plane_crtc;
12551 		enum pipe pipe;
12552 
12553 		if (!plane->get_hw_state(plane, &pipe))
12554 			continue;
12555 
12556 		if (pipe == crtc->pipe)
12557 			continue;
12558 
12559 		drm_dbg_kms(&dev_priv->drm,
12560 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12561 			    plane->base.base.id, plane->base.name);
12562 
12563 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12564 		intel_plane_disable_noatomic(plane_crtc, plane);
12565 	}
12566 }
12567 
12568 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12569 {
12570 	struct drm_device *dev = crtc->base.dev;
12571 	struct intel_encoder *encoder;
12572 
12573 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12574 		return true;
12575 
12576 	return false;
12577 }
12578 
12579 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12580 {
12581 	struct drm_device *dev = encoder->base.dev;
12582 	struct intel_connector *connector;
12583 
12584 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12585 		return connector;
12586 
12587 	return NULL;
12588 }
12589 
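/*
 * IBX/CPT have a PCH transcoder for every pipe, whereas LPT-H only has
 * PCH transcoder A (tied to pipe A).
 */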
12590 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
12591 			      enum pipe pch_transcoder)
12592 {
12593 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12594 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12595 }
12596 
12597 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12598 {
12599 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12600 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12601 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12602 
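	/*
	 * The frame start delay lives in a different register depending
	 * on the platform: CHICKEN_TRANS on HSW/BDW/gen9+, PIPECONF on
	 * older platforms, plus a PCH transcoder register when a PCH
	 * encoder is in use.
	 */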
12603 	if (DISPLAY_VER(dev_priv) >= 9 ||
12604 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12605 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12606 		u32 val;
12607 
12608 		if (transcoder_is_dsi(cpu_transcoder))
12609 			return;
12610 
12611 		val = intel_de_read(dev_priv, reg);
12612 		val &= ~HSW_FRAME_START_DELAY_MASK;
12613 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12614 		intel_de_write(dev_priv, reg, val);
12615 	} else {
12616 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12617 		u32 val;
12618 
12619 		val = intel_de_read(dev_priv, reg);
12620 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12621 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12622 		intel_de_write(dev_priv, reg, val);
12623 	}
12624 
12625 	if (!crtc_state->has_pch_encoder)
12626 		return;
12627 
12628 	if (HAS_PCH_IBX(dev_priv)) {
12629 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12630 		u32 val;
12631 
12632 		val = intel_de_read(dev_priv, reg);
12633 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12634 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12635 		intel_de_write(dev_priv, reg, val);
12636 	} else {
12637 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12638 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12639 		u32 val;
12640 
12641 		val = intel_de_read(dev_priv, reg);
12642 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12643 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12644 		intel_de_write(dev_priv, reg, val);
12645 	}
12646 }
12647 
12648 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12649 				struct drm_modeset_acquire_ctx *ctx)
12650 {
12651 	struct drm_device *dev = crtc->base.dev;
12652 	struct drm_i915_private *dev_priv = to_i915(dev);
12653 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12654 
12655 	if (crtc_state->hw.active) {
12656 		struct intel_plane *plane;
12657 
12658 		/* Clear any frame start delays used for debugging left by the BIOS */
12659 		intel_sanitize_frame_start_delay(crtc_state);
12660 
12661 		/* Disable everything but the primary plane */
12662 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12663 			const struct intel_plane_state *plane_state =
12664 				to_intel_plane_state(plane->base.state);
12665 
12666 			if (plane_state->uapi.visible &&
12667 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12668 				intel_plane_disable_noatomic(crtc, plane);
12669 		}
12670 
12671 		/*
12672 		 * Disable any background color set by the BIOS, but enable the
12673 		 * gamma and CSC to match how we program our planes.
12674 		 */
12675 		if (DISPLAY_VER(dev_priv) >= 9)
12676 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12677 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12678 	}
12679 
12680 	/* Adjust the state of the output pipe according to whether we
12681 	 * have active connectors/encoders. */
12682 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12683 	    !crtc_state->bigjoiner_slave)
12684 		intel_crtc_disable_noatomic(crtc, ctx);
12685 
12686 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12687 		/*
12688 		 * We start out with underrun reporting disabled to avoid races.
12689 		 * For correct bookkeeping mark this on active crtcs.
12690 		 *
12691 		 * Also on gmch platforms we don't have any hardware bits to
12692 		 * disable the underrun reporting. Which means we need to start
12693 		 * out with underrun reporting disabled also on inactive pipes,
12694 		 * since otherwise we'll complain about the garbage we read when
12695 		 * e.g. coming up after runtime pm.
12696 		 *
12697 		 * No protection against concurrent access is required - at
12698 		 * worst a fifo underrun happens which also sets this to false.
12699 		 */
12700 		crtc->cpu_fifo_underrun_disabled = true;
12701 		/*
12702 		 * We track the PCH transcoder underrun reporting state
12703 		 * within the crtc: the crtc for pipe A houses the underrun
12704 		 * reporting state for PCH transcoder A, the crtc for pipe B
12705 		 * houses it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12706 		 * and marking underrun reporting as disabled for the non-existing
12707 		 * PCH transcoders B and C would prevent enabling the south
12708 		 * error interrupt (see cpt_can_enable_serr_int()).
12709 		 */
12710 		if (has_pch_transcoder(dev_priv, crtc->pipe))
12711 			crtc->pch_fifo_underrun_disabled = true;
12712 	}
12713 }
12714 
12715 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12716 {
12717 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12718 
12719 	/*
12720 	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
12721 	 * the hardware when a high res display is plugged in. The DPLL P
12722 	 * divider is zero, and the pipe timings are bonkers. We'll
12723 	 * try to disable everything in that case.
12724 	 *
12725 	 * FIXME would be nice to be able to sanitize this state
12726 	 * without several WARNs, but for now let's take the easy
12727 	 * road.
12728 	 */
12729 	return IS_SANDYBRIDGE(dev_priv) &&
12730 		crtc_state->hw.active &&
12731 		crtc_state->shared_dpll &&
12732 		crtc_state->port_clock == 0;
12733 }
12734 
12735 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12736 {
12737 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12738 	struct intel_connector *connector;
12739 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12740 	struct intel_crtc_state *crtc_state = crtc ?
12741 		to_intel_crtc_state(crtc->base.state) : NULL;
12742 
12743 	/* We need to check both for a crtc link (meaning that the
12744 	 * encoder is active and trying to read from a pipe) and the
12745 	 * pipe itself being active. */
12746 	bool has_active_crtc = crtc_state &&
12747 		crtc_state->hw.active;
12748 
12749 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12750 		drm_dbg_kms(&dev_priv->drm,
12751 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12752 			    pipe_name(crtc->pipe));
12753 		has_active_crtc = false;
12754 	}
12755 
12756 	connector = intel_encoder_find_connector(encoder);
12757 	if (connector && !has_active_crtc) {
12758 		drm_dbg_kms(&dev_priv->drm,
12759 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12760 			    encoder->base.base.id,
12761 			    encoder->base.name);
12762 
12763 		/* Connector is active, but has no active pipe. This is
12764 		 * fallout from our resume register restoring. Disable
12765 		 * the encoder manually again. */
12766 		if (crtc_state) {
12767 			struct drm_encoder *best_encoder;
12768 
12769 			drm_dbg_kms(&dev_priv->drm,
12770 				    "[ENCODER:%d:%s] manually disabled\n",
12771 				    encoder->base.base.id,
12772 				    encoder->base.name);
12773 
12774 			/* avoid oopsing in case the hooks consult best_encoder */
12775 			best_encoder = connector->base.state->best_encoder;
12776 			connector->base.state->best_encoder = &encoder->base;
12777 
12778 			/* FIXME NULL atomic state passed! */
12779 			if (encoder->disable)
12780 				encoder->disable(NULL, encoder, crtc_state,
12781 						 connector->base.state);
12782 			if (encoder->post_disable)
12783 				encoder->post_disable(NULL, encoder, crtc_state,
12784 						      connector->base.state);
12785 
12786 			connector->base.state->best_encoder = best_encoder;
12787 		}
12788 		encoder->base.crtc = NULL;
12789 
12790 		/* Inconsistent output/port/pipe state happens presumably due to
12791 		 * a bug in one of the get_hw_state functions. Or someplace else
12792 		 * in our code, like the register restore mess on resume. Clamp
12793 		 * things to off as a safer default. */
12794 
12795 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12796 		connector->base.encoder = NULL;
12797 	}
12798 
12799 	/* notify opregion of the sanitized encoder state */
12800 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12801 
12802 	if (HAS_DDI(dev_priv))
12803 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
12804 }
12805 
12806 /* FIXME read out full plane state for all planes */
12807 static void readout_plane_state(struct drm_i915_private *dev_priv)
12808 {
12809 	struct intel_plane *plane;
12810 	struct intel_crtc *crtc;
12811 
12812 	for_each_intel_plane(&dev_priv->drm, plane) {
12813 		struct intel_plane_state *plane_state =
12814 			to_intel_plane_state(plane->base.state);
12815 		struct intel_crtc_state *crtc_state;
12816 		enum pipe pipe = PIPE_A;
12817 		bool visible;
12818 
12819 		visible = plane->get_hw_state(plane, &pipe);
12820 
12821 		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12822 		crtc_state = to_intel_crtc_state(crtc->base.state);
12823 
12824 		intel_set_plane_visible(crtc_state, plane_state, visible);
12825 
12826 		drm_dbg_kms(&dev_priv->drm,
12827 			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
12828 			    plane->base.base.id, plane->base.name,
12829 			    enableddisabled(visible), pipe_name(pipe));
12830 	}
12831 
12832 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12833 		struct intel_crtc_state *crtc_state =
12834 			to_intel_crtc_state(crtc->base.state);
12835 
12836 		fixup_plane_bitmasks(crtc_state);
12837 	}
12838 }
12839 
12840 static void intel_modeset_readout_hw_state(struct drm_device *dev)
12841 {
12842 	struct drm_i915_private *dev_priv = to_i915(dev);
12843 	struct intel_cdclk_state *cdclk_state =
12844 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
12845 	struct intel_dbuf_state *dbuf_state =
12846 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
12847 	enum pipe pipe;
12848 	struct intel_crtc *crtc;
12849 	struct intel_encoder *encoder;
12850 	struct intel_connector *connector;
12851 	struct drm_connector_list_iter conn_iter;
12852 	u8 active_pipes = 0;
12853 
12854 	for_each_intel_crtc(dev, crtc) {
12855 		struct intel_crtc_state *crtc_state =
12856 			to_intel_crtc_state(crtc->base.state);
12857 
12858 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
12859 		intel_crtc_free_hw_state(crtc_state);
12860 		intel_crtc_state_reset(crtc_state, crtc);
12861 
12862 		intel_crtc_get_pipe_config(crtc_state);
12863 
12864 		crtc_state->hw.enable = crtc_state->hw.active;
12865 
12866 		crtc->base.enabled = crtc_state->hw.enable;
12867 		crtc->active = crtc_state->hw.active;
12868 
12869 		if (crtc_state->hw.active)
12870 			active_pipes |= BIT(crtc->pipe);
12871 
12872 		drm_dbg_kms(&dev_priv->drm,
12873 			    "[CRTC:%d:%s] hw state readout: %s\n",
12874 			    crtc->base.base.id, crtc->base.name,
12875 			    enableddisabled(crtc_state->hw.active));
12876 	}
12877 
12878 	dev_priv->active_pipes = cdclk_state->active_pipes =
12879 		dbuf_state->active_pipes = active_pipes;
12880 
12881 	readout_plane_state(dev_priv);
12882 
12883 	for_each_intel_encoder(dev, encoder) {
12884 		struct intel_crtc_state *crtc_state = NULL;
12885 
12886 		pipe = PIPE_A;
12887 
12888 		if (encoder->get_hw_state(encoder, &pipe)) {
12889 			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12890 			crtc_state = to_intel_crtc_state(crtc->base.state);
12891 
12892 			encoder->base.crtc = &crtc->base;
12893 			intel_encoder_get_config(encoder, crtc_state);
12894 
12895 			/* read out to slave crtc as well for bigjoiner */
12896 			/* also read out the slave crtc state for bigjoiner */
12897 			if (crtc_state->bigjoiner) {
12898 				/* the encoder should be linked to the bigjoiner master */
12899 
12900 				crtc = crtc_state->bigjoiner_linked_crtc;
12901 				crtc_state = to_intel_crtc_state(crtc->base.state);
12902 				intel_encoder_get_config(encoder, crtc_state);
12903 			}
12904 		} else {
12905 			encoder->base.crtc = NULL;
12906 		}
12907 
12908 		if (encoder->sync_state)
12909 			encoder->sync_state(encoder, crtc_state);
12910 
12911 		drm_dbg_kms(&dev_priv->drm,
12912 			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
12913 			    encoder->base.base.id, encoder->base.name,
12914 			    enableddisabled(encoder->base.crtc),
12915 			    pipe_name(pipe));
12916 	}
12917 
12918 	intel_dpll_readout_hw_state(dev_priv);
12919 
12920 	drm_connector_list_iter_begin(dev, &conn_iter);
12921 	for_each_intel_connector_iter(connector, &conn_iter) {
12922 		if (connector->get_hw_state(connector)) {
12923 			struct intel_crtc_state *crtc_state;
12924 			struct intel_crtc *crtc;
12925 
12926 			connector->base.dpms = DRM_MODE_DPMS_ON;
12927 
12928 			encoder = intel_attached_encoder(connector);
12929 			connector->base.encoder = &encoder->base;
12930 
12931 			crtc = to_intel_crtc(encoder->base.crtc);
12932 			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
12933 
12934 			if (crtc_state && crtc_state->hw.active) {
12935 				/*
12936 				 * This has to be done during hardware readout
12937 				 * because anything calling .crtc_disable may
12938 				 * rely on the connector_mask being accurate.
12939 				 */
12940 				crtc_state->uapi.connector_mask |=
12941 					drm_connector_mask(&connector->base);
12942 				crtc_state->uapi.encoder_mask |=
12943 					drm_encoder_mask(&encoder->base);
12944 			}
12945 		} else {
12946 			connector->base.dpms = DRM_MODE_DPMS_OFF;
12947 			connector->base.encoder = NULL;
12948 		}
12949 		drm_dbg_kms(&dev_priv->drm,
12950 			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
12951 			    connector->base.base.id, connector->base.name,
12952 			    enableddisabled(connector->base.encoder));
12953 	}
12954 	drm_connector_list_iter_end(&conn_iter);
12955 
12956 	for_each_intel_crtc(dev, crtc) {
12957 		struct intel_bw_state *bw_state =
12958 			to_intel_bw_state(dev_priv->bw_obj.state);
12959 		struct intel_crtc_state *crtc_state =
12960 			to_intel_crtc_state(crtc->base.state);
12961 		struct intel_plane *plane;
12962 		int min_cdclk = 0;
12963 
12964 		if (crtc_state->bigjoiner_slave)
12965 			continue;
12966 
12967 		if (crtc_state->hw.active) {
12968 			/*
12969 			 * The initial mode needs to be set in order to keep
12970 			 * the atomic core happy. It wants a valid mode if the
12971 			 * crtc's enabled, so we do the above call.
12972 			 *
12973 			 * But we don't set all the derived state fully, hence
12974 			 * set a flag to indicate that a full recalculation is
12975 			 * needed on the next commit.
12976 			 */
12977 			crtc_state->inherited = true;
12978 
12979 			intel_crtc_update_active_timings(crtc_state);
12980 
12981 			intel_crtc_copy_hw_to_uapi_state(crtc_state);
12982 		}
12983 
12984 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
12985 			const struct intel_plane_state *plane_state =
12986 				to_intel_plane_state(plane->base.state);
12987 
12988 			/*
12989 			 * FIXME don't have the fb yet, so can't
12990 			 * use intel_plane_data_rate() :(
12991 			 */
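			/* conservatively assume the worst case of 4 bytes per pixel */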
12992 			if (plane_state->uapi.visible)
12993 				crtc_state->data_rate[plane->id] =
12994 					4 * crtc_state->pixel_rate;
12995 			/*
12996 			 * FIXME don't have the fb yet, so can't
12997 			 * use plane->min_cdclk() :(
12998 			 */
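			/* gen10+ and double wide pipes can handle a pixel rate up to 2 * cdclk */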
12999 			if (plane_state->uapi.visible && plane->min_cdclk) {
13000 				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
13001 					crtc_state->min_cdclk[plane->id] =
13002 						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
13003 				else
13004 					crtc_state->min_cdclk[plane->id] =
13005 						crtc_state->pixel_rate;
13006 			}
13007 			drm_dbg_kms(&dev_priv->drm,
13008 				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
13009 				    plane->base.base.id, plane->base.name,
13010 				    crtc_state->min_cdclk[plane->id]);
13011 		}
13012 
13013 		if (crtc_state->hw.active) {
13014 			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
13015 			if (drm_WARN_ON(dev, min_cdclk < 0))
13016 				min_cdclk = 0;
13017 		}
13018 
13019 		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
13020 		cdclk_state->min_voltage_level[crtc->pipe] =
13021 			crtc_state->min_voltage_level;
13022 
13023 		intel_bw_crtc_update(bw_state, crtc_state);
13024 
13025 		intel_pipe_config_sanity_check(dev_priv, crtc_state);
13026 
13027 		/* discard our incomplete slave state, copy it from master */
13028 		if (crtc_state->bigjoiner && crtc_state->hw.active) {
13029 			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
13030 			struct intel_crtc_state *slave_crtc_state =
13031 				to_intel_crtc_state(slave->base.state);
13032 
13033 			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
13034 			slave->base.mode = crtc->base.mode;
13035 
13036 			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
13037 			cdclk_state->min_voltage_level[slave->pipe] =
13038 				crtc_state->min_voltage_level;
13039 
13040 			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
13041 				const struct intel_plane_state *plane_state =
13042 					to_intel_plane_state(plane->base.state);
13043 
13044 				/*
13045 				 * FIXME don't have the fb yet, so can't
13046 				 * use intel_plane_data_rate() :(
13047 				 */
13048 				if (plane_state->uapi.visible)
13049 					crtc_state->data_rate[plane->id] =
13050 						4 * crtc_state->pixel_rate;
13051 				else
13052 					crtc_state->data_rate[plane->id] = 0;
13053 			}
13054 
13055 			intel_bw_crtc_update(bw_state, slave_crtc_state);
13056 			drm_calc_timestamping_constants(&slave->base,
13057 							&slave_crtc_state->hw.adjusted_mode);
13058 		}
13059 	}
13060 }
13061 
13062 static void
13063 get_encoder_power_domains(struct drm_i915_private *dev_priv)
13064 {
13065 	struct intel_encoder *encoder;
13066 
13067 	for_each_intel_encoder(&dev_priv->drm, encoder) {
13068 		struct intel_crtc_state *crtc_state;
13069 
13070 		if (!encoder->get_power_domains)
13071 			continue;
13072 
13073 		/*
13074 		 * MST-primary and inactive encoders don't have a crtc state
13075 		 * and neither of these require any power domain references.
13076 		 */
13077 		if (!encoder->base.crtc)
13078 			continue;
13079 
13080 		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
13081 		encoder->get_power_domains(encoder, crtc_state);
13082 	}
13083 }
13084 
13085 static void intel_early_display_was(struct drm_i915_private *dev_priv)
13086 {
13087 	/*
13088 	 * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
13089 	 * Also known as Wa_14010480278.
13090 	 */
13091 	if (IS_DISPLAY_VER(dev_priv, 10, 12))
13092 		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
13093 			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
13094 
13095 	if (IS_HASWELL(dev_priv)) {
13096 		/*
13097 		 * WaRsPkgCStateDisplayPMReq:hsw
13098 		 * System hang if this isn't done before disabling all planes!
13099 		 */
13100 		intel_de_write(dev_priv, CHICKEN_PAR1_1,
13101 			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
13102 	}
13103 
13104 	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
13105 		/* Display WA #1142:kbl,cfl,cml */
13106 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
13107 			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
13108 		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
13109 			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
13110 			     KBL_ARB_FILL_SPARE_14);
13111 	}
13112 }
13113 
13114 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
13115 				       enum port port, i915_reg_t hdmi_reg)
13116 {
13117 	u32 val = intel_de_read(dev_priv, hdmi_reg);
13118 
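	/* Leave the port alone if it's enabled or already selects pipe A. */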
13119 	if (val & SDVO_ENABLE ||
13120 	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
13121 		return;
13122 
13123 	drm_dbg_kms(&dev_priv->drm,
13124 		    "Sanitizing transcoder select for HDMI %c\n",
13125 		    port_name(port));
13126 
13127 	val &= ~SDVO_PIPE_SEL_MASK;
13128 	val |= SDVO_PIPE_SEL(PIPE_A);
13129 
13130 	intel_de_write(dev_priv, hdmi_reg, val);
13131 }
13132 
13133 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
13134 				     enum port port, i915_reg_t dp_reg)
13135 {
13136 	u32 val = intel_de_read(dev_priv, dp_reg);
13137 
13138 	if (val & DP_PORT_EN ||
13139 	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
13140 		return;
13141 
13142 	drm_dbg_kms(&dev_priv->drm,
13143 		    "Sanitizing transcoder select for DP %c\n",
13144 		    port_name(port));
13145 
13146 	val &= ~DP_PIPE_SEL_MASK;
13147 	val |= DP_PIPE_SEL(PIPE_A);
13148 
13149 	intel_de_write(dev_priv, dp_reg, val);
13150 }
13151 
13152 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
13153 {
13154 	/*
13155 	 * The BIOS may select transcoder B on some of the PCH
13156 	 * ports even if it doesn't enable the port. This would trip
13157 	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
13158 	 * Sanitize the transcoder select bits to prevent that. We
13159 	 * assume that the BIOS never actually enabled the port,
13160 	 * because if it did we'd actually have to toggle the port
13161 	 * on and back off to make the transcoder A select stick
13162 	 * (see intel_dp_link_down(), intel_disable_hdmi(),
13163 	 * intel_disable_sdvo()).
13164 	 */
13165 	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
13166 	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
13167 	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
13168 
13169 	/* PCH SDVOB multiplexes with HDMIB */
13170 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
13171 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
13172 	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
13173 }
13174 
13175 /* Scan out the current hw modeset state
13176  * and sanitize it to match the current state.
13177  */
13178 static void
13179 intel_modeset_setup_hw_state(struct drm_device *dev,
13180 			     struct drm_modeset_acquire_ctx *ctx)
13181 {
13182 	struct drm_i915_private *dev_priv = to_i915(dev);
13183 	struct intel_encoder *encoder;
13184 	struct intel_crtc *crtc;
13185 	intel_wakeref_t wakeref;
13186 
13187 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
13188 
13189 	intel_early_display_was(dev_priv);
13190 	intel_modeset_readout_hw_state(dev);
13191 
13192 	/* HW state is read out, now we need to sanitize this mess. */
13193 	get_encoder_power_domains(dev_priv);
13194 
13195 	if (HAS_PCH_IBX(dev_priv))
13196 		ibx_sanitize_pch_ports(dev_priv);
13197 
13198 	/*
13199 	 * intel_sanitize_plane_mapping() may need to do vblank
13200 	 * waits, so we need vblank interrupts restored beforehand.
13201 	 */
13202 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13203 		struct intel_crtc_state *crtc_state =
13204 			to_intel_crtc_state(crtc->base.state);
13205 
13206 		drm_crtc_vblank_reset(&crtc->base);
13207 
13208 		if (crtc_state->hw.active)
13209 			intel_crtc_vblank_on(crtc_state);
13210 	}
13211 
13212 	intel_sanitize_plane_mapping(dev_priv);
13213 
13214 	for_each_intel_encoder(dev, encoder)
13215 		intel_sanitize_encoder(encoder);
13216 
13217 	for_each_intel_crtc(&dev_priv->drm, crtc) {
13218 		struct intel_crtc_state *crtc_state =
13219 			to_intel_crtc_state(crtc->base.state);
13220 
13221 		intel_sanitize_crtc(crtc, ctx);
13222 		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
13223 	}
13224 
13225 	intel_modeset_update_connector_atomic_state(dev);
13226 
13227 	intel_dpll_sanitize_state(dev_priv);
13228 
13229 	if (IS_G4X(dev_priv)) {
13230 		g4x_wm_get_hw_state(dev_priv);
13231 		g4x_wm_sanitize(dev_priv);
13232 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
13233 		vlv_wm_get_hw_state(dev_priv);
13234 		vlv_wm_sanitize(dev_priv);
13235 	} else if (DISPLAY_VER(dev_priv) >= 9) {
13236 		skl_wm_get_hw_state(dev_priv);
13237 		skl_wm_sanitize(dev_priv);
13238 	} else if (HAS_PCH_SPLIT(dev_priv)) {
13239 		ilk_wm_get_hw_state(dev_priv);
13240 	}
13241 
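	/*
	 * Sanity check: no stale power domain references are expected
	 * at this point; warn and drop any that remain.
	 */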
13242 	for_each_intel_crtc(dev, crtc) {
13243 		struct intel_crtc_state *crtc_state =
13244 			to_intel_crtc_state(crtc->base.state);
13245 		u64 put_domains;
13246 
13247 		put_domains = modeset_get_crtc_power_domains(crtc_state);
13248 		if (drm_WARN_ON(dev, put_domains))
13249 			modeset_put_crtc_power_domains(crtc, put_domains);
13250 	}
13251 
13252 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
13253 }
13254 
13255 void intel_display_resume(struct drm_device *dev)
13256 {
13257 	struct drm_i915_private *dev_priv = to_i915(dev);
13258 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
13259 	struct drm_modeset_acquire_ctx ctx;
13260 	int ret;
13261 
13262 	if (!HAS_DISPLAY(dev_priv))
13263 		return;
13264 
13265 	dev_priv->modeset_restore_state = NULL;
13266 	if (state)
13267 		state->acquire_ctx = &ctx;
13268 
13269 	drm_modeset_acquire_init(&ctx, 0);
13270 
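	/*
	 * Standard atomic modeset locking dance: back off and retry
	 * whenever lock acquisition returns -EDEADLK.
	 */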
13271 	while (1) {
13272 		ret = drm_modeset_lock_all_ctx(dev, &ctx);
13273 		if (ret != -EDEADLK)
13274 			break;
13275 
13276 		drm_modeset_backoff(&ctx);
13277 	}
13278 
13279 	if (!ret)
13280 		ret = __intel_display_resume(dev, state, &ctx);
13281 
13282 	intel_enable_ipc(dev_priv);
13283 	drm_modeset_drop_locks(&ctx);
13284 	drm_modeset_acquire_fini(&ctx);
13285 
13286 	if (ret)
13287 		drm_err(&dev_priv->drm,
13288 			"Restoring old state failed with %i\n", ret);
13289 	if (state)
13290 		drm_atomic_state_put(state);
13291 }
13292 
13293 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
13294 {
13295 	struct intel_connector *connector;
13296 	struct drm_connector_list_iter conn_iter;
13297 
13298 	/* Kill all the work that may have been queued by hpd. */
13299 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
13300 	for_each_intel_connector_iter(connector, &conn_iter) {
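		/*
		 * The work callback field differs between Linux and the
		 * OpenBSD task-based compat layer; only cancel work that
		 * was actually initialized.
		 */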
13301 #ifdef __linux__
13302 		if (connector->modeset_retry_work.func)
13303 #else
13304 		if (connector->modeset_retry_work.task.t_func)
13305 #endif
13306 			cancel_work_sync(&connector->modeset_retry_work);
13307 		if (connector->hdcp.shim) {
13308 			cancel_delayed_work_sync(&connector->hdcp.check_work);
13309 			cancel_work_sync(&connector->hdcp.prop_work);
13310 		}
13311 	}
13312 	drm_connector_list_iter_end(&conn_iter);
13313 }
13314 
13315 /* part #1: call before irq uninstall */
13316 void intel_modeset_driver_remove(struct drm_i915_private *i915)
13317 {
13318 	if (!HAS_DISPLAY(i915))
13319 		return;
13320 
13321 	flush_workqueue(i915->flip_wq);
13322 	flush_workqueue(i915->modeset_wq);
13323 
13324 	flush_work(&i915->atomic_helper.free_work);
13325 	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
13326 }
13327 
13328 /* part #2: call after irq uninstall */
13329 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
13330 {
13331 	if (!HAS_DISPLAY(i915))
13332 		return;
13333 
13334 	/*
13335 	 * Due to the hpd irq storm handling, the hotplug work can re-arm the
13336 	 * poll handlers. Hence disable polling after hpd handling is shut down.
13337 	 */
13338 	intel_hpd_poll_fini(i915);
13339 
13340 	/*
13341 	 * MST topology needs to be suspended so we don't have any calls to
13342 	 * fbdev after it's finalized. MST will be destroyed later as part of
13343 	 * drm_mode_config_cleanup()
13344 	 */
13345 	intel_dp_mst_suspend(i915);
13346 
13347 	/* poll work can call into fbdev, hence clean that up afterwards */
13348 	intel_fbdev_fini(i915);
13349 
13350 	intel_unregister_dsm_handler();
13351 
13352 	intel_fbc_global_disable(i915);
13353 
13354 	/* flush any delayed tasks or pending work */
13355 	flush_scheduled_work();
13356 
13357 	intel_hdcp_component_fini(i915);
13358 
13359 	intel_mode_config_cleanup(i915);
13360 
13361 	intel_overlay_cleanup(i915);
13362 
13363 	intel_gmbus_teardown(i915);
13364 
13365 	destroy_workqueue(i915->flip_wq);
13366 	destroy_workqueue(i915->modeset_wq);
13367 
13368 	intel_fbc_cleanup_cfb(i915);
13369 }
13370 
13371 /* part #3: call after gem init */
13372 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
13373 {
13374 	intel_dmc_ucode_fini(i915);
13375 
13376 	intel_power_domains_driver_remove(i915);
13377 
13378 	intel_vga_unregister(i915);
13379 
13380 	intel_bios_driver_remove(i915);
13381 }
13382 
13383 void intel_display_driver_register(struct drm_i915_private *i915)
13384 {
13385 	if (!HAS_DISPLAY(i915))
13386 		return;
13387 
13388 	intel_display_debugfs_register(i915);
13389 
13390 	/* Must be done after probing outputs */
13391 	intel_opregion_register(i915);
13392 	acpi_video_register();
13393 
13394 	intel_audio_init(i915);
13395 
13396 	/*
13397 	 * Some ports require correctly set-up hpd registers for
13398 	 * detection to work properly (otherwise we get a ghost
13399 	 * connected connector status), e.g. VGA on gm45.  Hence we
13400 	 * can only set up the initial fbdev config after hpd irqs
13401 	 * are fully enabled. We do it last so that the async config
13402 	 * cannot run before the connectors are registered.
13403 	 */
13404 	intel_fbdev_initial_config_async(&i915->drm);
13405 
13406 	/*
13407 	 * We need to coordinate the hotplugs with the asynchronous
13408 	 * fbdev configuration, for which we use the
13409 	 * fbdev->async_cookie.
13410 	 */
13411 	drm_kms_helper_poll_init(&i915->drm);
13412 }
13413 
13414 void intel_display_driver_unregister(struct drm_i915_private *i915)
13415 {
13416 	if (!HAS_DISPLAY(i915))
13417 		return;
13418 
13419 	intel_fbdev_unregister(i915);
13420 	intel_audio_deinit(i915);
13421 
13422 	/*
13423 	 * After flushing the fbdev (incl. a late async config which
13424 	 * will have delayed queuing of a hotplug event), flush
13425 	 * the hotplug events.
13426 	 */
13427 	drm_kms_helper_poll_fini(&i915->drm);
13428 	drm_atomic_helper_shutdown(&i915->drm);
13429 
13430 	acpi_video_unregister();
13431 	intel_opregion_unregister(i915);
13432 }
13433