xref: /dflybsd-src/sys/dev/drm/i915/intel_display.c (revision 6994034513cc2f8ff3d79e00d28295f4dcceaf02)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/i2c.h>
30 #include <linux/kernel.h>
31 #include <drm/drm_edid.h>
32 #include <drm/drmP.h>
33 #include "intel_drv.h"
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "intel_dsi.h"
37 #include "i915_trace.h"
38 #include <drm/drm_atomic.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_rect.h>
44 #include <linux/dma_remapping.h>
45 #include <linux/reservation.h>
46 #include <linux/dma-buf.h>
47 
48 /* Primary plane formats for gen <= 3 */
49 static const uint32_t i8xx_primary_formats[] = {
50 	DRM_FORMAT_C8,
51 	DRM_FORMAT_RGB565,
52 	DRM_FORMAT_XRGB1555,
53 	DRM_FORMAT_XRGB8888,
54 };
55 
56 /* Primary plane formats for gen >= 4 */
57 static const uint32_t i965_primary_formats[] = {
58 	DRM_FORMAT_C8,
59 	DRM_FORMAT_RGB565,
60 	DRM_FORMAT_XRGB8888,
61 	DRM_FORMAT_XBGR8888,
62 	DRM_FORMAT_XRGB2101010,
63 	DRM_FORMAT_XBGR2101010,
64 };
65 
66 static const uint32_t skl_primary_formats[] = {
67 	DRM_FORMAT_C8,
68 	DRM_FORMAT_RGB565,
69 	DRM_FORMAT_XRGB8888,
70 	DRM_FORMAT_XBGR8888,
71 	DRM_FORMAT_ARGB8888,
72 	DRM_FORMAT_ABGR8888,
73 	DRM_FORMAT_XRGB2101010,
74 	DRM_FORMAT_XBGR2101010,
75 	DRM_FORMAT_YUYV,
76 	DRM_FORMAT_YVYU,
77 	DRM_FORMAT_UYVY,
78 	DRM_FORMAT_VYUY,
79 };
80 
81 /* Cursor formats */
82 static const uint32_t intel_cursor_formats[] = {
83 	DRM_FORMAT_ARGB8888,
84 };
85 
86 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
87 				struct intel_crtc_state *pipe_config);
88 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
89 				   struct intel_crtc_state *pipe_config);
90 
91 static int intel_framebuffer_init(struct drm_device *dev,
92 				  struct intel_framebuffer *ifb,
93 				  struct drm_mode_fb_cmd2 *mode_cmd,
94 				  struct drm_i915_gem_object *obj);
95 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
96 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
97 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
98 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
99 					 struct intel_link_m_n *m_n,
100 					 struct intel_link_m_n *m2_n2);
101 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
102 static void haswell_set_pipeconf(struct drm_crtc *crtc);
103 static void haswell_set_pipemisc(struct drm_crtc *crtc);
104 static void vlv_prepare_pll(struct intel_crtc *crtc,
105 			    const struct intel_crtc_state *pipe_config);
106 static void chv_prepare_pll(struct intel_crtc *crtc,
107 			    const struct intel_crtc_state *pipe_config);
108 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
109 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
111 	struct intel_crtc_state *crtc_state);
112 static void skylake_pfit_enable(struct intel_crtc *crtc);
113 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
114 static void ironlake_pfit_enable(struct intel_crtc *crtc);
115 static void intel_modeset_setup_hw_state(struct drm_device *dev);
116 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
117 
118 typedef struct {
119 	int	min, max;
120 } intel_range_t;
121 
122 typedef struct {
123 	int	dot_limit;
124 	int	p2_slow, p2_fast;
125 } intel_p2_t;
126 
127 typedef struct intel_limit intel_limit_t;
128 struct intel_limit {
129 	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
130 	intel_p2_t	    p2;
131 };
132 
133 /* returns HPLL frequency in kHz */
134 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
135 {
136 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
137 
138 	/* Obtain SKU information */
139 	mutex_lock(&dev_priv->sb_lock);
140 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
141 		CCK_FUSE_HPLL_FREQ_MASK;
142 	mutex_unlock(&dev_priv->sb_lock);
143 
144 	return vco_freq[hpll_freq] * 1000;
145 }
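
/*
 * Illustrative example (the fuse value here is hypothetical): a fused
 * CCK_FUSE_HPLL_FREQ field of 2 indexes vco_freq[2] = 2000, so the
 * helper reports a 2 GHz HPLL as 2000000 kHz.
 */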
146 
147 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
148 		      const char *name, u32 reg, int ref_freq)
149 {
150 	u32 val;
151 	int divider;
152 
153 	mutex_lock(&dev_priv->sb_lock);
154 	val = vlv_cck_read(dev_priv, reg);
155 	mutex_unlock(&dev_priv->sb_lock);
156 
157 	divider = val & CCK_FREQUENCY_VALUES;
158 
159 	WARN((val & CCK_FREQUENCY_STATUS) !=
160 	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
161 	     "%s change in progress\n", name);
162 
163 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
164 }
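
/*
 * Worked example with hypothetical values: for ref_freq = 1600000 kHz
 * and a divider field of 7, the result is
 * DIV_ROUND_CLOSEST(1600000 << 1, 7 + 1) = 400000 kHz, i.e. the CCK
 * clock is 2 * ref / (divider + 1).
 */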
165 
166 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
167 				  const char *name, u32 reg)
168 {
169 	if (dev_priv->hpll_freq == 0)
170 		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
171 
172 	return vlv_get_cck_clock(dev_priv, name, reg,
173 				 dev_priv->hpll_freq);
174 }
175 
176 static int
177 intel_pch_rawclk(struct drm_i915_private *dev_priv)
178 {
179 	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
180 }
181 
182 static int
183 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
184 {
185 	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
186 				      CCK_DISPLAY_REF_CLOCK_CONTROL);
187 }
188 
189 static int
190 intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
191 {
192 	uint32_t clkcfg;
193 
194 	/* hrawclock is 1/4 the FSB frequency */
195 	clkcfg = I915_READ(CLKCFG);
196 	switch (clkcfg & CLKCFG_FSB_MASK) {
197 	case CLKCFG_FSB_400:
198 		return 100000;
199 	case CLKCFG_FSB_533:
200 		return 133333;
201 	case CLKCFG_FSB_667:
202 		return 166667;
203 	case CLKCFG_FSB_800:
204 		return 200000;
205 	case CLKCFG_FSB_1067:
206 		return 266667;
207 	case CLKCFG_FSB_1333:
208 		return 333333;
209 	/* these two are just a guess; one of them might be right */
210 	case CLKCFG_FSB_1600:
211 	case CLKCFG_FSB_1600_ALT:
212 		return 400000;
213 	default:
214 		return 133333;
215 	}
216 }
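
/*
 * The returned values are simply FSB/4 in kHz: e.g. the 400 MHz strap
 * gives 400000 / 4 = 100000 and the 1067 MHz strap gives roughly
 * 1066667 / 4 ~= 266667, matching the table above.
 */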
217 
218 static void intel_update_rawclk(struct drm_i915_private *dev_priv)
219 {
220 	if (HAS_PCH_SPLIT(dev_priv))
221 		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
222 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
223 		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
224 	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
225 		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
226 	else
227 		return; /* no rawclk on other platforms, or no need to know it */
228 
229 	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
230 }
231 
232 static void intel_update_czclk(struct drm_i915_private *dev_priv)
233 {
234 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
235 		return;
236 
237 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
238 						      CCK_CZ_CLOCK_CONTROL);
239 
240 	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
241 }
242 
243 static inline u32 /* units of kHz */
244 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
245 		    const struct intel_crtc_state *pipe_config)
246 {
247 	if (HAS_DDI(dev_priv))
248 		return pipe_config->port_clock; /* SPLL */
249 	else if (IS_GEN5(dev_priv))
250 		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
251 	else
252 		return 270000;
253 }
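
/*
 * Example for the GEN5 branch (register contents hypothetical): a
 * feedback field of 25 yields (25 + 2) * 10000 = 270000 kHz, the same
 * 2.7 GHz link rate used as the fallback for other platforms.
 */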
254 
255 static const intel_limit_t intel_limits_i8xx_dac = {
256 	.dot = { .min = 25000, .max = 350000 },
257 	.vco = { .min = 908000, .max = 1512000 },
258 	.n = { .min = 2, .max = 16 },
259 	.m = { .min = 96, .max = 140 },
260 	.m1 = { .min = 18, .max = 26 },
261 	.m2 = { .min = 6, .max = 16 },
262 	.p = { .min = 4, .max = 128 },
263 	.p1 = { .min = 2, .max = 33 },
264 	.p2 = { .dot_limit = 165000,
265 		.p2_slow = 4, .p2_fast = 2 },
266 };
267 
268 static const intel_limit_t intel_limits_i8xx_dvo = {
269 	.dot = { .min = 25000, .max = 350000 },
270 	.vco = { .min = 908000, .max = 1512000 },
271 	.n = { .min = 2, .max = 16 },
272 	.m = { .min = 96, .max = 140 },
273 	.m1 = { .min = 18, .max = 26 },
274 	.m2 = { .min = 6, .max = 16 },
275 	.p = { .min = 4, .max = 128 },
276 	.p1 = { .min = 2, .max = 33 },
277 	.p2 = { .dot_limit = 165000,
278 		.p2_slow = 4, .p2_fast = 4 },
279 };
280 
281 static const intel_limit_t intel_limits_i8xx_lvds = {
282 	.dot = { .min = 25000, .max = 350000 },
283 	.vco = { .min = 908000, .max = 1512000 },
284 	.n = { .min = 2, .max = 16 },
285 	.m = { .min = 96, .max = 140 },
286 	.m1 = { .min = 18, .max = 26 },
287 	.m2 = { .min = 6, .max = 16 },
288 	.p = { .min = 4, .max = 128 },
289 	.p1 = { .min = 1, .max = 6 },
290 	.p2 = { .dot_limit = 165000,
291 		.p2_slow = 14, .p2_fast = 7 },
292 };
293 
294 static const intel_limit_t intel_limits_i9xx_sdvo = {
295 	.dot = { .min = 20000, .max = 400000 },
296 	.vco = { .min = 1400000, .max = 2800000 },
297 	.n = { .min = 1, .max = 6 },
298 	.m = { .min = 70, .max = 120 },
299 	.m1 = { .min = 8, .max = 18 },
300 	.m2 = { .min = 3, .max = 7 },
301 	.p = { .min = 5, .max = 80 },
302 	.p1 = { .min = 1, .max = 8 },
303 	.p2 = { .dot_limit = 200000,
304 		.p2_slow = 10, .p2_fast = 5 },
305 };
306 
307 static const intel_limit_t intel_limits_i9xx_lvds = {
308 	.dot = { .min = 20000, .max = 400000 },
309 	.vco = { .min = 1400000, .max = 2800000 },
310 	.n = { .min = 1, .max = 6 },
311 	.m = { .min = 70, .max = 120 },
312 	.m1 = { .min = 8, .max = 18 },
313 	.m2 = { .min = 3, .max = 7 },
314 	.p = { .min = 7, .max = 98 },
315 	.p1 = { .min = 1, .max = 8 },
316 	.p2 = { .dot_limit = 112000,
317 		.p2_slow = 14, .p2_fast = 7 },
318 };
319 
320 
321 static const intel_limit_t intel_limits_g4x_sdvo = {
322 	.dot = { .min = 25000, .max = 270000 },
323 	.vco = { .min = 1750000, .max = 3500000},
324 	.n = { .min = 1, .max = 4 },
325 	.m = { .min = 104, .max = 138 },
326 	.m1 = { .min = 17, .max = 23 },
327 	.m2 = { .min = 5, .max = 11 },
328 	.p = { .min = 10, .max = 30 },
329 	.p1 = { .min = 1, .max = 3},
330 	.p2 = { .dot_limit = 270000,
331 		.p2_slow = 10,
332 		.p2_fast = 10
333 	},
334 };
335 
336 static const intel_limit_t intel_limits_g4x_hdmi = {
337 	.dot = { .min = 22000, .max = 400000 },
338 	.vco = { .min = 1750000, .max = 3500000},
339 	.n = { .min = 1, .max = 4 },
340 	.m = { .min = 104, .max = 138 },
341 	.m1 = { .min = 16, .max = 23 },
342 	.m2 = { .min = 5, .max = 11 },
343 	.p = { .min = 5, .max = 80 },
344 	.p1 = { .min = 1, .max = 8},
345 	.p2 = { .dot_limit = 165000,
346 		.p2_slow = 10, .p2_fast = 5 },
347 };
348 
349 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
350 	.dot = { .min = 20000, .max = 115000 },
351 	.vco = { .min = 1750000, .max = 3500000 },
352 	.n = { .min = 1, .max = 3 },
353 	.m = { .min = 104, .max = 138 },
354 	.m1 = { .min = 17, .max = 23 },
355 	.m2 = { .min = 5, .max = 11 },
356 	.p = { .min = 28, .max = 112 },
357 	.p1 = { .min = 2, .max = 8 },
358 	.p2 = { .dot_limit = 0,
359 		.p2_slow = 14, .p2_fast = 14
360 	},
361 };
362 
363 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
364 	.dot = { .min = 80000, .max = 224000 },
365 	.vco = { .min = 1750000, .max = 3500000 },
366 	.n = { .min = 1, .max = 3 },
367 	.m = { .min = 104, .max = 138 },
368 	.m1 = { .min = 17, .max = 23 },
369 	.m2 = { .min = 5, .max = 11 },
370 	.p = { .min = 14, .max = 42 },
371 	.p1 = { .min = 2, .max = 6 },
372 	.p2 = { .dot_limit = 0,
373 		.p2_slow = 7, .p2_fast = 7
374 	},
375 };
376 
377 static const intel_limit_t intel_limits_pineview_sdvo = {
378 	.dot = { .min = 20000, .max = 400000},
379 	.vco = { .min = 1700000, .max = 3500000 },
380 	/* Pineview's Ncounter is a ring counter */
381 	.n = { .min = 3, .max = 6 },
382 	.m = { .min = 2, .max = 256 },
383 	/* Pineview only has one combined m divider, which we treat as m2. */
384 	.m1 = { .min = 0, .max = 0 },
385 	.m2 = { .min = 0, .max = 254 },
386 	.p = { .min = 5, .max = 80 },
387 	.p1 = { .min = 1, .max = 8 },
388 	.p2 = { .dot_limit = 200000,
389 		.p2_slow = 10, .p2_fast = 5 },
390 };
391 
392 static const intel_limit_t intel_limits_pineview_lvds = {
393 	.dot = { .min = 20000, .max = 400000 },
394 	.vco = { .min = 1700000, .max = 3500000 },
395 	.n = { .min = 3, .max = 6 },
396 	.m = { .min = 2, .max = 256 },
397 	.m1 = { .min = 0, .max = 0 },
398 	.m2 = { .min = 0, .max = 254 },
399 	.p = { .min = 7, .max = 112 },
400 	.p1 = { .min = 1, .max = 8 },
401 	.p2 = { .dot_limit = 112000,
402 		.p2_slow = 14, .p2_fast = 14 },
403 };
404 
405 /* Ironlake / Sandybridge
406  *
407  * We calculate clock using (register_value + 2) for N/M1/M2, so here
408  * the range value for them is (actual_value - 2).
409  */
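/*
 * For example, .n = { .min = 1, .max = 5 } below therefore describes
 * actual N dividers of 3..7 once the +2 encoding is applied.
 */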
410 static const intel_limit_t intel_limits_ironlake_dac = {
411 	.dot = { .min = 25000, .max = 350000 },
412 	.vco = { .min = 1760000, .max = 3510000 },
413 	.n = { .min = 1, .max = 5 },
414 	.m = { .min = 79, .max = 127 },
415 	.m1 = { .min = 12, .max = 22 },
416 	.m2 = { .min = 5, .max = 9 },
417 	.p = { .min = 5, .max = 80 },
418 	.p1 = { .min = 1, .max = 8 },
419 	.p2 = { .dot_limit = 225000,
420 		.p2_slow = 10, .p2_fast = 5 },
421 };
422 
423 static const intel_limit_t intel_limits_ironlake_single_lvds = {
424 	.dot = { .min = 25000, .max = 350000 },
425 	.vco = { .min = 1760000, .max = 3510000 },
426 	.n = { .min = 1, .max = 3 },
427 	.m = { .min = 79, .max = 118 },
428 	.m1 = { .min = 12, .max = 22 },
429 	.m2 = { .min = 5, .max = 9 },
430 	.p = { .min = 28, .max = 112 },
431 	.p1 = { .min = 2, .max = 8 },
432 	.p2 = { .dot_limit = 225000,
433 		.p2_slow = 14, .p2_fast = 14 },
434 };
435 
436 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
437 	.dot = { .min = 25000, .max = 350000 },
438 	.vco = { .min = 1760000, .max = 3510000 },
439 	.n = { .min = 1, .max = 3 },
440 	.m = { .min = 79, .max = 127 },
441 	.m1 = { .min = 12, .max = 22 },
442 	.m2 = { .min = 5, .max = 9 },
443 	.p = { .min = 14, .max = 56 },
444 	.p1 = { .min = 2, .max = 8 },
445 	.p2 = { .dot_limit = 225000,
446 		.p2_slow = 7, .p2_fast = 7 },
447 };
448 
449 /* LVDS 100MHz refclk limits. */
450 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
451 	.dot = { .min = 25000, .max = 350000 },
452 	.vco = { .min = 1760000, .max = 3510000 },
453 	.n = { .min = 1, .max = 2 },
454 	.m = { .min = 79, .max = 126 },
455 	.m1 = { .min = 12, .max = 22 },
456 	.m2 = { .min = 5, .max = 9 },
457 	.p = { .min = 28, .max = 112 },
458 	.p1 = { .min = 2, .max = 8 },
459 	.p2 = { .dot_limit = 225000,
460 		.p2_slow = 14, .p2_fast = 14 },
461 };
462 
463 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
464 	.dot = { .min = 25000, .max = 350000 },
465 	.vco = { .min = 1760000, .max = 3510000 },
466 	.n = { .min = 1, .max = 3 },
467 	.m = { .min = 79, .max = 126 },
468 	.m1 = { .min = 12, .max = 22 },
469 	.m2 = { .min = 5, .max = 9 },
470 	.p = { .min = 14, .max = 42 },
471 	.p1 = { .min = 2, .max = 6 },
472 	.p2 = { .dot_limit = 225000,
473 		.p2_slow = 7, .p2_fast = 7 },
474 };
475 
476 static const intel_limit_t intel_limits_vlv = {
477 	 /*
478 	  * These are the data rate limits (measured in fast clocks)
479 	  * since those are the strictest limits we have. The fast
480 	  * clock and actual rate limits are more relaxed, so checking
481 	  * them would make no difference.
482 	  */
483 	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
484 	.vco = { .min = 4000000, .max = 6000000 },
485 	.n = { .min = 1, .max = 7 },
486 	.m1 = { .min = 2, .max = 3 },
487 	.m2 = { .min = 11, .max = 156 },
488 	.p1 = { .min = 2, .max = 3 },
489 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
490 };
491 
492 static const intel_limit_t intel_limits_chv = {
493 	/*
494 	 * These are the data rate limits (measured in fast clocks)
495 	 * since those are the strictest limits we have.  The fast
496 	 * clock and actual rate limits are more relaxed, so checking
497 	 * them would make no difference.
498 	 */
499 	.dot = { .min = 25000 * 5, .max = 540000 * 5},
500 	.vco = { .min = 4800000, .max = 6480000 },
501 	.n = { .min = 1, .max = 1 },
502 	.m1 = { .min = 2, .max = 2 },
503 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
504 	.p1 = { .min = 2, .max = 4 },
505 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
506 };
507 
508 static const intel_limit_t intel_limits_bxt = {
509 	/* FIXME: find real dot limits */
510 	.dot = { .min = 0, .max = INT_MAX },
511 	.vco = { .min = 4800000, .max = 6700000 },
512 	.n = { .min = 1, .max = 1 },
513 	.m1 = { .min = 2, .max = 2 },
514 	/* FIXME: find real m2 limits */
515 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
516 	.p1 = { .min = 2, .max = 4 },
517 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
518 };
519 
520 static bool
521 needs_modeset(struct drm_crtc_state *state)
522 {
523 	return drm_atomic_crtc_needs_modeset(state);
524 }
525 
526 /**
527  * Returns whether any output on the specified pipe is of the specified type
528  */
529 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
530 {
531 	struct drm_device *dev = crtc->base.dev;
532 	struct intel_encoder *encoder;
533 
534 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
535 		if (encoder->type == type)
536 			return true;
537 
538 	return false;
539 }
540 
541 /**
542  * Returns whether any output on the specified pipe will have the specified
543  * type after a staged modeset is complete, i.e., the same as
544  * intel_pipe_has_type() but looking at the staged atomic connector state
545  * instead of the current encoder->crtc.
546  */
547 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
548 				      int type)
549 {
550 	struct drm_atomic_state *state = crtc_state->base.state;
551 	struct drm_connector *connector;
552 	struct drm_connector_state *connector_state;
553 	struct intel_encoder *encoder;
554 	int i, num_connectors = 0;
555 
556 	for_each_connector_in_state(state, connector, connector_state, i) {
557 		if (connector_state->crtc != crtc_state->base.crtc)
558 			continue;
559 
560 		num_connectors++;
561 
562 		encoder = to_intel_encoder(connector_state->best_encoder);
563 		if (encoder->type == type)
564 			return true;
565 	}
566 
567 	WARN_ON(num_connectors == 0);
568 
569 	return false;
570 }
571 
572 /*
573  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
574  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
575  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
576  * The helpers' return value is the rate of the clock that is fed to the
577  * display engine's pipe which can be the above fast dot clock rate or a
578  * divided-down version of it.
579  */
580 /* m1 is reserved as 0 in Pineview, n is a ring counter */
581 static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
582 {
583 	clock->m = clock->m2 + 2;
584 	clock->p = clock->p1 * clock->p2;
585 	if (WARN_ON(clock->n == 0 || clock->p == 0))
586 		return 0;
587 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
588 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
589 
590 	return clock->dot;
591 }
592 
593 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
594 {
595 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
596 }
597 
598 static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
599 {
600 	clock->m = i9xx_dpll_compute_m(clock);
601 	clock->p = clock->p1 * clock->p2;
602 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
603 		return 0;
604 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
605 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
606 
607 	return clock->dot;
608 }
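
/*
 * Worked example with made-up divisors: refclk = 96000 kHz, m1 = 10,
 * m2 = 5, n = 2, p = 10 gives m = 5 * (10 + 2) + (5 + 2) = 67,
 * vco = 96000 * 67 / (2 + 2) = 1608000 kHz and
 * dot = 1608000 / 10 = 160800 kHz.
 */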
609 
610 static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
611 {
612 	clock->m = clock->m1 * clock->m2;
613 	clock->p = clock->p1 * clock->p2;
614 	if (WARN_ON(clock->n == 0 || clock->p == 0))
615 		return 0;
616 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
617 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
618 
619 	return clock->dot / 5;
620 }
621 
622 int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
623 {
624 	clock->m = clock->m1 * clock->m2;
625 	clock->p = clock->p1 * clock->p2;
626 	if (WARN_ON(clock->n == 0 || clock->p == 0))
627 		return 0;
628 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
629 			clock->n << 22);
630 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
631 
632 	return clock->dot / 5;
633 }
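
/*
 * m2 is in 22.22 fixed point here (note the "<< 22" in the CHV/BXT
 * limit tables and the "clock->n << 22" above), so the scaling cancels
 * out. Illustrative numbers: refclk = 100000 kHz, m1 = 2,
 * m2 = 30 << 22, n = 1 gives vco = 100000 * 2 * 30 = 6000000 kHz.
 */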
634 
635 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
636 /**
637  * Returns whether the given set of divisors is valid for a given refclk with
638  * the given connectors.
639  */
640 
641 static bool intel_PLL_is_valid(struct drm_device *dev,
642 			       const intel_limit_t *limit,
643 			       const intel_clock_t *clock)
644 {
645 	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
646 		INTELPllInvalid("n out of range\n");
647 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
648 		INTELPllInvalid("p1 out of range\n");
649 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
650 		INTELPllInvalid("m2 out of range\n");
651 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
652 		INTELPllInvalid("m1 out of range\n");
653 
654 	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
655 	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
656 		if (clock->m1 <= clock->m2)
657 			INTELPllInvalid("m1 <= m2\n");
658 
659 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
660 		if (clock->p < limit->p.min || limit->p.max < clock->p)
661 			INTELPllInvalid("p out of range\n");
662 		if (clock->m < limit->m.min || limit->m.max < clock->m)
663 			INTELPllInvalid("m out of range\n");
664 	}
665 
666 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
667 		INTELPllInvalid("vco out of range\n");
668 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
669 	 * connector, etc., rather than just a single range.
670 	 */
671 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
672 		INTELPllInvalid("dot out of range\n");
673 
674 	return true;
675 }
676 
677 static int
678 i9xx_select_p2_div(const intel_limit_t *limit,
679 		   const struct intel_crtc_state *crtc_state,
680 		   int target)
681 {
682 	struct drm_device *dev = crtc_state->base.crtc->dev;
683 
684 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
685 		/*
686 		 * For LVDS just rely on its current settings for dual-channel.
687 		 * We haven't figured out how to reliably set up different
688 		 * single/dual channel state, if we even can.
689 		 */
690 		if (intel_is_dual_link_lvds(dev))
691 			return limit->p2.p2_fast;
692 		else
693 			return limit->p2.p2_slow;
694 	} else {
695 		if (target < limit->p2.dot_limit)
696 			return limit->p2.p2_slow;
697 		else
698 			return limit->p2.p2_fast;
699 	}
700 }
701 
702 /*
703  * Returns a set of divisors for the desired target clock with the given
704  * refclk, or FALSE.  The returned values represent the clock equation:
705  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
706  *
707  * Target and reference clocks are specified in kHz.
708  *
709  * If match_clock is provided, then best_clock P divider must match the P
710  * divider from @match_clock used for LVDS downclocking.
711  */
712 static bool
713 i9xx_find_best_dpll(const intel_limit_t *limit,
714 		    struct intel_crtc_state *crtc_state,
715 		    int target, int refclk, intel_clock_t *match_clock,
716 		    intel_clock_t *best_clock)
717 {
718 	struct drm_device *dev = crtc_state->base.crtc->dev;
719 	intel_clock_t clock;
720 	int err = target;
721 
722 	memset(best_clock, 0, sizeof(*best_clock));
723 
724 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
725 
726 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
727 	     clock.m1++) {
728 		for (clock.m2 = limit->m2.min;
729 		     clock.m2 <= limit->m2.max; clock.m2++) {
730 			if (clock.m2 >= clock.m1)
731 				break;
732 			for (clock.n = limit->n.min;
733 			     clock.n <= limit->n.max; clock.n++) {
734 				for (clock.p1 = limit->p1.min;
735 					clock.p1 <= limit->p1.max; clock.p1++) {
736 					int this_err;
737 
738 					i9xx_calc_dpll_params(refclk, &clock);
739 					if (!intel_PLL_is_valid(dev, limit,
740 								&clock))
741 						continue;
742 					if (match_clock &&
743 					    clock.p != match_clock->p)
744 						continue;
745 
746 					this_err = abs(clock.dot - target);
747 					if (this_err < err) {
748 						*best_clock = clock;
749 						err = this_err;
750 					}
751 				}
752 			}
753 		}
754 	}
755 
756 	return (err != target);
757 }
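
/*
 * The search above is a brute-force scan over (m1, m2, n, p1) with p2
 * fixed by i9xx_select_p2_div(); a candidate is recorded only when it
 * gets strictly closer to the target than any previous one, so the
 * function returns true iff some valid divisor set landed within
 * target kHz of the goal. Hypothetical usage sketch (values invented):
 *
 *	intel_clock_t clock;
 *
 *	if (!i9xx_find_best_dpll(limit, crtc_state, 148500, 96000,
 *				 NULL, &clock))
 *		return -EINVAL;
 */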
758 
759 /*
760  * Returns a set of divisors for the desired target clock with the given
761  * refclk, or FALSE.  The returned values represent the clock equation:
762  * refclk * (m2 + 2) / n / p1 / p2 (Pineview's single combined m divider).
763  *
764  * Target and reference clocks are specified in kHz.
765  *
766  * If match_clock is provided, then best_clock P divider must match the P
767  * divider from @match_clock used for LVDS downclocking.
768  */
769 static bool
770 pnv_find_best_dpll(const intel_limit_t *limit,
771 		   struct intel_crtc_state *crtc_state,
772 		   int target, int refclk, intel_clock_t *match_clock,
773 		   intel_clock_t *best_clock)
774 {
775 	struct drm_device *dev = crtc_state->base.crtc->dev;
776 	intel_clock_t clock;
777 	int err = target;
778 
779 	memset(best_clock, 0, sizeof(*best_clock));
780 
781 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
782 
783 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
784 	     clock.m1++) {
785 		for (clock.m2 = limit->m2.min;
786 		     clock.m2 <= limit->m2.max; clock.m2++) {
787 			for (clock.n = limit->n.min;
788 			     clock.n <= limit->n.max; clock.n++) {
789 				for (clock.p1 = limit->p1.min;
790 					clock.p1 <= limit->p1.max; clock.p1++) {
791 					int this_err;
792 
793 					pnv_calc_dpll_params(refclk, &clock);
794 					if (!intel_PLL_is_valid(dev, limit,
795 								&clock))
796 						continue;
797 					if (match_clock &&
798 					    clock.p != match_clock->p)
799 						continue;
800 
801 					this_err = abs(clock.dot - target);
802 					if (this_err < err) {
803 						*best_clock = clock;
804 						err = this_err;
805 					}
806 				}
807 			}
808 		}
809 	}
810 
811 	return (err != target);
812 }
813 
814 /*
815  * Returns a set of divisors for the desired target clock with the given
816  * refclk, or FALSE.  The returned values represent the clock equation:
817  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
818  *
819  * Target and reference clocks are specified in kHz.
820  *
821  * If match_clock is provided, then best_clock P divider must match the P
822  * divider from @match_clock used for LVDS downclocking.
823  */
824 static bool
825 g4x_find_best_dpll(const intel_limit_t *limit,
826 		   struct intel_crtc_state *crtc_state,
827 		   int target, int refclk, intel_clock_t *match_clock,
828 		   intel_clock_t *best_clock)
829 {
830 	struct drm_device *dev = crtc_state->base.crtc->dev;
831 	intel_clock_t clock;
832 	int max_n;
833 	bool found = false;
834 	/* (target >> 8) + (target >> 9) = target * 3/512, roughly target * 0.00586 */
835 	int err_most = (target >> 8) + (target >> 9);
836 
837 	memset(best_clock, 0, sizeof(*best_clock));
838 
839 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
840 
841 	max_n = limit->n.max;
842 	/* based on hardware requirement, prefer smaller n for better precision */
843 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
844 		/* based on hardware requirement, prefer larger m1,m2 */
845 		for (clock.m1 = limit->m1.max;
846 		     clock.m1 >= limit->m1.min; clock.m1--) {
847 			for (clock.m2 = limit->m2.max;
848 			     clock.m2 >= limit->m2.min; clock.m2--) {
849 				for (clock.p1 = limit->p1.max;
850 				     clock.p1 >= limit->p1.min; clock.p1--) {
851 					int this_err;
852 
853 					i9xx_calc_dpll_params(refclk, &clock);
854 					if (!intel_PLL_is_valid(dev, limit,
855 								&clock))
856 						continue;
857 
858 					this_err = abs(clock.dot - target);
859 					if (this_err < err_most) {
860 						*best_clock = clock;
861 						err_most = this_err;
862 						max_n = clock.n;
863 						found = true;
864 					}
865 				}
866 			}
867 		}
868 	}
869 	return found;
870 }
871 
872 /*
873  * Check whether the calculated PLL configuration is better than the best
874  * found so far. The calculated error is returned through *error_ppm.
875  */
876 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
877 			       const intel_clock_t *calculated_clock,
878 			       const intel_clock_t *best_clock,
879 			       unsigned int best_error_ppm,
880 			       unsigned int *error_ppm)
881 {
882 	/*
883 	 * For CHV ignore the error and consider only the P value.
884 	 * Prefer a bigger P value based on HW requirements.
885 	 */
886 	if (IS_CHERRYVIEW(dev)) {
887 		*error_ppm = 0;
888 
889 		return calculated_clock->p > best_clock->p;
890 	}
891 
892 	if (WARN_ON_ONCE(!target_freq))
893 		return false;
894 
895 	*error_ppm = div_u64(1000000ULL *
896 				abs(target_freq - calculated_clock->dot),
897 			     target_freq);
898 	/*
899 	 * Prefer a better P value over a better (smaller) error if the error
900 	 * is small. Ensure this preference for future configurations too by
901 	 * setting the error to 0.
902 	 */
903 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
904 		*error_ppm = 0;
905 
906 		return true;
907 	}
908 
909 	return *error_ppm + 10 < best_error_ppm;
910 }
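
/*
 * Example of the ppm computation (numbers illustrative): a target of
 * 270000 kHz and a calculated dot clock of 270027 kHz give
 * 1000000 * 27 / 270000 = 100 ppm, just outside the "< 100" window
 * above, so the bigger-P preference would not kick in for it.
 */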
911 
912 /*
913  * Returns a set of divisors for the desired target clock with the given
914  * refclk, or FALSE.  The returned values represent the clock equation:
915  * refclk * (m1 * m2) / n / p1 / p2 (fast clock; pipe clock = dot / 5).
916  */
917 static bool
918 vlv_find_best_dpll(const intel_limit_t *limit,
919 		   struct intel_crtc_state *crtc_state,
920 		   int target, int refclk, intel_clock_t *match_clock,
921 		   intel_clock_t *best_clock)
922 {
923 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
924 	struct drm_device *dev = crtc->base.dev;
925 	intel_clock_t clock;
926 	unsigned int bestppm = 1000000;
927 	/* max_n caps n so the PLL update rate refclk / n stays >= 19.2 MHz */
928 	int max_n = min(limit->n.max, refclk / 19200);
929 	bool found = false;
930 
931 	target *= 5; /* fast clock */
932 
933 	memset(best_clock, 0, sizeof(*best_clock));
934 
935 	/* based on hardware requirement, prefer smaller n for better precision */
936 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
937 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
938 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
939 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
940 				clock.p = clock.p1 * clock.p2;
941 				/* based on hardware requirement, prefer bigger m1,m2 values */
942 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
943 					unsigned int ppm;
944 
945 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
946 								     refclk * clock.m1);
947 
948 					vlv_calc_dpll_params(refclk, &clock);
949 
950 					if (!intel_PLL_is_valid(dev, limit,
951 								&clock))
952 						continue;
953 
954 					if (!vlv_PLL_is_optimal(dev, target,
955 								&clock,
956 								best_clock,
957 								bestppm, &ppm))
958 						continue;
959 
960 					*best_clock = clock;
961 					bestppm = ppm;
962 					found = true;
963 				}
964 			}
965 		}
966 	}
967 
968 	return found;
969 }
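
/*
 * Note that m2 is not iterated: the loop above solves the fast clock
 * equation dot = refclk * m1 * m2 / (n * p) for m2 directly.
 * Illustrative numbers: target = 270000 * 5 = 1350000 kHz,
 * refclk = 100000 kHz, n = 1, p = 4, m1 = 2 gives
 * m2 = 1350000 * 4 * 1 / (100000 * 2) = 27.
 */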
970 
971 /*
972  * Returns a set of divisors for the desired target clock with the given
973  * refclk, or FALSE.  The returned values represent the clock equation:
974  * refclk * (m1 * m2) / n / p1 / p2 (m2 in 22.22 fixed point; fast clock).
975  */
976 static bool
977 chv_find_best_dpll(const intel_limit_t *limit,
978 		   struct intel_crtc_state *crtc_state,
979 		   int target, int refclk, intel_clock_t *match_clock,
980 		   intel_clock_t *best_clock)
981 {
982 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
983 	struct drm_device *dev = crtc->base.dev;
984 	unsigned int best_error_ppm;
985 	intel_clock_t clock;
986 	uint64_t m2;
987 	bool found = false;
988 
989 	memset(best_clock, 0, sizeof(*best_clock));
990 	best_error_ppm = 1000000;
991 
992 	/*
993 	 * Based on the hardware doc, n is always set to 1 and m1 always
994 	 * to 2.  If we need to support a 200MHz refclk, we will have to
995 	 * revisit this because n may no longer be 1.
996 	 */
997 	clock.n = 1, clock.m1 = 2;
998 	target *= 5;	/* fast clock */
999 
1000 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1001 		for (clock.p2 = limit->p2.p2_fast;
1002 				clock.p2 >= limit->p2.p2_slow;
1003 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1004 			unsigned int error_ppm;
1005 
1006 			clock.p = clock.p1 * clock.p2;
1007 
1008 			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1009 					clock.n) << 22, refclk * clock.m1);
1010 
1011 			if (m2 > INT_MAX/clock.m1)
1012 				continue;
1013 
1014 			clock.m2 = m2;
1015 
1016 			chv_calc_dpll_params(refclk, &clock);
1017 
1018 			if (!intel_PLL_is_valid(dev, limit, &clock))
1019 				continue;
1020 
1021 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1022 						best_error_ppm, &error_ppm))
1023 				continue;
1024 
1025 			*best_clock = clock;
1026 			best_error_ppm = error_ppm;
1027 			found = true;
1028 		}
1029 	}
1030 
1031 	return found;
1032 }
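
/*
 * The INT_MAX / clock.m1 check above guards the 32-bit clock.m2
 * assignment: m1 * m2 is later computed in chv_calc_dpll_params(), so
 * any candidate whose 22.22 fixed-point m2 would overflow there is
 * simply skipped.
 */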
1033 
1034 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1035 			intel_clock_t *best_clock)
1036 {
1037 	int refclk = 100000;
1038 	const intel_limit_t *limit = &intel_limits_bxt;
1039 
1040 	return chv_find_best_dpll(limit, crtc_state,
1041 				  target_clock, refclk, NULL, best_clock);
1042 }
1043 
1044 bool intel_crtc_active(struct drm_crtc *crtc)
1045 {
1046 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1047 
1048 	/* Be paranoid as we can arrive here with only partial
1049 	 * state retrieved from the hardware during setup.
1050 	 *
1051 	 * We can ditch the adjusted_mode.crtc_clock check as soon
1052 	 * as Haswell has gained clock readout/fastboot support.
1053 	 *
1054 	 * We can ditch the crtc->primary->fb check as soon as we can
1055 	 * properly reconstruct framebuffers.
1056 	 *
1057 	 * FIXME: The intel_crtc->active here should be switched to
1058 	 * crtc->state->active once we have proper CRTC states wired up
1059 	 * for atomic.
1060 	 */
1061 	return intel_crtc->active && crtc->primary->state->fb &&
1062 		intel_crtc->config->base.adjusted_mode.crtc_clock;
1063 }
1064 
1065 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1066 					     enum i915_pipe pipe)
1067 {
1068 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1069 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1070 
1071 	return intel_crtc->config->cpu_transcoder;
1072 }
1073 
1074 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
1075 {
1076 	struct drm_i915_private *dev_priv = dev->dev_private;
1077 	i915_reg_t reg = PIPEDSL(pipe);
1078 	u32 line1, line2;
1079 	u32 line_mask;
1080 
1081 	if (IS_GEN2(dev))
1082 		line_mask = DSL_LINEMASK_GEN2;
1083 	else
1084 		line_mask = DSL_LINEMASK_GEN3;
1085 
1086 	line1 = I915_READ(reg) & line_mask;
1087 	msleep(5);
1088 	line2 = I915_READ(reg) & line_mask;
1089 
1090 	return line1 == line2;
1091 }
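
/*
 * The 5 ms between the two PIPEDSL samples spans many scanlines of any
 * realistic mode, so an unchanged line counter is a reliable sign that
 * the pipe has stopped scanning out.
 */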
1092 
1093 /*
1094  * intel_wait_for_pipe_off - wait for pipe to turn off
1095  * @crtc: crtc whose pipe to wait for
1096  *
1097  * After disabling a pipe, we can't wait for vblank in the usual way,
1098  * spinning on the vblank interrupt status bit, since we won't actually
1099  * see an interrupt when the pipe is disabled.
1100  *
1101  * On Gen4 and above:
1102  *   wait for the pipe register state bit to turn off
1103  *
1104  * Otherwise:
1105  *   wait for the display line value to settle (it usually
1106  *   ends up stopping at the start of the next frame).
1107  *
1108  */
1109 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1110 {
1111 	struct drm_device *dev = crtc->base.dev;
1112 	struct drm_i915_private *dev_priv = dev->dev_private;
1113 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1114 	enum i915_pipe pipe = crtc->pipe;
1115 
1116 	if (INTEL_INFO(dev)->gen >= 4) {
1117 		i915_reg_t reg = PIPECONF(cpu_transcoder);
1118 
1119 		/* Wait for the Pipe State to go off */
1120 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1121 			     100))
1122 			WARN(1, "pipe_off wait timed out\n");
1123 	} else {
1124 		/* Wait for the display line to settle */
1125 		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1126 			WARN(1, "pipe_off wait timed out\n");
1127 	}
1128 }
1129 
1130 /* Only for pre-ILK configs */
1131 void assert_pll(struct drm_i915_private *dev_priv,
1132 		enum i915_pipe pipe, bool state)
1133 {
1134 	u32 val;
1135 	bool cur_state;
1136 
1137 	val = I915_READ(DPLL(pipe));
1138 	cur_state = !!(val & DPLL_VCO_ENABLE);
1139 	I915_STATE_WARN(cur_state != state,
1140 	     "PLL state assertion failure (expected %s, current %s)\n",
1141 			onoff(state), onoff(cur_state));
1142 }
1143 
1144 /* XXX: the dsi pll is shared between MIPI DSI ports */
1145 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1146 {
1147 	u32 val;
1148 	bool cur_state;
1149 
1150 	mutex_lock(&dev_priv->sb_lock);
1151 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1152 	mutex_unlock(&dev_priv->sb_lock);
1153 
1154 	cur_state = val & DSI_PLL_VCO_EN;
1155 	I915_STATE_WARN(cur_state != state,
1156 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1157 			onoff(state), onoff(cur_state));
1158 }
1159 
1160 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1161 			  enum i915_pipe pipe, bool state)
1162 {
1163 	bool cur_state;
1164 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1165 								      pipe);
1166 
1167 	if (HAS_DDI(dev_priv)) {
1168 		/* DDI does not have a specific FDI_TX register */
1169 		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1170 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1171 	} else {
1172 		u32 val = I915_READ(FDI_TX_CTL(pipe));
1173 		cur_state = !!(val & FDI_TX_ENABLE);
1174 	}
1175 	I915_STATE_WARN(cur_state != state,
1176 	     "FDI TX state assertion failure (expected %s, current %s)\n",
1177 			onoff(state), onoff(cur_state));
1178 }
1179 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1180 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1181 
1182 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1183 			  enum i915_pipe pipe, bool state)
1184 {
1185 	u32 val;
1186 	bool cur_state;
1187 
1188 	val = I915_READ(FDI_RX_CTL(pipe));
1189 	cur_state = !!(val & FDI_RX_ENABLE);
1190 	I915_STATE_WARN(cur_state != state,
1191 	     "FDI RX state assertion failure (expected %s, current %s)\n",
1192 			onoff(state), onoff(cur_state));
1193 }
1194 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1195 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1196 
1197 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1198 				      enum i915_pipe pipe)
1199 {
1200 	u32 val;
1201 
1202 	/* ILK FDI PLL is always enabled */
1203 	if (INTEL_INFO(dev_priv)->gen == 5)
1204 		return;
1205 
1206 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1207 	if (HAS_DDI(dev_priv))
1208 		return;
1209 
1210 	val = I915_READ(FDI_TX_CTL(pipe));
1211 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1212 }
1213 
1214 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1215 		       enum i915_pipe pipe, bool state)
1216 {
1217 	u32 val;
1218 	bool cur_state;
1219 
1220 	val = I915_READ(FDI_RX_CTL(pipe));
1221 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1222 	I915_STATE_WARN(cur_state != state,
1223 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1224 			onoff(state), onoff(cur_state));
1225 }
1226 
1227 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1228 			   enum i915_pipe pipe)
1229 {
1230 	struct drm_device *dev = dev_priv->dev;
1231 	i915_reg_t pp_reg;
1232 	u32 val;
1233 	enum i915_pipe panel_pipe = PIPE_A;
1234 	bool locked = true;
1235 
1236 	if (WARN_ON(HAS_DDI(dev)))
1237 		return;
1238 
1239 	if (HAS_PCH_SPLIT(dev)) {
1240 		u32 port_sel;
1241 
1242 		pp_reg = PCH_PP_CONTROL;
1243 		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1244 
1245 		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1246 		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1247 			panel_pipe = PIPE_B;
1248 		/* XXX: else fix for eDP */
1249 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1250 		/* presumably write lock depends on pipe, not port select */
1251 		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1252 		panel_pipe = pipe;
1253 	} else {
1254 		pp_reg = PP_CONTROL;
1255 		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1256 			panel_pipe = PIPE_B;
1257 	}
1258 
1259 	val = I915_READ(pp_reg);
1260 	if (!(val & PANEL_POWER_ON) ||
1261 	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1262 		locked = false;
1263 
1264 	I915_STATE_WARN(panel_pipe == pipe && locked,
1265 	     "panel assertion failure, pipe %c regs locked\n",
1266 	     pipe_name(pipe));
1267 }
1268 
1269 static void assert_cursor(struct drm_i915_private *dev_priv,
1270 			  enum i915_pipe pipe, bool state)
1271 {
1272 	struct drm_device *dev = dev_priv->dev;
1273 	bool cur_state;
1274 
1275 	if (IS_845G(dev) || IS_I865G(dev))
1276 		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1277 	else
1278 		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1279 
1280 	I915_STATE_WARN(cur_state != state,
1281 	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1282 			pipe_name(pipe), onoff(state), onoff(cur_state));
1283 }
1284 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1285 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1286 
1287 void assert_pipe(struct drm_i915_private *dev_priv,
1288 		 enum i915_pipe pipe, bool state)
1289 {
1290 	bool cur_state;
1291 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1292 								      pipe);
1293 	enum intel_display_power_domain power_domain;
1294 
1295 	/* if we need the pipe quirk it must be always on */
1296 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1297 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1298 		state = true;
1299 
1300 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1301 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1302 		u32 val = I915_READ(PIPECONF(cpu_transcoder));
1303 		cur_state = !!(val & PIPECONF_ENABLE);
1304 
1305 		intel_display_power_put(dev_priv, power_domain);
1306 	} else {
1307 		cur_state = false;
1308 	}
1309 
1310 	I915_STATE_WARN(cur_state != state,
1311 	     "pipe %c assertion failure (expected %s, current %s)\n",
1312 			pipe_name(pipe), onoff(state), onoff(cur_state));
1313 }
1314 
1315 static void assert_plane(struct drm_i915_private *dev_priv,
1316 			 enum plane plane, bool state)
1317 {
1318 	u32 val;
1319 	bool cur_state;
1320 
1321 	val = I915_READ(DSPCNTR(plane));
1322 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1323 	I915_STATE_WARN(cur_state != state,
1324 	     "plane %c assertion failure (expected %s, current %s)\n",
1325 			plane_name(plane), onoff(state), onoff(cur_state));
1326 }
1327 
1328 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1329 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1330 
1331 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1332 				   enum i915_pipe pipe)
1333 {
1334 	struct drm_device *dev = dev_priv->dev;
1335 	int i;
1336 
1337 	/* Primary planes are fixed to pipes on gen4+ */
1338 	if (INTEL_INFO(dev)->gen >= 4) {
1339 		u32 val = I915_READ(DSPCNTR(pipe));
1340 		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1341 		     "plane %c assertion failure, should be disabled but not\n",
1342 		     plane_name(pipe));
1343 		return;
1344 	}
1345 
1346 	/* Need to check both planes against the pipe */
1347 	for_each_pipe(dev_priv, i) {
1348 		u32 val = I915_READ(DSPCNTR(i));
1349 		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1350 			DISPPLANE_SEL_PIPE_SHIFT;
1351 		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1352 		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1353 		     plane_name(i), pipe_name(pipe));
1354 	}
1355 }
1356 
1357 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1358 				    enum i915_pipe pipe)
1359 {
1360 	struct drm_device *dev = dev_priv->dev;
1361 	int sprite;
1362 
1363 	if (INTEL_INFO(dev)->gen >= 9) {
1364 		for_each_sprite(dev_priv, pipe, sprite) {
1365 			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1366 			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1367 			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1368 			     sprite, pipe_name(pipe));
1369 		}
1370 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1371 		for_each_sprite(dev_priv, pipe, sprite) {
1372 			u32 val = I915_READ(SPCNTR(pipe, sprite));
1373 			I915_STATE_WARN(val & SP_ENABLE,
1374 			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1375 			     sprite_name(pipe, sprite), pipe_name(pipe));
1376 		}
1377 	} else if (INTEL_INFO(dev)->gen >= 7) {
1378 		u32 val = I915_READ(SPRCTL(pipe));
1379 		I915_STATE_WARN(val & SPRITE_ENABLE,
1380 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1381 		     plane_name(pipe), pipe_name(pipe));
1382 	} else if (INTEL_INFO(dev)->gen >= 5) {
1383 		u32 val = I915_READ(DVSCNTR(pipe));
1384 		I915_STATE_WARN(val & DVS_ENABLE,
1385 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1386 		     plane_name(pipe), pipe_name(pipe));
1387 	}
1388 }
1389 
1390 static void assert_vblank_disabled(struct drm_crtc *crtc)
1391 {
1392 	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1393 		drm_crtc_vblank_put(crtc);
1394 }
1395 
1396 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1397 				    enum i915_pipe pipe)
1398 {
1399 	u32 val;
1400 	bool enabled;
1401 
1402 	val = I915_READ(PCH_TRANSCONF(pipe));
1403 	enabled = !!(val & TRANS_ENABLE);
1404 	I915_STATE_WARN(enabled,
1405 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1406 	     pipe_name(pipe));
1407 }
1408 
1409 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1410 			    enum i915_pipe pipe, u32 port_sel, u32 val)
1411 {
1412 	if ((val & DP_PORT_EN) == 0)
1413 		return false;
1414 
1415 	if (HAS_PCH_CPT(dev_priv)) {
1416 		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1417 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1418 			return false;
1419 	} else if (IS_CHERRYVIEW(dev_priv)) {
1420 		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1421 			return false;
1422 	} else {
1423 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1424 			return false;
1425 	}
1426 	return true;
1427 }
1428 
1429 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1430 			      enum i915_pipe pipe, u32 val)
1431 {
1432 	if ((val & SDVO_ENABLE) == 0)
1433 		return false;
1434 
1435 	if (HAS_PCH_CPT(dev_priv)) {
1436 		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1437 			return false;
1438 	} else if (IS_CHERRYVIEW(dev_priv)) {
1439 		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1440 			return false;
1441 	} else {
1442 		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1443 			return false;
1444 	}
1445 	return true;
1446 }
1447 
1448 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1449 			      enum i915_pipe pipe, u32 val)
1450 {
1451 	if ((val & LVDS_PORT_EN) == 0)
1452 		return false;
1453 
1454 	if (HAS_PCH_CPT(dev_priv)) {
1455 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1456 			return false;
1457 	} else {
1458 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1459 			return false;
1460 	}
1461 	return true;
1462 }
1463 
1464 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1465 			      enum i915_pipe pipe, u32 val)
1466 {
1467 	if ((val & ADPA_DAC_ENABLE) == 0)
1468 		return false;
1469 	if (HAS_PCH_CPT(dev_priv)) {
1470 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1471 			return false;
1472 	} else {
1473 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1474 			return false;
1475 	}
1476 	return true;
1477 }
1478 
1479 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1480 				   enum i915_pipe pipe, i915_reg_t reg,
1481 				   u32 port_sel)
1482 {
1483 	u32 val = I915_READ(reg);
1484 	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1485 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1486 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1487 
1488 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
1489 	     && (val & DP_PIPEB_SELECT),
1490 	     "IBX PCH dp port still using transcoder B\n");
1491 }
1492 
1493 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1494 				     enum i915_pipe pipe, i915_reg_t reg)
1495 {
1496 	u32 val = I915_READ(reg);
1497 	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1498 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1499 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1500 
1501 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
1502 	     && (val & SDVO_PIPE_B_SELECT),
1503 	     "IBX PCH hdmi port still using transcoder B\n");
1504 }
1505 
1506 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1507 				      enum i915_pipe pipe)
1508 {
1509 	u32 val;
1510 
1511 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1512 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1513 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1514 
1515 	val = I915_READ(PCH_ADPA);
1516 	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1517 	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1518 	     pipe_name(pipe));
1519 
1520 	val = I915_READ(PCH_LVDS);
1521 	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1522 	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1523 	     pipe_name(pipe));
1524 
1525 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1526 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1527 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1528 }
1529 
1530 static void _vlv_enable_pll(struct intel_crtc *crtc,
1531 			    const struct intel_crtc_state *pipe_config)
1532 {
1533 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1534 	enum i915_pipe pipe = crtc->pipe;
1535 
1536 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1537 	POSTING_READ(DPLL(pipe));
1538 	udelay(150);
1539 
1540 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1541 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
1542 }
1543 
1544 static void vlv_enable_pll(struct intel_crtc *crtc,
1545 			   const struct intel_crtc_state *pipe_config)
1546 {
1547 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1548 	enum i915_pipe pipe = crtc->pipe;
1549 
1550 	assert_pipe_disabled(dev_priv, pipe);
1551 
1552 	/* PLL is protected by panel, make sure we can write it */
1553 	assert_panel_unlocked(dev_priv, pipe);
1554 
1555 	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1556 		_vlv_enable_pll(crtc, pipe_config);
1557 
1558 	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1559 	POSTING_READ(DPLL_MD(pipe));
1560 }
1561 
1562 
1563 static void _chv_enable_pll(struct intel_crtc *crtc,
1564 			    const struct intel_crtc_state *pipe_config)
1565 {
1566 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1567 	enum i915_pipe pipe = crtc->pipe;
1568 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1569 	u32 tmp;
1570 
1571 	mutex_lock(&dev_priv->sb_lock);
1572 
1573 	/* Enable back the 10bit clock to display controller */
1574 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1575 	tmp |= DPIO_DCLKP_EN;
1576 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1577 
1578 	mutex_unlock(&dev_priv->sb_lock);
1579 
1580 	/*
1581 	 * Need to wait > 100ns between dclkp clock enable and PLL enable; udelay(1) suffices.
1582 	 */
1583 	udelay(1);
1584 
1585 	/* Enable PLL */
1586 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1587 
1588 	/* Check PLL is locked */
1589 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1590 		DRM_ERROR("PLL %d failed to lock\n", pipe);
1591 }
1592 
1593 static void chv_enable_pll(struct intel_crtc *crtc,
1594 			   const struct intel_crtc_state *pipe_config)
1595 {
1596 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1597 	enum i915_pipe pipe = crtc->pipe;
1598 
1599 	assert_pipe_disabled(dev_priv, pipe);
1600 
1601 	/* PLL is protected by panel, make sure we can write it */
1602 	assert_panel_unlocked(dev_priv, pipe);
1603 
1604 	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1605 		_chv_enable_pll(crtc, pipe_config);
1606 
1607 	if (pipe != PIPE_A) {
1608 		/*
1609 		 * WaPixelRepeatModeFixForC0:chv
1610 		 *
1611 		 * DPLLCMD is AWOL. Use chicken bits to propagate
1612 		 * the value from DPLLBMD to either pipe B or C.
1613 		 */
1614 		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
1615 		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1616 		I915_WRITE(CBR4_VLV, 0);
1617 		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1618 
1619 		/*
1620 		 * DPLLB VGA mode also seems to cause problems.
1621 		 * We should always have it disabled.
1622 		 */
1623 		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1624 	} else {
1625 		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1626 		POSTING_READ(DPLL_MD(pipe));
1627 	}
1628 }
1629 
1630 static int intel_num_dvo_pipes(struct drm_device *dev)
1631 {
1632 	struct intel_crtc *crtc;
1633 	int count = 0;
1634 
1635 	for_each_intel_crtc(dev, crtc)
1636 		count += crtc->base.state->active &&
1637 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1638 
1639 	return count;
1640 }
1641 
1642 static void i9xx_enable_pll(struct intel_crtc *crtc)
1643 {
1644 	struct drm_device *dev = crtc->base.dev;
1645 	struct drm_i915_private *dev_priv = dev->dev_private;
1646 	i915_reg_t reg = DPLL(crtc->pipe);
1647 	u32 dpll = crtc->config->dpll_hw_state.dpll;
1648 
1649 	assert_pipe_disabled(dev_priv, crtc->pipe);
1650 
1651 	/* PLL is protected by panel, make sure we can write it */
1652 	if (IS_MOBILE(dev) && !IS_I830(dev))
1653 		assert_panel_unlocked(dev_priv, crtc->pipe);
1654 
1655 	/* Enable DVO 2x clock on both PLLs if necessary */
1656 	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1657 		/*
1658 		 * It appears to be important that we don't enable this
1659 		 * for the current pipe before otherwise configuring the
1660 		 * PLL. No idea how this should be handled if multiple
1661 		 * DVO outputs are enabled simultaneously.
1662 		 */
1663 		dpll |= DPLL_DVO_2X_MODE;
1664 		I915_WRITE(DPLL(!crtc->pipe),
1665 			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1666 	}
1667 
1668 	/*
1669 	 * Apparently we need to have VGA mode enabled prior to changing
1670 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1671 	 * dividers, even though the register value does change.
1672 	 */
1673 	I915_WRITE(reg, 0);
1674 
1675 	I915_WRITE(reg, dpll);
1676 
1677 	/* Wait for the clocks to stabilize. */
1678 	POSTING_READ(reg);
1679 	udelay(150);
1680 
1681 	if (INTEL_INFO(dev)->gen >= 4) {
1682 		I915_WRITE(DPLL_MD(crtc->pipe),
1683 			   crtc->config->dpll_hw_state.dpll_md);
1684 	} else {
1685 		/* The pixel multiplier can only be updated once the
1686 		 * DPLL is enabled and the clocks are stable.
1687 		 *
1688 		 * So write it again.
1689 		 */
1690 		I915_WRITE(reg, dpll);
1691 	}
1692 
1693 	/* We do this three times for luck */
1694 	I915_WRITE(reg, dpll);
1695 	POSTING_READ(reg);
1696 	udelay(150); /* wait for warmup */
1697 	I915_WRITE(reg, dpll);
1698 	POSTING_READ(reg);
1699 	udelay(150); /* wait for warmup */
1700 	I915_WRITE(reg, dpll);
1701 	POSTING_READ(reg);
1702 	udelay(150); /* wait for warmup */
1703 }
1704 
1705 /**
1706  * i9xx_disable_pll - disable a PLL
1707  * @crtc: crtc whose PLL is to be disabled
1708  *
1709  * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
1711  *
1712  * Note!  This is for pre-ILK only.
1713  */
1714 static void i9xx_disable_pll(struct intel_crtc *crtc)
1715 {
1716 	struct drm_device *dev = crtc->base.dev;
1717 	struct drm_i915_private *dev_priv = dev->dev_private;
1718 	enum i915_pipe pipe = crtc->pipe;
1719 
1720 	/* Disable DVO 2x clock on both PLLs if necessary */
1721 	if (IS_I830(dev) &&
1722 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1723 	    !intel_num_dvo_pipes(dev)) {
1724 		I915_WRITE(DPLL(PIPE_B),
1725 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1726 		I915_WRITE(DPLL(PIPE_A),
1727 			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1728 	}
1729 
1730 	/* Don't disable the pipe or its PLL if a quirk requires them to stay on */
1731 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1732 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1733 		return;
1734 
1735 	/* Make sure the pipe isn't still relying on us */
1736 	assert_pipe_disabled(dev_priv, pipe);
1737 
1738 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1739 	POSTING_READ(DPLL(pipe));
1740 }
1741 
1742 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1743 {
1744 	u32 val;
1745 
1746 	/* Make sure the pipe isn't still relying on us */
1747 	assert_pipe_disabled(dev_priv, pipe);
1748 
1749 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1750 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1751 	if (pipe != PIPE_A)
1752 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1753 
1754 	I915_WRITE(DPLL(pipe), val);
1755 	POSTING_READ(DPLL(pipe));
1756 }
1757 
1758 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1759 {
1760 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1761 	u32 val;
1762 
1763 	/* Make sure the pipe isn't still relying on us */
1764 	assert_pipe_disabled(dev_priv, pipe);
1765 
1766 	val = DPLL_SSC_REF_CLK_CHV |
1767 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1768 	if (pipe != PIPE_A)
1769 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1770 
1771 	I915_WRITE(DPLL(pipe), val);
1772 	POSTING_READ(DPLL(pipe));
1773 
1774 	mutex_lock(&dev_priv->sb_lock);
1775 
1776 	/* Disable the 10-bit clock to the display controller */
1777 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1778 	val &= ~DPIO_DCLKP_EN;
1779 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1780 
1781 	mutex_unlock(&dev_priv->sb_lock);
1782 }
1783 
1784 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1785 			 struct intel_digital_port *dport,
1786 			 unsigned int expected_mask)
1787 {
1788 	u32 port_mask;
1789 	i915_reg_t dpll_reg;
1790 
1791 	switch (dport->port) {
1792 	case PORT_B:
1793 		port_mask = DPLL_PORTB_READY_MASK;
1794 		dpll_reg = DPLL(0);
1795 		break;
1796 	case PORT_C:
1797 		port_mask = DPLL_PORTC_READY_MASK;
1798 		dpll_reg = DPLL(0);
1799 		expected_mask <<= 4;
1800 		break;
1801 	case PORT_D:
1802 		port_mask = DPLL_PORTD_READY_MASK;
1803 		dpll_reg = DPIO_PHY_STATUS;
1804 		break;
1805 	default:
1806 		BUG();
1807 	}
1808 
1809 	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1810 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1811 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1812 }
1813 
1814 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1815 					   enum i915_pipe pipe)
1816 {
1817 	struct drm_device *dev = dev_priv->dev;
1818 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1819 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1820 	i915_reg_t reg;
1821 	uint32_t val, pipeconf_val;
1822 
1823 	/* Make sure PCH DPLL is enabled */
1824 	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1825 
1826 	/* FDI must be feeding us bits for PCH ports */
1827 	assert_fdi_tx_enabled(dev_priv, pipe);
1828 	assert_fdi_rx_enabled(dev_priv, pipe);
1829 
1830 	if (HAS_PCH_CPT(dev)) {
1831 		/* Workaround: Set the timing override bit before enabling the
1832 		 * pch transcoder. */
1833 		reg = TRANS_CHICKEN2(pipe);
1834 		val = I915_READ(reg);
1835 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1836 		I915_WRITE(reg, val);
1837 	}
1838 
1839 	reg = PCH_TRANSCONF(pipe);
1840 	val = I915_READ(reg);
1841 	pipeconf_val = I915_READ(PIPECONF(pipe));
1842 
1843 	if (HAS_PCH_IBX(dev_priv)) {
1844 		/*
1845 		 * Make the transcoder BPC consistent with the BPC in
1846 		 * the pipeconf register. For HDMI we must use 8bpc
1847 		 * here for both 8bpc and 12bpc.
1848 		 */
1849 		val &= ~PIPECONF_BPC_MASK;
1850 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1851 			val |= PIPECONF_8BPC;
1852 		else
1853 			val |= pipeconf_val & PIPECONF_BPC_MASK;
1854 	}
1855 
1856 	val &= ~TRANS_INTERLACE_MASK;
1857 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1858 		if (HAS_PCH_IBX(dev_priv) &&
1859 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1860 			val |= TRANS_LEGACY_INTERLACED_ILK;
1861 		else
1862 			val |= TRANS_INTERLACED;
1863 	} else
1864 		val |= TRANS_PROGRESSIVE;
1865 
1866 	I915_WRITE(reg, val | TRANS_ENABLE);
1867 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1868 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1869 }
1870 
1871 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1872 				      enum transcoder cpu_transcoder)
1873 {
1874 	u32 val, pipeconf_val;
1875 
1876 	/* FDI must be feeding us bits for PCH ports */
1877 	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1878 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1879 
1880 	/* Workaround: set timing override bit. */
1881 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1882 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1883 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1884 
1885 	val = TRANS_ENABLE;
1886 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1887 
1888 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1889 	    PIPECONF_INTERLACED_ILK)
1890 		val |= TRANS_INTERLACED;
1891 	else
1892 		val |= TRANS_PROGRESSIVE;
1893 
1894 	I915_WRITE(LPT_TRANSCONF, val);
1895 	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1896 		DRM_ERROR("Failed to enable PCH transcoder\n");
1897 }
1898 
1899 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1900 					    enum i915_pipe pipe)
1901 {
1902 	struct drm_device *dev = dev_priv->dev;
1903 	i915_reg_t reg;
1904 	uint32_t val;
1905 
1906 	/* FDI relies on the transcoder */
1907 	assert_fdi_tx_disabled(dev_priv, pipe);
1908 	assert_fdi_rx_disabled(dev_priv, pipe);
1909 
1910 	/* Ports must be off as well */
1911 	assert_pch_ports_disabled(dev_priv, pipe);
1912 
1913 	reg = PCH_TRANSCONF(pipe);
1914 	val = I915_READ(reg);
1915 	val &= ~TRANS_ENABLE;
1916 	I915_WRITE(reg, val);
1917 	/* wait for PCH transcoder off, transcoder state */
1918 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1919 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1920 
1921 	if (HAS_PCH_CPT(dev)) {
1922 		/* Workaround: Clear the timing override chicken bit again. */
1923 		reg = TRANS_CHICKEN2(pipe);
1924 		val = I915_READ(reg);
1925 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1926 		I915_WRITE(reg, val);
1927 	}
1928 }
1929 
1930 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1931 {
1932 	u32 val;
1933 
1934 	val = I915_READ(LPT_TRANSCONF);
1935 	val &= ~TRANS_ENABLE;
1936 	I915_WRITE(LPT_TRANSCONF, val);
1937 	/* wait for PCH transcoder off, transcoder state */
1938 	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1939 		DRM_ERROR("Failed to disable PCH transcoder\n");
1940 
1941 	/* Workaround: clear timing override bit. */
1942 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1943 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1944 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1945 }
1946 
1947 /**
1948  * intel_enable_pipe - enable a pipe, asserting requirements
1949  * @crtc: crtc responsible for the pipe
1950  *
1951  * Enable @crtc's pipe, making sure that various hardware specific requirements
1952  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1953  */
1954 static void intel_enable_pipe(struct intel_crtc *crtc)
1955 {
1956 	struct drm_device *dev = crtc->base.dev;
1957 	struct drm_i915_private *dev_priv = dev->dev_private;
1958 	enum i915_pipe pipe = crtc->pipe;
1959 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1960 	enum i915_pipe pch_transcoder;
1961 	i915_reg_t reg;
1962 	u32 val;
1963 
1964 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1965 
1966 	assert_planes_disabled(dev_priv, pipe);
1967 	assert_cursor_disabled(dev_priv, pipe);
1968 	assert_sprites_disabled(dev_priv, pipe);
1969 
1970 	if (HAS_PCH_LPT(dev_priv))
1971 		pch_transcoder = TRANSCODER_A;
1972 	else
1973 		pch_transcoder = pipe;
1974 
1975 	/*
1976 	 * A pipe without a PLL won't actually be able to drive bits from
1977 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1978 	 * need the check.
1979 	 */
1980 	if (HAS_GMCH_DISPLAY(dev_priv)) {
1981 		if (crtc->config->has_dsi_encoder)
1982 			assert_dsi_pll_enabled(dev_priv);
1983 		else
1984 			assert_pll_enabled(dev_priv, pipe);
1985 	} else {
1986 		if (crtc->config->has_pch_encoder) {
1987 			/* if driving the PCH, we need FDI enabled */
1988 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1989 			assert_fdi_tx_pll_enabled(dev_priv,
1990 						  (enum i915_pipe) cpu_transcoder);
1991 		}
1992 		/* FIXME: assert CPU port conditions for SNB+ */
1993 	}
1994 
1995 	reg = PIPECONF(cpu_transcoder);
1996 	val = I915_READ(reg);
1997 	if (val & PIPECONF_ENABLE) {
1998 		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1999 			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2000 		return;
2001 	}
2002 
2003 	I915_WRITE(reg, val | PIPECONF_ENABLE);
2004 	POSTING_READ(reg);
2005 
2006 	/*
2007 	 * Until the pipe starts DSL will read as 0, which would cause
2008 	 * an apparent vblank timestamp jump, which also messes up the
2009 	 * frame count when it's derived from the timestamps. So let's
2010 	 * wait for the pipe to start properly before we call
2011 	 * drm_crtc_vblank_on().
2012 	 */
2013 	if (dev->max_vblank_count == 0 &&
2014 	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2015 		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2016 }
2017 
2018 /**
2019  * intel_disable_pipe - disable a pipe, asserting requirements
2020  * @crtc: crtc whose pipe is to be disabled
2021  *
2022  * Disable the pipe of @crtc, making sure that various hardware
2023  * specific requirements are met, if applicable, e.g. plane
2024  * disabled, panel fitter off, etc.
2025  *
2026  * Will wait until the pipe has shut down before returning.
2027  */
2028 static void intel_disable_pipe(struct intel_crtc *crtc)
2029 {
2030 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2031 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2032 	enum i915_pipe pipe = crtc->pipe;
2033 	i915_reg_t reg;
2034 	u32 val;
2035 
2036 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2037 
2038 	/*
2039 	 * Make sure planes won't keep trying to pump pixels to us,
2040 	 * or we might hang the display.
2041 	 */
2042 	assert_planes_disabled(dev_priv, pipe);
2043 	assert_cursor_disabled(dev_priv, pipe);
2044 	assert_sprites_disabled(dev_priv, pipe);
2045 
2046 	reg = PIPECONF(cpu_transcoder);
2047 	val = I915_READ(reg);
2048 	if ((val & PIPECONF_ENABLE) == 0)
2049 		return;
2050 
2051 	/*
2052 	 * Double wide has implications for planes
2053 	 * so best keep it disabled when not needed.
2054 	 */
2055 	if (crtc->config->double_wide)
2056 		val &= ~PIPECONF_DOUBLE_WIDE;
2057 
2058 	/* Don't disable the pipe or its PLL if a quirk requires them to stay on */
2059 	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2060 	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2061 		val &= ~PIPECONF_ENABLE;
2062 
2063 	I915_WRITE(reg, val);
2064 	if ((val & PIPECONF_ENABLE) == 0)
2065 		intel_wait_for_pipe_off(crtc);
2066 }
2067 
2068 static bool need_vtd_wa(struct drm_device *dev)
2069 {
2070 #ifdef CONFIG_INTEL_IOMMU
2071 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2072 		return true;
2073 #endif
2074 	return false;
2075 }
2076 
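/* Size of one tile in bytes: 2 KiB on gen2, 4 KiB on everything newer. */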
2077 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2078 {
2079 	return IS_GEN2(dev_priv) ? 2048 : 4096;
2080 }
2081 
2082 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2083 					   uint64_t fb_modifier, unsigned int cpp)
2084 {
2085 	switch (fb_modifier) {
2086 	case DRM_FORMAT_MOD_NONE:
2087 		return cpp;
2088 	case I915_FORMAT_MOD_X_TILED:
2089 		if (IS_GEN2(dev_priv))
2090 			return 128;
2091 		else
2092 			return 512;
2093 	case I915_FORMAT_MOD_Y_TILED:
2094 		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2095 			return 128;
2096 		else
2097 			return 512;
2098 	case I915_FORMAT_MOD_Yf_TILED:
2099 		switch (cpp) {
2100 		case 1:
2101 			return 64;
2102 		case 2:
2103 		case 4:
2104 			return 128;
2105 		case 8:
2106 		case 16:
2107 			return 256;
2108 		default:
2109 			MISSING_CASE(cpp);
2110 			return cpp;
2111 		}
2112 		break;
2113 	default:
2114 		MISSING_CASE(fb_modifier);
2115 		return cpp;
2116 	}
2117 }
2118 
2119 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2120 			       uint64_t fb_modifier, unsigned int cpp)
2121 {
2122 	if (fb_modifier == DRM_FORMAT_MOD_NONE)
2123 		return 1;
2124 	else
2125 		return intel_tile_size(dev_priv) /
2126 			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2127 }
2128 
2129 /* Return the tile dimensions in pixel units */
2130 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2131 			    unsigned int *tile_width,
2132 			    unsigned int *tile_height,
2133 			    uint64_t fb_modifier,
2134 			    unsigned int cpp)
2135 {
2136 	unsigned int tile_width_bytes =
2137 		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2138 
2139 	*tile_width = tile_width_bytes / cpp;
2140 	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2141 }
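/*
 * Worked example (illustrative, derived from the helpers above): on
 * gen4+ an X-tile row is 512 bytes and a whole tile is 4096 bytes, so
 * with cpp=4 intel_tile_dims() reports a 128x8 pixel tile
 * (512/4 = 128 pixels, 4096/512 = 8 rows).
 */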
2142 
2143 unsigned int
2144 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2145 		      uint32_t pixel_format, uint64_t fb_modifier)
2146 {
2147 	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2148 	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2149 
2150 	return ALIGN(height, tile_height);
2151 }
2152 
2153 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2154 {
2155 	unsigned int size = 0;
2156 	int i;
2157 
2158 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2159 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2160 
2161 	return size;
2162 }
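/*
 * The width/height fields summed above are in tile units (see
 * intel_fill_fb_info() below), so the result is a tile count; with the
 * 4 KiB tile size used here that is also a page count for the rotated
 * GGTT view.
 */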
2163 
2164 static void
2165 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2166 			struct drm_framebuffer *fb,
2167 			unsigned int rotation)
2168 {
2169 	if (intel_rotation_90_or_270(rotation)) {
2170 		*view = i915_ggtt_view_rotated;
2171 		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2172 	} else {
2173 		*view = i915_ggtt_view_normal;
2174 	}
2175 }
2176 
2177 static void
2178 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2179 		   struct drm_framebuffer *fb)
2180 {
2181 	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2182 	unsigned int tile_size, tile_width, tile_height, cpp;
2183 
2184 	tile_size = intel_tile_size(dev_priv);
2185 
2186 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2187 	intel_tile_dims(dev_priv, &tile_width, &tile_height,
2188 			fb->modifier[0], cpp);
2189 
2190 	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2191 	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2192 
2193 	if (fb->pixel_format == DRM_FORMAT_NV12) {
2194 		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2195 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2196 				fb->modifier[1], cpp);
2197 
2198 		info->uv_offset = fb->offsets[1];
2199 		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2200 		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2201 	}
2202 }
2203 
2204 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2205 {
2206 	if (INTEL_INFO(dev_priv)->gen >= 9)
2207 		return 256 * 1024;
2208 	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2209 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2210 		return 128 * 1024;
2211 	else if (INTEL_INFO(dev_priv)->gen >= 4)
2212 		return 4 * 1024;
2213 	else
2214 		return 0;
2215 }
2216 
2217 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2218 					 uint64_t fb_modifier)
2219 {
2220 	switch (fb_modifier) {
2221 	case DRM_FORMAT_MOD_NONE:
2222 		return intel_linear_alignment(dev_priv);
2223 	case I915_FORMAT_MOD_X_TILED:
2224 		if (INTEL_INFO(dev_priv)->gen >= 9)
2225 			return 256 * 1024;
2226 		return 0;
2227 	case I915_FORMAT_MOD_Y_TILED:
2228 	case I915_FORMAT_MOD_Yf_TILED:
2229 		return 1 * 1024 * 1024;
2230 	default:
2231 		MISSING_CASE(fb_modifier);
2232 		return 0;
2233 	}
2234 }
2235 
2236 int
2237 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2238 			   unsigned int rotation)
2239 {
2240 	struct drm_device *dev = fb->dev;
2241 	struct drm_i915_private *dev_priv = dev->dev_private;
2242 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2243 	struct i915_ggtt_view view;
2244 	u32 alignment;
2245 	int ret;
2246 
2247 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2248 
2249 	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2250 
2251 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2252 
2253 	/* Note that the w/a also requires 64 PTE of padding following the
2254 	 * bo. We currently fill all unused PTE with the shadow page and so
2255 	 * we should always have valid PTE following the scanout preventing
2256 	 * the VT-d warning.
2257 	 */
2258 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2259 		alignment = 256 * 1024;
2260 
2261 	/*
2262 	 * Global GTT PTE registers are special registers which actually forward
2263 	 * writes to a chunk of system memory, which means that there is no risk
2264 	 * that the register values disappear as soon as we call
2265 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2266 	 * pin/unpin/fence and not more.
2267 	 */
2268 	intel_runtime_pm_get(dev_priv);
2269 
2270 	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2271 						   &view);
2272 	if (ret)
2273 		goto err_pm;
2274 
2275 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2276 	 * fence, whereas 965+ only requires a fence if using
2277 	 * framebuffer compression.  For simplicity, we always install
2278 	 * a fence as the cost is not that onerous.
2279 	 */
2280 	if (view.type == I915_GGTT_VIEW_NORMAL) {
2281 		ret = i915_gem_object_get_fence(obj);
2282 		if (ret == -EDEADLK) {
2283 			/*
2284 			 * -EDEADLK means there are no free fences
2285 			 * -EDEADLK means there are no free fences
2286 			 * and no pending flips.
2287 			 * This is propagated to atomic, but it uses
2288 			 * -EDEADLK to force a locking recovery, so
2289 			 * change the returned error to -EBUSY.
2290 			 */
2291 			ret = -EBUSY;
2292 			goto err_unpin;
2293 		} else if (ret)
2294 			goto err_unpin;
2295 
2296 		i915_gem_object_pin_fence(obj);
2297 	}
2298 
2299 	intel_runtime_pm_put(dev_priv);
2300 	return 0;
2301 
2302 err_unpin:
2303 	i915_gem_object_unpin_from_display_plane(obj, &view);
2304 err_pm:
2305 	intel_runtime_pm_put(dev_priv);
2306 	return ret;
2307 }
2308 
2309 static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2310 {
2311 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2312 	struct i915_ggtt_view view;
2313 
2314 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2315 
2316 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2317 
2318 	if (view.type == I915_GGTT_VIEW_NORMAL)
2319 		i915_gem_object_unpin_fence(obj);
2320 
2321 	i915_gem_object_unpin_from_display_plane(obj, &view);
2322 }
2323 
2324 /*
2325  * Adjust the tile offset by moving the difference into
2326  * the x/y offsets.
2327  *
2328  * Input tile dimensions and pitch must already be
2329  * rotated to match x and y, and in pixel units.
2330  */
2331 static u32 intel_adjust_tile_offset(int *x, int *y,
2332 				    unsigned int tile_width,
2333 				    unsigned int tile_height,
2334 				    unsigned int tile_size,
2335 				    unsigned int pitch_tiles,
2336 				    u32 old_offset,
2337 				    u32 new_offset)
2338 {
2339 	unsigned int tiles;
2340 
2341 	WARN_ON(old_offset & (tile_size - 1));
2342 	WARN_ON(new_offset & (tile_size - 1));
2343 	WARN_ON(new_offset > old_offset);
2344 
2345 	tiles = (old_offset - new_offset) / tile_size;
2346 
2347 	*y += tiles / pitch_tiles * tile_height;
2348 	*x += tiles % pitch_tiles * tile_width;
2349 
2350 	return new_offset;
2351 }
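/*
 * Worked example (illustrative): moving an offset 10 tiles back on a
 * surface that is 4 tiles wide (pitch_tiles = 4) adds 10/4 = 2 tile
 * rows to *y and 10%4 = 2 tile columns to *x.
 */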
2352 
2353 /*
2354  * Computes the linear offset to the base tile and adjusts
2355  * x, y. Bytes per pixel is assumed to be a power of two.
2356  *
2357  * In the 90/270 rotated case, x and y are assumed
2358  * to be already rotated to match the rotated GTT view, and
2359  * pitch is the tile_height aligned framebuffer height.
2360  */
2361 u32 intel_compute_tile_offset(int *x, int *y,
2362 			      const struct drm_framebuffer *fb, int plane,
2363 			      unsigned int pitch,
2364 			      unsigned int rotation)
2365 {
2366 	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2367 	uint64_t fb_modifier = fb->modifier[plane];
2368 	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2369 	u32 offset, offset_aligned, alignment;
2370 
2371 	alignment = intel_surf_alignment(dev_priv, fb_modifier);
2372 	if (alignment)
2373 		alignment--;
2374 
2375 	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2376 		unsigned int tile_size, tile_width, tile_height;
2377 		unsigned int tile_rows, tiles, pitch_tiles;
2378 
2379 		tile_size = intel_tile_size(dev_priv);
2380 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2381 				fb_modifier, cpp);
2382 
2383 		if (intel_rotation_90_or_270(rotation)) {
2384 			pitch_tiles = pitch / tile_height;
2385 			swap(tile_width, tile_height);
2386 		} else {
2387 			pitch_tiles = pitch / (tile_width * cpp);
2388 		}
2389 
2390 		tile_rows = *y / tile_height;
2391 		*y %= tile_height;
2392 
2393 		tiles = *x / tile_width;
2394 		*x %= tile_width;
2395 
2396 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2397 		offset_aligned = offset & ~alignment;
2398 
2399 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2400 					 tile_size, pitch_tiles,
2401 					 offset, offset_aligned);
2402 	} else {
2403 		offset = *y * pitch + *x * cpp;
2404 		offset_aligned = offset & ~alignment;
2405 
2406 		*y = (offset & alignment) / pitch;
2407 		*x = ((offset & alignment) - *y * pitch) / cpp;
2408 	}
2409 
2410 	return offset_aligned;
2411 }
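/*
 * Worked example (illustrative): X-tiled on gen4+ with cpp=4 and a
 * 4096-byte pitch gives pitch_tiles = 4096/512 = 8 and a 128x8 pixel
 * tile. For x=200, y=100 this yields tiles = 200/128 = 1 and
 * tile_rows = 100/8 = 12, so the base tile starts at
 * (12*8 + 1) * 4096 bytes, with the remainders x%128 and y%8 left
 * behind in *x and *y.
 */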
2412 
2413 static int i9xx_format_to_fourcc(int format)
2414 {
2415 	switch (format) {
2416 	case DISPPLANE_8BPP:
2417 		return DRM_FORMAT_C8;
2418 	case DISPPLANE_BGRX555:
2419 		return DRM_FORMAT_XRGB1555;
2420 	case DISPPLANE_BGRX565:
2421 		return DRM_FORMAT_RGB565;
2422 	default:
2423 	case DISPPLANE_BGRX888:
2424 		return DRM_FORMAT_XRGB8888;
2425 	case DISPPLANE_RGBX888:
2426 		return DRM_FORMAT_XBGR8888;
2427 	case DISPPLANE_BGRX101010:
2428 		return DRM_FORMAT_XRGB2101010;
2429 	case DISPPLANE_RGBX101010:
2430 		return DRM_FORMAT_XBGR2101010;
2431 	}
2432 }
2433 
2434 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2435 {
2436 	switch (format) {
2437 	case PLANE_CTL_FORMAT_RGB_565:
2438 		return DRM_FORMAT_RGB565;
2439 	default:
2440 	case PLANE_CTL_FORMAT_XRGB_8888:
2441 		if (rgb_order) {
2442 			if (alpha)
2443 				return DRM_FORMAT_ABGR8888;
2444 			else
2445 				return DRM_FORMAT_XBGR8888;
2446 		} else {
2447 			if (alpha)
2448 				return DRM_FORMAT_ARGB8888;
2449 			else
2450 				return DRM_FORMAT_XRGB8888;
2451 		}
2452 	case PLANE_CTL_FORMAT_XRGB_2101010:
2453 		if (rgb_order)
2454 			return DRM_FORMAT_XBGR2101010;
2455 		else
2456 			return DRM_FORMAT_XRGB2101010;
2457 	}
2458 }
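/*
 * In both decoders above the "default:" label sits on the XRGB_8888
 * case, so an unrecognized hardware format value decodes as an
 * XRGB8888 variant rather than returning garbage.
 */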
2459 
2460 static bool
2461 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2462 			      struct intel_initial_plane_config *plane_config)
2463 {
2464 	struct drm_device *dev = crtc->base.dev;
2465 	struct drm_i915_private *dev_priv = to_i915(dev);
2466 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2467 	struct drm_i915_gem_object *obj = NULL;
2468 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2469 	struct drm_framebuffer *fb = &plane_config->fb->base;
2470 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2471 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2472 				    PAGE_SIZE);
2473 
2474 	size_aligned -= base_aligned;
2475 
2476 	if (plane_config->size == 0)
2477 		return false;
2478 
2479 	/* If the FB is too big, just don't use it since fbdev is not very
2480 	 * important and we should probably use that space with FBC or other
2481 	 * features. */
2482 	if (size_aligned * 2 > ggtt->stolen_usable_size)
2483 		return false;
2484 
2485 	mutex_lock(&dev->struct_mutex);
2486 
2487 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2488 							     base_aligned,
2489 							     base_aligned,
2490 							     size_aligned);
2491 	if (!obj) {
2492 		mutex_unlock(&dev->struct_mutex);
2493 		return false;
2494 	}
2495 
2496 	obj->tiling_mode = plane_config->tiling;
2497 	if (obj->tiling_mode == I915_TILING_X)
2498 		obj->stride = fb->pitches[0];
2499 
2500 	mode_cmd.pixel_format = fb->pixel_format;
2501 	mode_cmd.width = fb->width;
2502 	mode_cmd.height = fb->height;
2503 	mode_cmd.pitches[0] = fb->pitches[0];
2504 	mode_cmd.modifier[0] = fb->modifier[0];
2505 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2506 
2507 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2508 				   &mode_cmd, obj)) {
2509 		DRM_DEBUG_KMS("intel fb init failed\n");
2510 		goto out_unref_obj;
2511 	}
2512 
2513 	mutex_unlock(&dev->struct_mutex);
2514 
2515 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2516 	return true;
2517 
2518 out_unref_obj:
2519 	drm_gem_object_unreference(&obj->base);
2520 	mutex_unlock(&dev->struct_mutex);
2521 	return false;
2522 }
2523 
2524 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2525 static void
2526 update_state_fb(struct drm_plane *plane)
2527 {
2528 	if (plane->fb == plane->state->fb)
2529 		return;
2530 
2531 	if (plane->state->fb)
2532 		drm_framebuffer_unreference(plane->state->fb);
2533 	plane->state->fb = plane->fb;
2534 	if (plane->state->fb)
2535 		drm_framebuffer_reference(plane->state->fb);
2536 }
2537 
2538 static void
2539 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2540 			     struct intel_initial_plane_config *plane_config)
2541 {
2542 	struct drm_device *dev = intel_crtc->base.dev;
2543 	struct drm_i915_private *dev_priv = dev->dev_private;
2544 	struct drm_crtc *c;
2545 	struct intel_crtc *i;
2546 	struct drm_i915_gem_object *obj;
2547 	struct drm_plane *primary = intel_crtc->base.primary;
2548 	struct drm_plane_state *plane_state = primary->state;
2549 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2550 	struct intel_plane *intel_plane = to_intel_plane(primary);
2551 	struct intel_plane_state *intel_state =
2552 		to_intel_plane_state(plane_state);
2553 	struct drm_framebuffer *fb;
2554 
2555 	if (!plane_config->fb)
2556 		return;
2557 
2558 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2559 		fb = &plane_config->fb->base;
2560 		goto valid_fb;
2561 	}
2562 
2563 	kfree(plane_config->fb);
2564 
2565 	/*
2566 	 * Failed to alloc the obj, check to see if we should share
2567 	 * an fb with another CRTC instead
2568 	 */
2569 	for_each_crtc(dev, c) {
2570 		i = to_intel_crtc(c);
2571 
2572 		if (c == &intel_crtc->base)
2573 			continue;
2574 
2575 		if (!i->active)
2576 			continue;
2577 
2578 		fb = c->primary->fb;
2579 		if (!fb)
2580 			continue;
2581 
2582 		obj = intel_fb_obj(fb);
2583 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2584 			drm_framebuffer_reference(fb);
2585 			goto valid_fb;
2586 		}
2587 	}
2588 
2589 	/*
2590 	 * We've failed to reconstruct the BIOS FB.  Current display state
2591 	 * indicates that the primary plane is visible, but has a NULL FB,
2592 	 * which will lead to problems later if we don't fix it up.  The
2593 	 * simplest solution is to just disable the primary plane now and
2594 	 * pretend the BIOS never had it enabled.
2595 	 */
2596 	to_intel_plane_state(plane_state)->visible = false;
2597 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2598 	intel_pre_disable_primary_noatomic(&intel_crtc->base);
2599 	intel_plane->disable_plane(primary, &intel_crtc->base);
2600 
2601 	return;
2602 
2603 valid_fb:
2604 	plane_state->src_x = 0;
2605 	plane_state->src_y = 0;
2606 	plane_state->src_w = fb->width << 16;
2607 	plane_state->src_h = fb->height << 16;
2608 
2609 	plane_state->crtc_x = 0;
2610 	plane_state->crtc_y = 0;
2611 	plane_state->crtc_w = fb->width;
2612 	plane_state->crtc_h = fb->height;
2613 
2614 	intel_state->src.x1 = plane_state->src_x;
2615 	intel_state->src.y1 = plane_state->src_y;
2616 	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
2617 	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
2618 	intel_state->dst.x1 = plane_state->crtc_x;
2619 	intel_state->dst.y1 = plane_state->crtc_y;
2620 	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
2621 	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2622 
2623 	obj = intel_fb_obj(fb);
2624 	if (obj->tiling_mode != I915_TILING_NONE)
2625 		dev_priv->preserve_bios_swizzle = true;
2626 
2627 	drm_framebuffer_reference(fb);
2628 	primary->fb = primary->state->fb = fb;
2629 	primary->crtc = primary->state->crtc = &intel_crtc->base;
2630 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2631 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2632 }
2633 
2634 static void i9xx_update_primary_plane(struct drm_plane *primary,
2635 				      const struct intel_crtc_state *crtc_state,
2636 				      const struct intel_plane_state *plane_state)
2637 {
2638 	struct drm_device *dev = primary->dev;
2639 	struct drm_i915_private *dev_priv = dev->dev_private;
2640 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2641 	struct drm_framebuffer *fb = plane_state->base.fb;
2642 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2643 	int plane = intel_crtc->plane;
2644 	u32 linear_offset;
2645 	u32 dspcntr;
2646 	i915_reg_t reg = DSPCNTR(plane);
2647 	unsigned int rotation = plane_state->base.rotation;
2648 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2649 	int x = plane_state->src.x1 >> 16;
2650 	int y = plane_state->src.y1 >> 16;
2651 
2652 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2653 
2654 	dspcntr |= DISPLAY_PLANE_ENABLE;
2655 
2656 	if (INTEL_INFO(dev)->gen < 4) {
2657 		if (intel_crtc->pipe == PIPE_B)
2658 			dspcntr |= DISPPLANE_SEL_PIPE_B;
2659 
2660 		/* pipesrc and dspsize control the size that is scaled from,
2661 		 * which should always be the user's requested size.
2662 		 */
2663 		I915_WRITE(DSPSIZE(plane),
2664 			   ((crtc_state->pipe_src_h - 1) << 16) |
2665 			   (crtc_state->pipe_src_w - 1));
2666 		I915_WRITE(DSPPOS(plane), 0);
2667 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2668 		I915_WRITE(PRIMSIZE(plane),
2669 			   ((crtc_state->pipe_src_h - 1) << 16) |
2670 			   (crtc_state->pipe_src_w - 1));
2671 		I915_WRITE(PRIMPOS(plane), 0);
2672 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2673 	}
2674 
2675 	switch (fb->pixel_format) {
2676 	case DRM_FORMAT_C8:
2677 		dspcntr |= DISPPLANE_8BPP;
2678 		break;
2679 	case DRM_FORMAT_XRGB1555:
2680 		dspcntr |= DISPPLANE_BGRX555;
2681 		break;
2682 	case DRM_FORMAT_RGB565:
2683 		dspcntr |= DISPPLANE_BGRX565;
2684 		break;
2685 	case DRM_FORMAT_XRGB8888:
2686 		dspcntr |= DISPPLANE_BGRX888;
2687 		break;
2688 	case DRM_FORMAT_XBGR8888:
2689 		dspcntr |= DISPPLANE_RGBX888;
2690 		break;
2691 	case DRM_FORMAT_XRGB2101010:
2692 		dspcntr |= DISPPLANE_BGRX101010;
2693 		break;
2694 	case DRM_FORMAT_XBGR2101010:
2695 		dspcntr |= DISPPLANE_RGBX101010;
2696 		break;
2697 	default:
2698 		BUG();
2699 	}
2700 
2701 	if (INTEL_INFO(dev)->gen >= 4 &&
2702 	    obj->tiling_mode != I915_TILING_NONE)
2703 		dspcntr |= DISPPLANE_TILED;
2704 
2705 	if (IS_G4X(dev))
2706 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2707 
2708 	linear_offset = y * fb->pitches[0] + x * cpp;
2709 
2710 	if (INTEL_INFO(dev)->gen >= 4) {
2711 		intel_crtc->dspaddr_offset =
2712 			intel_compute_tile_offset(&x, &y, fb, 0,
2713 						  fb->pitches[0], rotation);
2714 		linear_offset -= intel_crtc->dspaddr_offset;
2715 	} else {
2716 		intel_crtc->dspaddr_offset = linear_offset;
2717 	}
2718 
2719 	if (rotation == DRM_ROTATE_180) {
2720 		dspcntr |= DISPPLANE_ROTATE_180;
2721 
2722 		x += (crtc_state->pipe_src_w - 1);
2723 		y += (crtc_state->pipe_src_h - 1);
2724 
2725 		/* Find the last pixel of the last line of the display
2726 		 * data and add it to linear_offset */
2727 		linear_offset +=
2728 			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2729 			(crtc_state->pipe_src_w - 1) * cpp;
2730 	}
2731 
2732 	intel_crtc->adjusted_x = x;
2733 	intel_crtc->adjusted_y = y;
2734 
2735 	I915_WRITE(reg, dspcntr);
2736 
2737 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2738 	if (INTEL_INFO(dev)->gen >= 4) {
2739 		I915_WRITE(DSPSURF(plane),
2740 			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2741 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2742 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2743 	} else
2744 		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2745 	POSTING_READ(reg);
2746 }
2747 
2748 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2749 				       struct drm_crtc *crtc)
2750 {
2751 	struct drm_device *dev = crtc->dev;
2752 	struct drm_i915_private *dev_priv = dev->dev_private;
2753 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2754 	int plane = intel_crtc->plane;
2755 
2756 	I915_WRITE(DSPCNTR(plane), 0);
2757 	if (INTEL_INFO(dev_priv)->gen >= 4)
2758 		I915_WRITE(DSPSURF(plane), 0);
2759 	else
2760 		I915_WRITE(DSPADDR(plane), 0);
2761 	POSTING_READ(DSPCNTR(plane));
2762 }
2763 
2764 static void ironlake_update_primary_plane(struct drm_plane *primary,
2765 					  const struct intel_crtc_state *crtc_state,
2766 					  const struct intel_plane_state *plane_state)
2767 {
2768 	struct drm_device *dev = primary->dev;
2769 	struct drm_i915_private *dev_priv = dev->dev_private;
2770 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2771 	struct drm_framebuffer *fb = plane_state->base.fb;
2772 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2773 	int plane = intel_crtc->plane;
2774 	u32 linear_offset;
2775 	u32 dspcntr;
2776 	i915_reg_t reg = DSPCNTR(plane);
2777 	unsigned int rotation = plane_state->base.rotation;
2778 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2779 	int x = plane_state->src.x1 >> 16;
2780 	int y = plane_state->src.y1 >> 16;
2781 
2782 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2783 	dspcntr |= DISPLAY_PLANE_ENABLE;
2784 
2785 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2786 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2787 
2788 	switch (fb->pixel_format) {
2789 	case DRM_FORMAT_C8:
2790 		dspcntr |= DISPPLANE_8BPP;
2791 		break;
2792 	case DRM_FORMAT_RGB565:
2793 		dspcntr |= DISPPLANE_BGRX565;
2794 		break;
2795 	case DRM_FORMAT_XRGB8888:
2796 		dspcntr |= DISPPLANE_BGRX888;
2797 		break;
2798 	case DRM_FORMAT_XBGR8888:
2799 		dspcntr |= DISPPLANE_RGBX888;
2800 		break;
2801 	case DRM_FORMAT_XRGB2101010:
2802 		dspcntr |= DISPPLANE_BGRX101010;
2803 		break;
2804 	case DRM_FORMAT_XBGR2101010:
2805 		dspcntr |= DISPPLANE_RGBX101010;
2806 		break;
2807 	default:
2808 		BUG();
2809 	}
2810 
2811 	if (obj->tiling_mode != I915_TILING_NONE)
2812 		dspcntr |= DISPPLANE_TILED;
2813 
2814 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2815 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2816 
2817 	linear_offset = y * fb->pitches[0] + x * cpp;
2818 	intel_crtc->dspaddr_offset =
2819 		intel_compute_tile_offset(&x, &y, fb, 0,
2820 					  fb->pitches[0], rotation);
2821 	linear_offset -= intel_crtc->dspaddr_offset;
2822 	if (rotation == DRM_ROTATE_180) {
2823 		dspcntr |= DISPPLANE_ROTATE_180;
2824 
2825 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2826 			x += (crtc_state->pipe_src_w - 1);
2827 			y += (crtc_state->pipe_src_h - 1);
2828 
2829 			/* Find the last pixel of the last line of the display
2830 			 * data and add it to linear_offset */
2831 			linear_offset +=
2832 				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2833 				(crtc_state->pipe_src_w - 1) * cpp;
2834 		}
2835 	}
2836 
2837 	intel_crtc->adjusted_x = x;
2838 	intel_crtc->adjusted_y = y;
2839 
2840 	I915_WRITE(reg, dspcntr);
2841 
2842 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2843 	I915_WRITE(DSPSURF(plane),
2844 		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2845 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2846 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2847 	} else {
2848 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2849 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2850 	}
2851 	POSTING_READ(reg);
2852 }
2853 
2854 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2855 			      uint64_t fb_modifier, uint32_t pixel_format)
2856 {
2857 	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2858 		return 64;
2859 	} else {
2860 		int cpp = drm_format_plane_cpp(pixel_format, 0);
2861 
2862 		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2863 	}
2864 }
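/*
 * Linear scanout needs only the fixed 64-byte stride alignment; tiled
 * scanout strides must be a multiple of the tile width in bytes.
 */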
2865 
2866 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2867 			   struct drm_i915_gem_object *obj,
2868 			   unsigned int plane)
2869 {
2870 	struct i915_ggtt_view view;
2871 	struct i915_vma *vma;
2872 	u64 offset;
2873 
2874 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2875 				intel_plane->base.state->rotation);
2876 
2877 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2878 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2879 		view.type))
2880 		return -1;
2881 
2882 	offset = vma->node.start;
2883 
2884 	if (plane == 1) {
2885 		offset += vma->ggtt_view.params.rotated.uv_start_page *
2886 			  PAGE_SIZE;
2887 	}
2888 
2889 	WARN_ON(upper_32_bits(offset));
2890 
2891 	return lower_32_bits(offset);
2892 }
2893 
2894 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2895 {
2896 	struct drm_device *dev = intel_crtc->base.dev;
2897 	struct drm_i915_private *dev_priv = dev->dev_private;
2898 
2899 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2900 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2901 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2902 }
2903 
2904 /*
2905  * This function detaches (a.k.a. unbinds) unused scalers in hardware
2906  */
2907 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2908 {
2909 	struct intel_crtc_scaler_state *scaler_state;
2910 	int i;
2911 
2912 	scaler_state = &intel_crtc->config->scaler_state;
2913 
2914 	/* loop through and disable scalers that aren't in use */
2915 	for (i = 0; i < intel_crtc->num_scalers; i++) {
2916 		if (!scaler_state->scalers[i].in_use)
2917 			skl_detach_scaler(intel_crtc, i);
2918 	}
2919 }
2920 
2921 u32 skl_plane_ctl_format(uint32_t pixel_format)
2922 {
2923 	switch (pixel_format) {
2924 	case DRM_FORMAT_C8:
2925 		return PLANE_CTL_FORMAT_INDEXED;
2926 	case DRM_FORMAT_RGB565:
2927 		return PLANE_CTL_FORMAT_RGB_565;
2928 	case DRM_FORMAT_XBGR8888:
2929 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2930 	case DRM_FORMAT_XRGB8888:
2931 		return PLANE_CTL_FORMAT_XRGB_8888;
2932 	/*
2933 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
2934 	 * to be already pre-multiplied. We need to add a knob (or a different
2935 	 * DRM_FORMAT) for user-space to configure that.
2936 	 */
2937 	case DRM_FORMAT_ABGR8888:
2938 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2939 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2940 	case DRM_FORMAT_ARGB8888:
2941 		return PLANE_CTL_FORMAT_XRGB_8888 |
2942 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2943 	case DRM_FORMAT_XRGB2101010:
2944 		return PLANE_CTL_FORMAT_XRGB_2101010;
2945 	case DRM_FORMAT_XBGR2101010:
2946 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2947 	case DRM_FORMAT_YUYV:
2948 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2949 	case DRM_FORMAT_YVYU:
2950 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2951 	case DRM_FORMAT_UYVY:
2952 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2953 	case DRM_FORMAT_VYUY:
2954 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2955 	default:
2956 		MISSING_CASE(pixel_format);
2957 	}
2958 
2959 	return 0;
2960 }
2961 
2962 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2963 {
2964 	switch (fb_modifier) {
2965 	case DRM_FORMAT_MOD_NONE:
2966 		break;
2967 	case I915_FORMAT_MOD_X_TILED:
2968 		return PLANE_CTL_TILED_X;
2969 	case I915_FORMAT_MOD_Y_TILED:
2970 		return PLANE_CTL_TILED_Y;
2971 	case I915_FORMAT_MOD_Yf_TILED:
2972 		return PLANE_CTL_TILED_YF;
2973 	default:
2974 		MISSING_CASE(fb_modifier);
2975 	}
2976 
2977 	return 0;
2978 }
2979 
2980 u32 skl_plane_ctl_rotation(unsigned int rotation)
2981 {
2982 	switch (rotation) {
2983 	case DRM_ROTATE_0:
2984 		break;
2985 	/*
2986 	 * DRM_ROTATE_ is counter-clockwise, to stay compatible with Xrandr,
2987 	 * while i915 HW rotation is clockwise; that's why the values are swapped.
2988 	 */
2989 	case DRM_ROTATE_90:
2990 		return PLANE_CTL_ROTATE_270;
2991 	case DRM_ROTATE_180:
2992 		return PLANE_CTL_ROTATE_180;
2993 	case DRM_ROTATE_270:
2994 		return PLANE_CTL_ROTATE_90;
2995 	default:
2996 		MISSING_CASE(rotation);
2997 	}
2998 
2999 	return 0;
3000 }
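/*
 * Example: a framebuffer rotated 90 degrees counter-clockwise by
 * userspace (DRM_ROTATE_90) is programmed as a 270 degree clockwise
 * hardware rotation (PLANE_CTL_ROTATE_270), and vice versa.
 */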
3001 
3002 static void skylake_update_primary_plane(struct drm_plane *plane,
3003 					 const struct intel_crtc_state *crtc_state,
3004 					 const struct intel_plane_state *plane_state)
3005 {
3006 	struct drm_device *dev = plane->dev;
3007 	struct drm_i915_private *dev_priv = dev->dev_private;
3008 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3009 	struct drm_framebuffer *fb = plane_state->base.fb;
3010 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3011 	int pipe = intel_crtc->pipe;
3012 	u32 plane_ctl, stride_div, stride;
3013 	u32 tile_height, plane_offset, plane_size;
3014 	unsigned int rotation = plane_state->base.rotation;
3015 	int x_offset, y_offset;
3016 	u32 surf_addr;
3017 	int scaler_id = plane_state->scaler_id;
3018 	int src_x = plane_state->src.x1 >> 16;
3019 	int src_y = plane_state->src.y1 >> 16;
3020 	int src_w = drm_rect_width(&plane_state->src) >> 16;
3021 	int src_h = drm_rect_height(&plane_state->src) >> 16;
3022 	int dst_x = plane_state->dst.x1;
3023 	int dst_y = plane_state->dst.y1;
3024 	int dst_w = drm_rect_width(&plane_state->dst);
3025 	int dst_h = drm_rect_height(&plane_state->dst);
3026 
3027 	plane_ctl = PLANE_CTL_ENABLE |
3028 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3029 		    PLANE_CTL_PIPE_CSC_ENABLE;
3030 
3031 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3032 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3033 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3034 	plane_ctl |= skl_plane_ctl_rotation(rotation);
3035 
3036 	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3037 					       fb->pixel_format);
3038 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3039 
3040 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3041 
3042 	if (intel_rotation_90_or_270(rotation)) {
3043 		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3044 
3045 		/* stride = Surface height in tiles */
3046 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3047 		stride = DIV_ROUND_UP(fb->height, tile_height);
3048 		x_offset = stride * tile_height - src_y - src_h;
3049 		y_offset = src_x;
3050 		plane_size = (src_w - 1) << 16 | (src_h - 1);
3051 	} else {
3052 		stride = fb->pitches[0] / stride_div;
3053 		x_offset = src_x;
3054 		y_offset = src_y;
3055 		plane_size = (src_h - 1) << 16 | (src_w - 1);
3056 	}
3057 	plane_offset = y_offset << 16 | x_offset;
3058 
3059 	intel_crtc->adjusted_x = x_offset;
3060 	intel_crtc->adjusted_y = y_offset;
3061 
3062 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3063 	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3064 	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3065 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3066 
3067 	if (scaler_id >= 0) {
3068 		uint32_t ps_ctrl = 0;
3069 
3070 		WARN_ON(!dst_w || !dst_h);
3071 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3072 			crtc_state->scaler_state.scalers[scaler_id].mode;
3073 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3074 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3075 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3076 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3077 		I915_WRITE(PLANE_POS(pipe, 0), 0);
3078 	} else {
3079 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3080 	}
3081 
3082 	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3083 
3084 	POSTING_READ(PLANE_SURF(pipe, 0));
3085 }
3086 
3087 static void skylake_disable_primary_plane(struct drm_plane *primary,
3088 					  struct drm_crtc *crtc)
3089 {
3090 	struct drm_device *dev = crtc->dev;
3091 	struct drm_i915_private *dev_priv = dev->dev_private;
3092 	int pipe = to_intel_crtc(crtc)->pipe;
3093 
3094 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3095 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
3096 	POSTING_READ(PLANE_SURF(pipe, 0));
3097 }
3098 
3099 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3100 static int
3101 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3102 			   int x, int y, enum mode_set_atomic state)
3103 {
3104 	/* Support for kgdboc is disabled; this needs a major rework. */
3105 	DRM_ERROR("legacy panic handler not supported any more.\n");
3106 
3107 	return -ENODEV;
3108 }
3109 
3110 static void intel_complete_page_flips(struct drm_device *dev)
3111 {
3112 	struct drm_crtc *crtc;
3113 
3114 	for_each_crtc(dev, crtc) {
3115 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3116 		enum plane plane = intel_crtc->plane;
3117 
3118 		intel_prepare_page_flip(dev, plane);
3119 		intel_finish_page_flip_plane(dev, plane);
3120 	}
3121 }
3122 
3123 static void intel_update_primary_planes(struct drm_device *dev)
3124 {
3125 	struct drm_crtc *crtc;
3126 
3127 	for_each_crtc(dev, crtc) {
3128 		struct intel_plane *plane = to_intel_plane(crtc->primary);
3129 		struct intel_plane_state *plane_state;
3130 
3131 		drm_modeset_lock_crtc(crtc, &plane->base);
3132 		plane_state = to_intel_plane_state(plane->base.state);
3133 
3134 		if (plane_state->visible)
3135 			plane->update_plane(&plane->base,
3136 					    to_intel_crtc_state(crtc->state),
3137 					    plane_state);
3138 
3139 		drm_modeset_unlock_crtc(crtc);
3140 	}
3141 }
3142 
3143 void intel_prepare_reset(struct drm_device *dev)
3144 {
3145 	/* no reset support for gen2 */
3146 	if (IS_GEN2(dev))
3147 		return;
3148 
3149 	/* reset doesn't touch the display */
3150 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3151 		return;
3152 
3153 	drm_modeset_lock_all(dev);
3154 	/*
3155 	 * Disabling the crtcs gracefully seems nicer. Also the
3156 	 * g33 docs say we should at least disable all the planes.
3157 	 */
3158 	intel_display_suspend(dev);
3159 }
3160 
3161 void intel_finish_reset(struct drm_device *dev)
3162 {
3163 	struct drm_i915_private *dev_priv = to_i915(dev);
3164 
3165 	/*
3166 	 * Flips in the rings will be nuked by the reset,
3167 	 * so complete all pending flips so that user space
3168 	 * will get its events and not get stuck.
3169 	 */
3170 	intel_complete_page_flips(dev);
3171 
3172 	/* no reset support for gen2 */
3173 	if (IS_GEN2(dev))
3174 		return;
3175 
3176 	/* reset doesn't touch the display */
3177 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3178 		/*
3179 		 * Flips in the rings have been nuked by the reset,
3180 		 * so update the base address of all primary
3181 		 * planes to the last fb to make sure we're
3182 		 * showing the correct fb after a reset.
3183 		 *
3184 		 * FIXME: Atomic will make this obsolete since we won't schedule
3185 		 * CS-based flips (which might get lost in gpu resets) any more.
3186 		 */
3187 		intel_update_primary_planes(dev);
3188 		return;
3189 	}
3190 
3191 	/*
3192 	 * The display has been reset as well,
3193 	 * so need a full re-initialization.
3194 	 */
3195 	intel_runtime_pm_disable_interrupts(dev_priv);
3196 	intel_runtime_pm_enable_interrupts(dev_priv);
3197 
3198 	intel_modeset_init_hw(dev);
3199 
3200 	spin_lock_irq(&dev_priv->irq_lock);
3201 	if (dev_priv->display.hpd_irq_setup)
3202 		dev_priv->display.hpd_irq_setup(dev);
3203 	spin_unlock_irq(&dev_priv->irq_lock);
3204 
3205 	intel_display_resume(dev);
3206 
3207 	intel_hpd_init(dev_priv);
3208 
3209 	drm_modeset_unlock_all(dev);
3210 }
3211 
3212 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3213 {
3214 	struct drm_device *dev = crtc->dev;
3215 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3216 	unsigned reset_counter;
3217 	bool pending;
3218 
3219 	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3220 	if (intel_crtc->reset_counter != reset_counter)
3221 		return false;
3222 
3223 	spin_lock_irq(&dev->event_lock);
3224 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3225 	spin_unlock_irq(&dev->event_lock);
3226 
3227 	return pending;
3228 }
3229 
3230 static void intel_update_pipe_config(struct intel_crtc *crtc,
3231 				     struct intel_crtc_state *old_crtc_state)
3232 {
3233 	struct drm_device *dev = crtc->base.dev;
3234 	struct drm_i915_private *dev_priv = dev->dev_private;
3235 	struct intel_crtc_state *pipe_config =
3236 		to_intel_crtc_state(crtc->base.state);
3237 
3238 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3239 	crtc->base.mode = crtc->base.state->mode;
3240 
3241 	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3242 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3243 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3244 
3245 	/*
3246 	 * Update pipe size and adjust fitter if needed: the reason for this is
3247 	 * that in compute_mode_changes we check the native mode (not the pfit
3248 	 * mode) to see if we can flip rather than do a full mode set. In the
3249 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3250 	 * pfit state, we'll end up with a big fb scanned out into the wrong
3251 	 * sized surface.
3252 	 */
3253 
3254 	I915_WRITE(PIPESRC(crtc->pipe),
3255 		   ((pipe_config->pipe_src_w - 1) << 16) |
3256 		   (pipe_config->pipe_src_h - 1));
3257 
3258 	/* on skylake this is done by detaching scalers */
3259 	if (INTEL_INFO(dev)->gen >= 9) {
3260 		skl_detach_scalers(crtc);
3261 
3262 		if (pipe_config->pch_pfit.enabled)
3263 			skylake_pfit_enable(crtc);
3264 	} else if (HAS_PCH_SPLIT(dev)) {
3265 		if (pipe_config->pch_pfit.enabled)
3266 			ironlake_pfit_enable(crtc);
3267 		else if (old_crtc_state->pch_pfit.enabled)
3268 			ironlake_pfit_disable(crtc, true);
3269 	}
3270 }
3271 
3272 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3273 {
3274 	struct drm_device *dev = crtc->dev;
3275 	struct drm_i915_private *dev_priv = dev->dev_private;
3276 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3277 	int pipe = intel_crtc->pipe;
3278 	i915_reg_t reg;
3279 	u32 temp;
3280 
3281 	/* enable normal train */
3282 	reg = FDI_TX_CTL(pipe);
3283 	temp = I915_READ(reg);
3284 	if (IS_IVYBRIDGE(dev)) {
3285 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3286 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3287 	} else {
3288 		temp &= ~FDI_LINK_TRAIN_NONE;
3289 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3290 	}
3291 	I915_WRITE(reg, temp);
3292 
3293 	reg = FDI_RX_CTL(pipe);
3294 	temp = I915_READ(reg);
3295 	if (HAS_PCH_CPT(dev)) {
3296 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3297 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3298 	} else {
3299 		temp &= ~FDI_LINK_TRAIN_NONE;
3300 		temp |= FDI_LINK_TRAIN_NONE;
3301 	}
3302 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3303 
3304 	/* wait one idle pattern time */
3305 	POSTING_READ(reg);
3306 	udelay(1000);
3307 
3308 	/* IVB wants error correction enabled */
3309 	if (IS_IVYBRIDGE(dev))
3310 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3311 			   FDI_FE_ERRC_ENABLE);
3312 }
3313 
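/*
 * FDI link training in outline: unmask the RX bit/symbol lock status
 * bits, transmit training pattern 1 until the receiver reports bit
 * lock, switch to pattern 2 until it reports symbol lock, then drop
 * back to the normal (idle) pattern via intel_fdi_normal_train().
 */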
3314 /* The FDI link training functions for ILK/Ibexpeak. */
3315 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3316 {
3317 	struct drm_device *dev = crtc->dev;
3318 	struct drm_i915_private *dev_priv = dev->dev_private;
3319 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3320 	int pipe = intel_crtc->pipe;
3321 	i915_reg_t reg;
3322 	u32 temp, tries;
3323 
3324 	/* FDI needs bits from pipe first */
3325 	assert_pipe_enabled(dev_priv, pipe);
3326 
3327 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3328 	   for train result */
3329 	reg = FDI_RX_IMR(pipe);
3330 	temp = I915_READ(reg);
3331 	temp &= ~FDI_RX_SYMBOL_LOCK;
3332 	temp &= ~FDI_RX_BIT_LOCK;
3333 	I915_WRITE(reg, temp);
3334 	I915_READ(reg);
3335 	udelay(150);
3336 
3337 	/* enable CPU FDI TX and PCH FDI RX */
3338 	reg = FDI_TX_CTL(pipe);
3339 	temp = I915_READ(reg);
3340 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3341 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3342 	temp &= ~FDI_LINK_TRAIN_NONE;
3343 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3344 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3345 
3346 	reg = FDI_RX_CTL(pipe);
3347 	temp = I915_READ(reg);
3348 	temp &= ~FDI_LINK_TRAIN_NONE;
3349 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3350 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3351 
3352 	POSTING_READ(reg);
3353 	udelay(150);
3354 
3355 	/* Ironlake workaround, enable clock pointer after FDI enable */
3356 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3357 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3358 		   FDI_RX_PHASE_SYNC_POINTER_EN);
3359 
3360 	reg = FDI_RX_IIR(pipe);
3361 	for (tries = 0; tries < 5; tries++) {
3362 		temp = I915_READ(reg);
3363 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3364 
3365 		if ((temp & FDI_RX_BIT_LOCK)) {
3366 			DRM_DEBUG_KMS("FDI train 1 done.\n");
3367 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3368 			break;
3369 		}
3370 	}
3371 	if (tries == 5)
3372 		DRM_ERROR("FDI train 1 fail!\n");
3373 
3374 	/* Train 2 */
3375 	reg = FDI_TX_CTL(pipe);
3376 	temp = I915_READ(reg);
3377 	temp &= ~FDI_LINK_TRAIN_NONE;
3378 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3379 	I915_WRITE(reg, temp);
3380 
3381 	reg = FDI_RX_CTL(pipe);
3382 	temp = I915_READ(reg);
3383 	temp &= ~FDI_LINK_TRAIN_NONE;
3384 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3385 	I915_WRITE(reg, temp);
3386 
3387 	POSTING_READ(reg);
3388 	udelay(150);
3389 
3390 	reg = FDI_RX_IIR(pipe);
3391 	for (tries = 0; tries < 5; tries++) {
3392 		temp = I915_READ(reg);
3393 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3394 
3395 		if (temp & FDI_RX_SYMBOL_LOCK) {
3396 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3397 			DRM_DEBUG_KMS("FDI train 2 done.\n");
3398 			break;
3399 		}
3400 	}
3401 	if (tries == 5)
3402 		DRM_ERROR("FDI train 2 fail!\n");
3403 
3404 	DRM_DEBUG_KMS("FDI train done\n");
3405 
3406 }
3407 
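/*
 * Voltage-swing/pre-emphasis levels for SNB-B, tried in this order by
 * the training loops below until the receiver reports bit/symbol lock.
 */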
3408 static const int snb_b_fdi_train_param[] = {
3409 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3410 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3411 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3412 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3413 };
3414 
3415 /* The FDI link training functions for SNB/Cougarpoint. */
3416 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3417 {
3418 	struct drm_device *dev = crtc->dev;
3419 	struct drm_i915_private *dev_priv = dev->dev_private;
3420 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3421 	int pipe = intel_crtc->pipe;
3422 	i915_reg_t reg;
3423 	u32 temp, i, retry;
3424 
3425 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3426 	   for train result */
3427 	reg = FDI_RX_IMR(pipe);
3428 	temp = I915_READ(reg);
3429 	temp &= ~FDI_RX_SYMBOL_LOCK;
3430 	temp &= ~FDI_RX_BIT_LOCK;
3431 	I915_WRITE(reg, temp);
3432 
3433 	POSTING_READ(reg);
3434 	udelay(150);
3435 
3436 	/* enable CPU FDI TX and PCH FDI RX */
3437 	reg = FDI_TX_CTL(pipe);
3438 	temp = I915_READ(reg);
3439 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3440 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3441 	temp &= ~FDI_LINK_TRAIN_NONE;
3442 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3443 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3444 	/* SNB-B */
3445 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3446 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3447 
3448 	I915_WRITE(FDI_RX_MISC(pipe),
3449 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3450 
3451 	reg = FDI_RX_CTL(pipe);
3452 	temp = I915_READ(reg);
3453 	if (HAS_PCH_CPT(dev)) {
3454 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3455 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3456 	} else {
3457 		temp &= ~FDI_LINK_TRAIN_NONE;
3458 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3459 	}
3460 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3461 
3462 	POSTING_READ(reg);
3463 	udelay(150);
3464 
3465 	for (i = 0; i < 4; i++) {
3466 		reg = FDI_TX_CTL(pipe);
3467 		temp = I915_READ(reg);
3468 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3469 		temp |= snb_b_fdi_train_param[i];
3470 		I915_WRITE(reg, temp);
3471 
3472 		POSTING_READ(reg);
3473 		udelay(500);
3474 
3475 		for (retry = 0; retry < 5; retry++) {
3476 			reg = FDI_RX_IIR(pipe);
3477 			temp = I915_READ(reg);
3478 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3479 			if (temp & FDI_RX_BIT_LOCK) {
3480 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3481 				DRM_DEBUG_KMS("FDI train 1 done.\n");
3482 				break;
3483 			}
3484 			udelay(50);
3485 		}
3486 		if (retry < 5)
3487 			break;
3488 	}
3489 	if (i == 4)
3490 		DRM_ERROR("FDI train 1 fail!\n");
3491 
3492 	/* Train 2 */
3493 	reg = FDI_TX_CTL(pipe);
3494 	temp = I915_READ(reg);
3495 	temp &= ~FDI_LINK_TRAIN_NONE;
3496 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3497 	if (IS_GEN6(dev)) {
3498 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3499 		/* SNB-B */
3500 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3501 	}
3502 	I915_WRITE(reg, temp);
3503 
3504 	reg = FDI_RX_CTL(pipe);
3505 	temp = I915_READ(reg);
3506 	if (HAS_PCH_CPT(dev)) {
3507 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3508 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3509 	} else {
3510 		temp &= ~FDI_LINK_TRAIN_NONE;
3511 		temp |= FDI_LINK_TRAIN_PATTERN_2;
3512 	}
3513 	I915_WRITE(reg, temp);
3514 
3515 	POSTING_READ(reg);
3516 	udelay(150);
3517 
3518 	for (i = 0; i < 4; i++) {
3519 		reg = FDI_TX_CTL(pipe);
3520 		temp = I915_READ(reg);
3521 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3522 		temp |= snb_b_fdi_train_param[i];
3523 		I915_WRITE(reg, temp);
3524 
3525 		POSTING_READ(reg);
3526 		udelay(500);
3527 
3528 		for (retry = 0; retry < 5; retry++) {
3529 			reg = FDI_RX_IIR(pipe);
3530 			temp = I915_READ(reg);
3531 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3532 			if (temp & FDI_RX_SYMBOL_LOCK) {
3533 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3534 				DRM_DEBUG_KMS("FDI train 2 done.\n");
3535 				break;
3536 			}
3537 			udelay(50);
3538 		}
3539 		if (retry < 5)
3540 			break;
3541 	}
3542 	if (i == 4)
3543 		DRM_ERROR("FDI train 2 fail!\n");
3544 
3545 	DRM_DEBUG_KMS("FDI train done.\n");
3546 }
3547 
3548 /* Manual link training for Ivy Bridge A0 parts */
3549 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3550 {
3551 	struct drm_device *dev = crtc->dev;
3552 	struct drm_i915_private *dev_priv = dev->dev_private;
3553 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3554 	int pipe = intel_crtc->pipe;
3555 	i915_reg_t reg;
3556 	u32 temp, i, j;
3557 
3558 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3559 	   for train result */
3560 	reg = FDI_RX_IMR(pipe);
3561 	temp = I915_READ(reg);
3562 	temp &= ~FDI_RX_SYMBOL_LOCK;
3563 	temp &= ~FDI_RX_BIT_LOCK;
3564 	I915_WRITE(reg, temp);
3565 
3566 	POSTING_READ(reg);
3567 	udelay(150);
3568 
3569 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3570 		      I915_READ(FDI_RX_IIR(pipe)));
3571 
3572 	/* Try each vswing and preemphasis setting twice before moving on */
3573 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3574 		/* disable first in case we need to retry */
3575 		reg = FDI_TX_CTL(pipe);
3576 		temp = I915_READ(reg);
3577 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3578 		temp &= ~FDI_TX_ENABLE;
3579 		I915_WRITE(reg, temp);
3580 
3581 		reg = FDI_RX_CTL(pipe);
3582 		temp = I915_READ(reg);
3583 		temp &= ~FDI_LINK_TRAIN_AUTO;
3584 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3585 		temp &= ~FDI_RX_ENABLE;
3586 		I915_WRITE(reg, temp);
3587 
3588 		/* enable CPU FDI TX and PCH FDI RX */
3589 		reg = FDI_TX_CTL(pipe);
3590 		temp = I915_READ(reg);
3591 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3592 		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3593 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3594 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3595 		temp |= snb_b_fdi_train_param[j/2];
3596 		temp |= FDI_COMPOSITE_SYNC;
3597 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3598 
3599 		I915_WRITE(FDI_RX_MISC(pipe),
3600 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3601 
3602 		reg = FDI_RX_CTL(pipe);
3603 		temp = I915_READ(reg);
3604 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3605 		temp |= FDI_COMPOSITE_SYNC;
3606 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3607 
3608 		POSTING_READ(reg);
3609 		udelay(1); /* should be 0.5us */
3610 
3611 		for (i = 0; i < 4; i++) {
3612 			reg = FDI_RX_IIR(pipe);
3613 			temp = I915_READ(reg);
3614 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3615 
3616 			if (temp & FDI_RX_BIT_LOCK ||
3617 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3618 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3619 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3620 					      i);
3621 				break;
3622 			}
3623 			udelay(1); /* should be 0.5us */
3624 		}
3625 		if (i == 4) {
3626 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3627 			continue;
3628 		}
3629 
3630 		/* Train 2 */
3631 		reg = FDI_TX_CTL(pipe);
3632 		temp = I915_READ(reg);
3633 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3634 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3635 		I915_WRITE(reg, temp);
3636 
3637 		reg = FDI_RX_CTL(pipe);
3638 		temp = I915_READ(reg);
3639 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3640 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3641 		I915_WRITE(reg, temp);
3642 
3643 		POSTING_READ(reg);
3644 		udelay(2); /* should be 1.5us */
3645 
3646 		for (i = 0; i < 4; i++) {
3647 			reg = FDI_RX_IIR(pipe);
3648 			temp = I915_READ(reg);
3649 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3650 
3651 			if (temp & FDI_RX_SYMBOL_LOCK ||
3652 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3653 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3654 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3655 					      i);
3656 				goto train_done;
3657 			}
3658 			udelay(2); /* should be 1.5us */
3659 		}
3660 		if (i == 4)
3661 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3662 	}
3663 
3664 train_done:
3665 	DRM_DEBUG_KMS("FDI train done.\n");
3666 }
3667 
3668 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3669 {
3670 	struct drm_device *dev = intel_crtc->base.dev;
3671 	struct drm_i915_private *dev_priv = dev->dev_private;
3672 	int pipe = intel_crtc->pipe;
3673 	i915_reg_t reg;
3674 	u32 temp;
3675 
3676 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3677 	reg = FDI_RX_CTL(pipe);
3678 	temp = I915_READ(reg);
3679 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3680 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3681 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3682 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3683 
3684 	POSTING_READ(reg);
3685 	udelay(200);
3686 
3687 	/* Switch from Rawclk to PCDclk */
3688 	temp = I915_READ(reg);
3689 	I915_WRITE(reg, temp | FDI_PCDCLK);
3690 
3691 	POSTING_READ(reg);
3692 	udelay(200);
3693 
3694 	/* Enable CPU FDI TX PLL, always on for Ironlake */
3695 	reg = FDI_TX_CTL(pipe);
3696 	temp = I915_READ(reg);
3697 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3698 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3699 
3700 		POSTING_READ(reg);
3701 		udelay(100);
3702 	}
3703 }
3704 
3705 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3706 {
3707 	struct drm_device *dev = intel_crtc->base.dev;
3708 	struct drm_i915_private *dev_priv = dev->dev_private;
3709 	int pipe = intel_crtc->pipe;
3710 	i915_reg_t reg;
3711 	u32 temp;
3712 
3713 	/* Switch from PCDclk to Rawclk */
3714 	reg = FDI_RX_CTL(pipe);
3715 	temp = I915_READ(reg);
3716 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3717 
3718 	/* Disable CPU FDI TX PLL */
3719 	reg = FDI_TX_CTL(pipe);
3720 	temp = I915_READ(reg);
3721 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3722 
3723 	POSTING_READ(reg);
3724 	udelay(100);
3725 
3726 	reg = FDI_RX_CTL(pipe);
3727 	temp = I915_READ(reg);
3728 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3729 
3730 	/* Wait for the clocks to turn off. */
3731 	POSTING_READ(reg);
3732 	udelay(100);
3733 }
3734 
3735 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3736 {
3737 	struct drm_device *dev = crtc->dev;
3738 	struct drm_i915_private *dev_priv = dev->dev_private;
3739 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3740 	int pipe = intel_crtc->pipe;
3741 	i915_reg_t reg;
3742 	u32 temp;
3743 
3744 	/* disable CPU FDI tx and PCH FDI rx */
3745 	reg = FDI_TX_CTL(pipe);
3746 	temp = I915_READ(reg);
3747 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3748 	POSTING_READ(reg);
3749 
3750 	reg = FDI_RX_CTL(pipe);
3751 	temp = I915_READ(reg);
3752 	temp &= ~(0x7 << 16);
3753 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3754 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3755 
3756 	POSTING_READ(reg);
3757 	udelay(100);
3758 
3759 	/* Ironlake workaround, disable clock pointer after downing FDI */
3760 	if (HAS_PCH_IBX(dev))
3761 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3762 
3763 	/* still set train pattern 1 */
3764 	reg = FDI_TX_CTL(pipe);
3765 	temp = I915_READ(reg);
3766 	temp &= ~FDI_LINK_TRAIN_NONE;
3767 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3768 	I915_WRITE(reg, temp);
3769 
3770 	reg = FDI_RX_CTL(pipe);
3771 	temp = I915_READ(reg);
3772 	if (HAS_PCH_CPT(dev)) {
3773 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3774 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3775 	} else {
3776 		temp &= ~FDI_LINK_TRAIN_NONE;
3777 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3778 	}
3779 	/* BPC in FDI rx is consistent with that in PIPECONF */
3780 	temp &= ~(0x07 << 16);
3781 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3782 	I915_WRITE(reg, temp);
3783 
3784 	POSTING_READ(reg);
3785 	udelay(100);
3786 }
3787 
3788 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3789 {
3790 	struct intel_crtc *crtc;
3791 
3792 	/* Note that we don't need to be called with mode_config.lock here
3793 	 * as our list of CRTC objects is static for the lifetime of the
3794 	 * device and so cannot disappear as we iterate. Similarly, we can
3795 	 * happily treat the predicates as racy, atomic checks as userspace
3796 	 * cannot claim and pin a new fb without at least acquiring the
3797 	 * struct_mutex and so serialising with us.
3798 	 */
3799 	for_each_intel_crtc(dev, crtc) {
3800 		if (atomic_read(&crtc->unpin_work_count) == 0)
3801 			continue;
3802 
3803 		if (crtc->unpin_work)
3804 			intel_wait_for_vblank(dev, crtc->pipe);
3805 
3806 		return true;
3807 	}
3808 
3809 	return false;
3810 }
3811 
3812 static void page_flip_completed(struct intel_crtc *intel_crtc)
3813 {
3814 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3815 	struct intel_unpin_work *work = intel_crtc->unpin_work;
3816 
3817 	/* ensure that the unpin work is consistent wrt ->pending. */
3818 	smp_rmb();
3819 	intel_crtc->unpin_work = NULL;
3820 
3821 	if (work->event)
3822 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
3823 
3824 	drm_crtc_vblank_put(&intel_crtc->base);
3825 
3826 	wake_up_all(&dev_priv->pending_flip_queue);
3827 	queue_work(dev_priv->wq, &work->work);
3828 
3829 	trace_i915_flip_complete(intel_crtc->plane,
3830 				 work->pending_flip_obj);
3831 }
3832 
3833 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3834 {
3835 	struct drm_device *dev = crtc->dev;
3836 	struct drm_i915_private *dev_priv = dev->dev_private;
3837 	long ret;
3838 
3839 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3840 
3841 	ret = wait_event_interruptible_timeout(
3842 					dev_priv->pending_flip_queue,
3843 					!intel_crtc_has_pending_flip(crtc),
3844 					60*HZ);
3845 
3846 	if (ret < 0)
3847 		return ret;
3848 
3849 	if (ret == 0) {
3850 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3851 
3852 		spin_lock_irq(&dev->event_lock);
3853 		if (intel_crtc->unpin_work) {
3854 			WARN_ONCE(1, "Removing stuck page flip\n");
3855 			page_flip_completed(intel_crtc);
3856 		}
3857 		spin_unlock_irq(&dev->event_lock);
3858 	}
3859 
3860 	return 0;
3861 }
3862 
3863 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3864 {
3865 	u32 temp;
3866 
3867 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3868 
3869 	mutex_lock(&dev_priv->sb_lock);
3870 
3871 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3872 	temp |= SBI_SSCCTL_DISABLE;
3873 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3874 
3875 	mutex_unlock(&dev_priv->sb_lock);
3876 }
3877 
3878 /* Program iCLKIP clock to the desired frequency */
3879 static void lpt_program_iclkip(struct drm_crtc *crtc)
3880 {
3881 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3882 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3883 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3884 	u32 temp;
3885 
3886 	lpt_disable_iclkip(dev_priv);
3887 
3888 	/* The iCLK virtual clock root frequency is in MHz,
3889 	 * but the adjusted_mode->crtc_clock is in KHz. To get the
3890 	 * divisors, it is necessary to divide one by another, so we
3891 	 * convert the virtual clock precision to KHz here for higher
3892 	 * precision.
3893 	 */
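	/*
	 * Worked example with illustrative numbers (not from any spec):
	 * for a 108000 KHz pixel clock and auxdiv = 0,
	 *   desired_divisor = 172800000 / 108000 = 1600,
	 *   divsel          = 1600 / 64 - 2     = 23,
	 *   phaseinc        = 1600 % 64         = 0,
	 * and divsel fits the 7-bit field, so the first iteration is used.
	 */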
3894 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3895 		u32 iclk_virtual_root_freq = 172800 * 1000;
3896 		u32 iclk_pi_range = 64;
3897 		u32 desired_divisor;
3898 
3899 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3900 						    clock << auxdiv);
3901 		divsel = (desired_divisor / iclk_pi_range) - 2;
3902 		phaseinc = desired_divisor % iclk_pi_range;
3903 
3904 		/*
3905 		 * Near 20MHz is a corner case which is
3906 		 * out of range for the 7-bit divisor
3907 		 */
3908 		if (divsel <= 0x7f)
3909 			break;
3910 	}
3911 
3912 	/* This should not happen with any sane values */
3913 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3914 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3915 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3916 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3917 
3918 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz pixel clock: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3919 			clock,
3920 			auxdiv,
3921 			divsel,
3922 			phasedir,
3923 			phaseinc);
3924 
3925 	mutex_lock(&dev_priv->sb_lock);
3926 
3927 	/* Program SSCDIVINTPHASE6 */
3928 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3929 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3930 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3931 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3932 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3933 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3934 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3935 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3936 
3937 	/* Program SSCAUXDIV */
3938 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3939 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3940 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3941 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3942 
3943 	/* Enable modulator and associated divider */
3944 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3945 	temp &= ~SBI_SSCCTL_DISABLE;
3946 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3947 
3948 	mutex_unlock(&dev_priv->sb_lock);
3949 
3950 	/* Wait for initialization time */
3951 	udelay(24);
3952 
3953 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3954 }
3955 
3956 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3957 {
3958 	u32 divsel, phaseinc, auxdiv;
3959 	u32 iclk_virtual_root_freq = 172800 * 1000;
3960 	u32 iclk_pi_range = 64;
3961 	u32 desired_divisor;
3962 	u32 temp;
3963 
3964 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3965 		return 0;
3966 
3967 	mutex_lock(&dev_priv->sb_lock);
3968 
3969 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3970 	if (temp & SBI_SSCCTL_DISABLE) {
3971 		mutex_unlock(&dev_priv->sb_lock);
3972 		return 0;
3973 	}
3974 
3975 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3976 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3977 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3978 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3979 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3980 
3981 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3982 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3983 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3984 
3985 	mutex_unlock(&dev_priv->sb_lock);
3986 
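	/*
	 * Invert the programming done in lpt_program_iclkip(): rebuild the
	 * divisor from divsel/phaseinc and undo the auxdiv scaling. With
	 * the example values used above (divsel = 23, phaseinc = 0,
	 * auxdiv = 0) this recovers (23 + 2) * 64 + 0 = 1600, i.e. a
	 * 108000 KHz clock.
	 */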
3987 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3988 
3989 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3990 				 desired_divisor << auxdiv);
3991 }
3992 
3993 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3994 						enum i915_pipe pch_transcoder)
3995 {
3996 	struct drm_device *dev = crtc->base.dev;
3997 	struct drm_i915_private *dev_priv = dev->dev_private;
3998 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3999 
4000 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4001 		   I915_READ(HTOTAL(cpu_transcoder)));
4002 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4003 		   I915_READ(HBLANK(cpu_transcoder)));
4004 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4005 		   I915_READ(HSYNC(cpu_transcoder)));
4006 
4007 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4008 		   I915_READ(VTOTAL(cpu_transcoder)));
4009 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4010 		   I915_READ(VBLANK(cpu_transcoder)));
4011 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4012 		   I915_READ(VSYNC(cpu_transcoder)));
4013 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4014 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4015 }
4016 
4017 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4018 {
4019 	struct drm_i915_private *dev_priv = dev->dev_private;
4020 	uint32_t temp;
4021 
4022 	temp = I915_READ(SOUTH_CHICKEN1);
4023 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4024 		return;
4025 
4026 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4027 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4028 
4029 	temp &= ~FDI_BC_BIFURCATION_SELECT;
4030 	if (enable)
4031 		temp |= FDI_BC_BIFURCATION_SELECT;
4032 
4033 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4034 	I915_WRITE(SOUTH_CHICKEN1, temp);
4035 	POSTING_READ(SOUTH_CHICKEN1);
4036 }
4037 
4038 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4039 {
4040 	struct drm_device *dev = intel_crtc->base.dev;
4041 
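	/*
	 * Lane sharing as implied by the switch below: FDI B and FDI C
	 * share four lanes. With bifurcation enabled they are split 2+2 so
	 * pipe C can drive FDI; with it disabled pipe B may use all four
	 * lanes but pipe C gets none.
	 */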
4042 	switch (intel_crtc->pipe) {
4043 	case PIPE_A:
4044 		break;
4045 	case PIPE_B:
4046 		if (intel_crtc->config->fdi_lanes > 2)
4047 			cpt_set_fdi_bc_bifurcation(dev, false);
4048 		else
4049 			cpt_set_fdi_bc_bifurcation(dev, true);
4050 
4051 		break;
4052 	case PIPE_C:
4053 		cpt_set_fdi_bc_bifurcation(dev, true);
4054 
4055 		break;
4056 	default:
4057 		BUG();
4058 	}
4059 }
4060 
4061 /* Return which DP Port should be selected for Transcoder DP control */
4062 static enum port
4063 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4064 {
4065 	struct drm_device *dev = crtc->dev;
4066 	struct intel_encoder *encoder;
4067 
4068 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4069 		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4070 		    encoder->type == INTEL_OUTPUT_EDP)
4071 			return enc_to_dig_port(&encoder->base)->port;
4072 	}
4073 
4074 	return -1;
4075 }
4076 
4077 /*
4078  * Enable PCH resources required for PCH ports:
4079  *   - PCH PLLs
4080  *   - FDI training & RX/TX
4081  *   - update transcoder timings
4082  *   - DP transcoding bits
4083  *   - transcoder
4084  */
4085 static void ironlake_pch_enable(struct drm_crtc *crtc)
4086 {
4087 	struct drm_device *dev = crtc->dev;
4088 	struct drm_i915_private *dev_priv = dev->dev_private;
4089 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4090 	int pipe = intel_crtc->pipe;
4091 	u32 temp;
4092 
4093 	assert_pch_transcoder_disabled(dev_priv, pipe);
4094 
4095 	if (IS_IVYBRIDGE(dev))
4096 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4097 
4098 	/* Write the TU size bits before fdi link training, so that error
4099 	 * detection works. */
4100 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4101 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4102 
4103 	/* For PCH output, training FDI link */
4104 	dev_priv->display.fdi_link_train(crtc);
4105 
4106 	/* We need to program the right clock selection before writing the pixel
4107 	 * multiplier into the DPLL. */
4108 	if (HAS_PCH_CPT(dev)) {
4109 		u32 sel;
4110 
4111 		temp = I915_READ(PCH_DPLL_SEL);
4112 		temp |= TRANS_DPLL_ENABLE(pipe);
4113 		sel = TRANS_DPLLB_SEL(pipe);
4114 		if (intel_crtc->config->shared_dpll ==
4115 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4116 			temp |= sel;
4117 		else
4118 			temp &= ~sel;
4119 		I915_WRITE(PCH_DPLL_SEL, temp);
4120 	}
4121 
4122 	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
4123 	 * transcoder, and we actually should do this to not upset any PCH
4124 	 * transcoder that already uses the clock when we share it.
4125 	 *
4126 	 * Note that enable_shared_dpll tries to do the right thing, but
4127 	 * get_shared_dpll unconditionally resets the pll - we need that to have
4128 	 * the right LVDS enable sequence. */
4129 	intel_enable_shared_dpll(intel_crtc);
4130 
4131 	/* set transcoder timing, panel must allow it */
4132 	assert_panel_unlocked(dev_priv, pipe);
4133 	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4134 
4135 	intel_fdi_normal_train(crtc);
4136 
4137 	/* For PCH DP, enable TRANS_DP_CTL */
4138 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4139 		const struct drm_display_mode *adjusted_mode =
4140 			&intel_crtc->config->base.adjusted_mode;
4141 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4142 		i915_reg_t reg = TRANS_DP_CTL(pipe);
4143 		temp = I915_READ(reg);
4144 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4145 			  TRANS_DP_SYNC_MASK |
4146 			  TRANS_DP_BPC_MASK);
4147 		temp |= TRANS_DP_OUTPUT_ENABLE;
4148 		temp |= bpc << 9; /* same format but at 11:9 */
4149 
4150 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4151 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4152 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4153 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4154 
4155 		switch (intel_trans_dp_port_sel(crtc)) {
4156 		case PORT_B:
4157 			temp |= TRANS_DP_PORT_SEL_B;
4158 			break;
4159 		case PORT_C:
4160 			temp |= TRANS_DP_PORT_SEL_C;
4161 			break;
4162 		case PORT_D:
4163 			temp |= TRANS_DP_PORT_SEL_D;
4164 			break;
4165 		default:
4166 			BUG();
4167 		}
4168 
4169 		I915_WRITE(reg, temp);
4170 	}
4171 
4172 	ironlake_enable_pch_transcoder(dev_priv, pipe);
4173 }
4174 
4175 static void lpt_pch_enable(struct drm_crtc *crtc)
4176 {
4177 	struct drm_device *dev = crtc->dev;
4178 	struct drm_i915_private *dev_priv = dev->dev_private;
4179 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4180 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4181 
4182 	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4183 
4184 	lpt_program_iclkip(crtc);
4185 
4186 	/* Set transcoder timing. */
4187 	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4188 
4189 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4190 }
4191 
4192 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4193 {
4194 	struct drm_i915_private *dev_priv = dev->dev_private;
4195 	i915_reg_t dslreg = PIPEDSL(pipe);
4196 	u32 temp;
4197 
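	/*
	 * PIPEDSL holds the pipe's current scanline; if it never moves
	 * after the mode set, the pipe did not start scanning out.
	 */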
4198 	temp = I915_READ(dslreg);
4199 	udelay(500);
4200 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4201 		if (wait_for(I915_READ(dslreg) != temp, 5))
4202 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4203 	}
4204 }
4205 
4206 static int
4207 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4208 		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4209 		  int src_w, int src_h, int dst_w, int dst_h)
4210 {
4211 	struct intel_crtc_scaler_state *scaler_state =
4212 		&crtc_state->scaler_state;
4213 	struct intel_crtc *intel_crtc =
4214 		to_intel_crtc(crtc_state->base.crtc);
4215 	int need_scaling;
4216 
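	/*
	 * A 90/270 degree rotation scans the source out transposed, so the
	 * swapped source dimensions are compared against the destination.
	 */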
4217 	need_scaling = intel_rotation_90_or_270(rotation) ?
4218 		(src_h != dst_w || src_w != dst_h):
4219 		(src_w != dst_w || src_h != dst_h);
4220 
4221 	/*
4222 	 * if the plane is being disabled, the scaler is no longer required, or on force detach
4223 	 *  - free the scaler bound to this plane/crtc
4224 	 *  - in order to do this, update crtc->scaler_usage
4225 	 *
4226 	 * Here the scaler state in crtc_state is set free so that
4227 	 * the scaler can be assigned to another user. Actual register
4228 	 * update to free the scaler is done in plane/panel-fit programming.
4229 	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4230 	 */
4231 	if (force_detach || !need_scaling) {
4232 		if (*scaler_id >= 0) {
4233 			scaler_state->scaler_users &= ~(1 << scaler_user);
4234 			scaler_state->scalers[*scaler_id].in_use = 0;
4235 
4236 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4237 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4238 				intel_crtc->pipe, scaler_user, *scaler_id,
4239 				scaler_state->scaler_users);
4240 			*scaler_id = -1;
4241 		}
4242 		return 0;
4243 	}
4244 
4245 	/* range checks */
4246 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4247 		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4248 
4249 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4250 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4251 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4252 			"size is out of scaler range\n",
4253 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4254 		return -EINVAL;
4255 	}
4256 
4257 	/* mark this plane as a scaler user in crtc_state */
4258 	scaler_state->scaler_users |= (1 << scaler_user);
4259 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4260 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4261 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4262 		scaler_state->scaler_users);
4263 
4264 	return 0;
4265 }
4266 
4267 /**
4268  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4269  *
4270  * @state: crtc state containing the staged scaler state
4271  *
4272  * Return:
4273  *     0 - scaler_usage updated successfully
4274  *    error - requested scaling cannot be supported or other error condition
4275  */
4276 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4277 {
4278 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4279 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4280 
4281 	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4282 		      intel_crtc->base.base.id, intel_crtc->base.name,
4283 		      intel_crtc->pipe, SKL_CRTC_INDEX);
4284 
4285 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4286 		&state->scaler_state.scaler_id, DRM_ROTATE_0,
4287 		state->pipe_src_w, state->pipe_src_h,
4288 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4289 }
4290 
4291 /**
4292  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4293  *
4294  * @crtc_state: crtc state containing the staged scaler state
4295  * @plane_state: atomic plane state to update
4296  *
4297  * Return:
4298  *     0 - scaler_usage updated successfully
4299  *    error - requested scaling cannot be supported or other error condition
4300  */
4301 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4302 				   struct intel_plane_state *plane_state)
4303 {
4304 
4305 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4306 	struct intel_plane *intel_plane =
4307 		to_intel_plane(plane_state->base.plane);
4308 	struct drm_framebuffer *fb = plane_state->base.fb;
4309 	int ret;
4310 
4311 	bool force_detach = !fb || !plane_state->visible;
4312 
4313 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4314 		      intel_plane->base.base.id, intel_crtc->pipe,
4315 		      drm_plane_index(&intel_plane->base));
4316 
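	/*
	 * Plane source rectangles are in 16.16 fixed point; shift down to
	 * integer pixels before passing them to the scaler bookkeeping.
	 */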
4317 	ret = skl_update_scaler(crtc_state, force_detach,
4318 				drm_plane_index(&intel_plane->base),
4319 				&plane_state->scaler_id,
4320 				plane_state->base.rotation,
4321 				drm_rect_width(&plane_state->src) >> 16,
4322 				drm_rect_height(&plane_state->src) >> 16,
4323 				drm_rect_width(&plane_state->dst),
4324 				drm_rect_height(&plane_state->dst));
4325 
4326 	if (ret || plane_state->scaler_id < 0)
4327 		return ret;
4328 
4329 	/* check colorkey */
4330 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4331 		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4332 			      intel_plane->base.base.id);
4333 		return -EINVAL;
4334 	}
4335 
4336 	/* Check src format */
4337 	switch (fb->pixel_format) {
4338 	case DRM_FORMAT_RGB565:
4339 	case DRM_FORMAT_XBGR8888:
4340 	case DRM_FORMAT_XRGB8888:
4341 	case DRM_FORMAT_ABGR8888:
4342 	case DRM_FORMAT_ARGB8888:
4343 	case DRM_FORMAT_XRGB2101010:
4344 	case DRM_FORMAT_XBGR2101010:
4345 	case DRM_FORMAT_YUYV:
4346 	case DRM_FORMAT_YVYU:
4347 	case DRM_FORMAT_UYVY:
4348 	case DRM_FORMAT_VYUY:
4349 		break;
4350 	default:
4351 		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4352 			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4353 		return -EINVAL;
4354 	}
4355 
4356 	return 0;
4357 }
4358 
4359 static void skylake_scaler_disable(struct intel_crtc *crtc)
4360 {
4361 	int i;
4362 
4363 	for (i = 0; i < crtc->num_scalers; i++)
4364 		skl_detach_scaler(crtc, i);
4365 }
4366 
4367 static void skylake_pfit_enable(struct intel_crtc *crtc)
4368 {
4369 	struct drm_device *dev = crtc->base.dev;
4370 	struct drm_i915_private *dev_priv = dev->dev_private;
4371 	int pipe = crtc->pipe;
4372 	struct intel_crtc_scaler_state *scaler_state =
4373 		&crtc->config->scaler_state;
4374 
4375 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4376 
4377 	if (crtc->config->pch_pfit.enabled) {
4378 		int id;
4379 
4380 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4381 			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4382 			return;
4383 		}
4384 
4385 		id = scaler_state->scaler_id;
4386 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4387 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4388 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4389 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4390 
4391 		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4392 	}
4393 }
4394 
4395 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4396 {
4397 	struct drm_device *dev = crtc->base.dev;
4398 	struct drm_i915_private *dev_priv = dev->dev_private;
4399 	int pipe = crtc->pipe;
4400 
4401 	if (crtc->config->pch_pfit.enabled) {
4402 		/* Force use of hard-coded filter coefficients
4403 		 * as some pre-programmed values are broken,
4404 		 * e.g. x201.
4405 		 */
4406 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4407 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4408 						 PF_PIPE_SEL_IVB(pipe));
4409 		else
4410 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4411 		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4412 		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4413 	}
4414 }
4415 
4416 void hsw_enable_ips(struct intel_crtc *crtc)
4417 {
4418 	struct drm_device *dev = crtc->base.dev;
4419 	struct drm_i915_private *dev_priv = dev->dev_private;
4420 
4421 	if (!crtc->config->ips_enabled)
4422 		return;
4423 
4424 	/*
4425 	 * We can only enable IPS after we enable a plane and wait for a vblank.
4426 	 * This function is called from post_plane_update, which is run after
4427 	 * a vblank wait.
4428 	 */
4429 
4430 	assert_plane_enabled(dev_priv, crtc->plane);
4431 	if (IS_BROADWELL(dev)) {
4432 		mutex_lock(&dev_priv->rps.hw_lock);
4433 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4434 		mutex_unlock(&dev_priv->rps.hw_lock);
4435 		/* Quoting Art Runyan: "it's not safe to expect any particular
4436 		 * value in IPS_CTL bit 31 after enabling IPS through the
4437 		 * mailbox." Moreover, the mailbox may return a bogus state,
4438 		 * so we need to just enable it and continue on.
4439 		 */
4440 	} else {
4441 		I915_WRITE(IPS_CTL, IPS_ENABLE);
4442 		/* The bit only becomes 1 in the next vblank, so this wait here
4443 		 * is essentially intel_wait_for_vblank. If we don't have this
4444 		 * and don't wait for vblanks until the end of crtc_enable, then
4445 		 * the HW state readout code will complain that the expected
4446 		 * IPS_CTL value is not the one we read. */
4447 		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4448 			DRM_ERROR("Timed out waiting for IPS enable\n");
4449 	}
4450 }
4451 
4452 void hsw_disable_ips(struct intel_crtc *crtc)
4453 {
4454 	struct drm_device *dev = crtc->base.dev;
4455 	struct drm_i915_private *dev_priv = dev->dev_private;
4456 
4457 	if (!crtc->config->ips_enabled)
4458 		return;
4459 
4460 	assert_plane_enabled(dev_priv, crtc->plane);
4461 	if (IS_BROADWELL(dev)) {
4462 		mutex_lock(&dev_priv->rps.hw_lock);
4463 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4464 		mutex_unlock(&dev_priv->rps.hw_lock);
4465 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4466 		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4467 			DRM_ERROR("Timed out waiting for IPS disable\n");
4468 	} else {
4469 		I915_WRITE(IPS_CTL, 0);
4470 		POSTING_READ(IPS_CTL);
4471 	}
4472 
4473 	/* We need to wait for a vblank before we can disable the plane. */
4474 	intel_wait_for_vblank(dev, crtc->pipe);
4475 }
4476 
4477 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4478 {
4479 	if (intel_crtc->overlay) {
4480 		struct drm_device *dev = intel_crtc->base.dev;
4481 		struct drm_i915_private *dev_priv = dev->dev_private;
4482 
4483 		mutex_lock(&dev->struct_mutex);
4484 		dev_priv->mm.interruptible = false;
4485 		(void) intel_overlay_switch_off(intel_crtc->overlay);
4486 		dev_priv->mm.interruptible = true;
4487 		mutex_unlock(&dev->struct_mutex);
4488 	}
4489 
4490 	/* Let userspace switch the overlay on again. In most cases userspace
4491 	 * has to recompute where to put it anyway.
4492 	 */
4493 }
4494 
4495 /**
4496  * intel_post_enable_primary - Perform operations after enabling primary plane
4497  * @crtc: the CRTC whose primary plane was just enabled
4498  *
4499  * Performs potentially sleeping operations that must be done after the primary
4500  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4501  * called due to an explicit primary plane update, or due to an implicit
4502  * re-enable that is caused when a sprite plane is updated to no longer
4503  * completely hide the primary plane.
4504  */
4505 static void
4506 intel_post_enable_primary(struct drm_crtc *crtc)
4507 {
4508 	struct drm_device *dev = crtc->dev;
4509 	struct drm_i915_private *dev_priv = dev->dev_private;
4510 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4511 	int pipe = intel_crtc->pipe;
4512 
4513 	/*
4514 	 * FIXME IPS should be fine as long as one plane is
4515 	 * enabled, but in practice it seems to have problems
4516 	 * when going from primary only to sprite only and vice
4517 	 * versa.
4518 	 */
4519 	hsw_enable_ips(intel_crtc);
4520 
4521 	/*
4522 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4523 	 * So don't enable underrun reporting before at least some planes
4524 	 * are enabled.
4525 	 * FIXME: Need to fix the logic to work when we turn off all planes
4526 	 * but leave the pipe running.
4527 	 */
4528 	if (IS_GEN2(dev))
4529 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4530 
4531 	/* Underruns don't always raise interrupts, so check manually. */
4532 	intel_check_cpu_fifo_underruns(dev_priv);
4533 	intel_check_pch_fifo_underruns(dev_priv);
4534 }
4535 
4536 /* FIXME move all this to pre_plane_update() with proper state tracking */
4537 static void
4538 intel_pre_disable_primary(struct drm_crtc *crtc)
4539 {
4540 	struct drm_device *dev = crtc->dev;
4541 	struct drm_i915_private *dev_priv = dev->dev_private;
4542 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4543 	int pipe = intel_crtc->pipe;
4544 
4545 	/*
4546 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4547 	 * So disable underrun reporting before all the planes get disabled.
4548 	 * FIXME: Need to fix the logic to work when we turn off all planes
4549 	 * but leave the pipe running.
4550 	 */
4551 	if (IS_GEN2(dev))
4552 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4553 
4554 	/*
4555 	 * FIXME IPS should be fine as long as one plane is
4556 	 * enabled, but in practice it seems to have problems
4557 	 * when going from primary only to sprite only and vice
4558 	 * versa.
4559 	 */
4560 	hsw_disable_ips(intel_crtc);
4561 }
4562 
4563 /* FIXME get rid of this and use pre_plane_update */
4564 static void
4565 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4566 {
4567 	struct drm_device *dev = crtc->dev;
4568 	struct drm_i915_private *dev_priv = dev->dev_private;
4569 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4570 	int pipe = intel_crtc->pipe;
4571 
4572 	intel_pre_disable_primary(crtc);
4573 
4574 	/*
4575 	 * Vblank time updates from the shadow to live plane control register
4576 	 * are blocked if the memory self-refresh mode is active at that
4577 	 * moment. So to make sure the plane gets truly disabled, disable
4578 	 * first the self-refresh mode. The self-refresh enable bit in turn
4579 	 * will be checked/applied by the HW only at the next frame start
4580 	 * event which is after the vblank start event, so we need to have a
4581 	 * wait-for-vblank between disabling the plane and the pipe.
4582 	 */
4583 	if (HAS_GMCH_DISPLAY(dev)) {
4584 		intel_set_memory_cxsr(dev_priv, false);
4585 		dev_priv->wm.vlv.cxsr = false;
4586 		intel_wait_for_vblank(dev, pipe);
4587 	}
4588 }
4589 
4590 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4591 {
4592 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4593 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4594 	struct intel_crtc_state *pipe_config =
4595 		to_intel_crtc_state(crtc->base.state);
4596 	struct drm_device *dev = crtc->base.dev;
4597 	struct drm_plane *primary = crtc->base.primary;
4598 	struct drm_plane_state *old_pri_state =
4599 		drm_atomic_get_existing_plane_state(old_state, primary);
4600 
4601 	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4602 
4603 	crtc->wm.cxsr_allowed = true;
4604 
4605 	if (pipe_config->update_wm_post && pipe_config->base.active)
4606 		intel_update_watermarks(&crtc->base);
4607 
4608 	if (old_pri_state) {
4609 		struct intel_plane_state *primary_state =
4610 			to_intel_plane_state(primary->state);
4611 		struct intel_plane_state *old_primary_state =
4612 			to_intel_plane_state(old_pri_state);
4613 
4614 		intel_fbc_post_update(crtc);
4615 
4616 		if (primary_state->visible &&
4617 		    (needs_modeset(&pipe_config->base) ||
4618 		     !old_primary_state->visible))
4619 			intel_post_enable_primary(&crtc->base);
4620 	}
4621 }
4622 
4623 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4624 {
4625 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4626 	struct drm_device *dev = crtc->base.dev;
4627 	struct drm_i915_private *dev_priv = dev->dev_private;
4628 	struct intel_crtc_state *pipe_config =
4629 		to_intel_crtc_state(crtc->base.state);
4630 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4631 	struct drm_plane *primary = crtc->base.primary;
4632 	struct drm_plane_state *old_pri_state =
4633 		drm_atomic_get_existing_plane_state(old_state, primary);
4634 	bool modeset = needs_modeset(&pipe_config->base);
4635 
4636 	if (old_pri_state) {
4637 		struct intel_plane_state *primary_state =
4638 			to_intel_plane_state(primary->state);
4639 		struct intel_plane_state *old_primary_state =
4640 			to_intel_plane_state(old_pri_state);
4641 
4642 		intel_fbc_pre_update(crtc);
4643 
4644 		if (old_primary_state->visible &&
4645 		    (modeset || !primary_state->visible))
4646 			intel_pre_disable_primary(&crtc->base);
4647 	}
4648 
4649 	if (pipe_config->disable_cxsr) {
4650 		crtc->wm.cxsr_allowed = false;
4651 
4652 		/*
4653 		 * Vblank time updates from the shadow to live plane control register
4654 		 * are blocked if the memory self-refresh mode is active at that
4655 		 * moment. So to make sure the plane gets truly disabled, disable
4656 		 * first the self-refresh mode. The self-refresh enable bit in turn
4657 		 * will be checked/applied by the HW only at the next frame start
4658 		 * event which is after the vblank start event, so we need to have a
4659 		 * wait-for-vblank between disabling the plane and the pipe.
4660 		 */
4661 		if (old_crtc_state->base.active) {
4662 			intel_set_memory_cxsr(dev_priv, false);
4663 			dev_priv->wm.vlv.cxsr = false;
4664 			intel_wait_for_vblank(dev, crtc->pipe);
4665 		}
4666 	}
4667 
4668 	/*
4669 	 * IVB workaround: must disable low power watermarks for at least
4670 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
4671 	 * when scaling is disabled.
4672 	 *
4673 	 * WaCxSRDisabledForSpriteScaling:ivb
4674 	 */
4675 	if (pipe_config->disable_lp_wm) {
4676 		ilk_disable_lp_wm(dev);
4677 		intel_wait_for_vblank(dev, crtc->pipe);
4678 	}
4679 
4680 	/*
4681 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
4682 	 * watermark programming here.
4683 	 */
4684 	if (needs_modeset(&pipe_config->base))
4685 		return;
4686 
4687 	/*
4688 	 * For platforms that support atomic watermarks, program the
4689 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
4690 	 * will be the intermediate values that are safe for both pre- and
4691 	 * post- vblank; when vblank happens, the 'active' values will be set
4692 	 * to the final 'target' values and we'll do this again to get the
4693 	 * optimal watermarks.  For gen9+ platforms, the values we program here
4694 	 * will be the final target values which will get automatically latched
4695 	 * at vblank time; no further programming will be necessary.
4696 	 *
4697 	 * If a platform hasn't been transitioned to atomic watermarks yet,
4698 	 * we'll continue to update watermarks the old way, if flags tell
4699 	 * us to.
4700 	 */
4701 	if (dev_priv->display.initial_watermarks != NULL)
4702 		dev_priv->display.initial_watermarks(pipe_config);
4703 	else if (pipe_config->update_wm_pre)
4704 		intel_update_watermarks(&crtc->base);
4705 }
4706 
4707 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4708 {
4709 	struct drm_device *dev = crtc->dev;
4710 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4711 	struct drm_plane *p;
4712 	int pipe = intel_crtc->pipe;
4713 
4714 	intel_crtc_dpms_overlay_disable(intel_crtc);
4715 
4716 	drm_for_each_plane_mask(p, dev, plane_mask)
4717 		to_intel_plane(p)->disable_plane(p, crtc);
4718 
4719 	/*
4720 	 * FIXME: Once we grow proper nuclear flip support out of this we need
4721 	 * to compute the mask of flip planes precisely. For the time being
4722 	 * consider this a flip to a NULL plane.
4723 	 */
4724 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4725 }
4726 
4727 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4728 {
4729 	struct drm_device *dev = crtc->dev;
4730 	struct drm_i915_private *dev_priv = dev->dev_private;
4731 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4732 	struct intel_encoder *encoder;
4733 	int pipe = intel_crtc->pipe;
4734 	struct intel_crtc_state *pipe_config =
4735 		to_intel_crtc_state(crtc->state);
4736 
4737 	if (WARN_ON(intel_crtc->active))
4738 		return;
4739 
4740 	/*
4741 	 * Sometimes spurious CPU pipe underruns happen during FDI
4742 	 * training, at least with VGA+HDMI cloning. Suppress them.
4743 	 *
4744 	 * On ILK we get occasional spurious CPU pipe underruns
4745 	 * between eDP port A enable and vdd enable. Also PCH port
4746 	 * enable seems to result in the occasional CPU pipe underrun.
4747 	 *
4748 	 * Spurious PCH underruns also occur during PCH enabling.
4749 	 */
4750 	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4751 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4752 	if (intel_crtc->config->has_pch_encoder)
4753 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4754 
4755 	if (intel_crtc->config->has_pch_encoder)
4756 		intel_prepare_shared_dpll(intel_crtc);
4757 
4758 	if (intel_crtc->config->has_dp_encoder)
4759 		intel_dp_set_m_n(intel_crtc, M1_N1);
4760 
4761 	intel_set_pipe_timings(intel_crtc);
4762 	intel_set_pipe_src_size(intel_crtc);
4763 
4764 	if (intel_crtc->config->has_pch_encoder) {
4765 		intel_cpu_transcoder_set_m_n(intel_crtc,
4766 				     &intel_crtc->config->fdi_m_n, NULL);
4767 	}
4768 
4769 	ironlake_set_pipeconf(crtc);
4770 
4771 	intel_crtc->active = true;
4772 
4773 	for_each_encoder_on_crtc(dev, crtc, encoder)
4774 		if (encoder->pre_enable)
4775 			encoder->pre_enable(encoder);
4776 
4777 	if (intel_crtc->config->has_pch_encoder) {
4778 		/* Note: FDI PLL enabling _must_ be done before we enable the
4779 		 * cpu pipes, hence this is separate from all the other fdi/pch
4780 		 * enabling. */
4781 		ironlake_fdi_pll_enable(intel_crtc);
4782 	} else {
4783 		assert_fdi_tx_disabled(dev_priv, pipe);
4784 		assert_fdi_rx_disabled(dev_priv, pipe);
4785 	}
4786 
4787 	ironlake_pfit_enable(intel_crtc);
4788 
4789 	/*
4790 	 * On ILK+ LUT must be loaded before the pipe is running but with
4791 	 * clocks enabled
4792 	 */
4793 	intel_color_load_luts(&pipe_config->base);
4794 
4795 	if (dev_priv->display.initial_watermarks != NULL)
4796 		dev_priv->display.initial_watermarks(intel_crtc->config);
4797 	intel_enable_pipe(intel_crtc);
4798 
4799 	if (intel_crtc->config->has_pch_encoder)
4800 		ironlake_pch_enable(crtc);
4801 
4802 	assert_vblank_disabled(crtc);
4803 	drm_crtc_vblank_on(crtc);
4804 
4805 	for_each_encoder_on_crtc(dev, crtc, encoder)
4806 		encoder->enable(encoder);
4807 
4808 	if (HAS_PCH_CPT(dev))
4809 		cpt_verify_modeset(dev, intel_crtc->pipe);
4810 
4811 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4812 	if (intel_crtc->config->has_pch_encoder)
4813 		intel_wait_for_vblank(dev, pipe);
4814 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4815 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4816 }
4817 
4818 /* IPS only exists on ULT machines and is tied to pipe A. */
4819 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4820 {
4821 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4822 }
4823 
4824 static void haswell_crtc_enable(struct drm_crtc *crtc)
4825 {
4826 	struct drm_device *dev = crtc->dev;
4827 	struct drm_i915_private *dev_priv = dev->dev_private;
4828 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4829 	struct intel_encoder *encoder;
4830 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4831 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4832 	struct intel_crtc_state *pipe_config =
4833 		to_intel_crtc_state(crtc->state);
4834 
4835 	if (WARN_ON(intel_crtc->active))
4836 		return;
4837 
4838 	if (intel_crtc->config->has_pch_encoder)
4839 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4840 						      false);
4841 
4842 	if (intel_crtc->config->shared_dpll)
4843 		intel_enable_shared_dpll(intel_crtc);
4844 
4845 	if (intel_crtc->config->has_dp_encoder)
4846 		intel_dp_set_m_n(intel_crtc, M1_N1);
4847 
4848 	if (!intel_crtc->config->has_dsi_encoder)
4849 		intel_set_pipe_timings(intel_crtc);
4850 
4851 	intel_set_pipe_src_size(intel_crtc);
4852 
4853 	if (cpu_transcoder != TRANSCODER_EDP &&
4854 	    !transcoder_is_dsi(cpu_transcoder)) {
4855 		I915_WRITE(PIPE_MULT(cpu_transcoder),
4856 			   intel_crtc->config->pixel_multiplier - 1);
4857 	}
4858 
4859 	if (intel_crtc->config->has_pch_encoder) {
4860 		intel_cpu_transcoder_set_m_n(intel_crtc,
4861 				     &intel_crtc->config->fdi_m_n, NULL);
4862 	}
4863 
4864 	if (!intel_crtc->config->has_dsi_encoder)
4865 		haswell_set_pipeconf(crtc);
4866 
4867 	haswell_set_pipemisc(crtc);
4868 
4869 	intel_color_set_csc(&pipe_config->base);
4870 
4871 	intel_crtc->active = true;
4872 
4873 	if (intel_crtc->config->has_pch_encoder)
4874 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4875 	else
4876 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4877 
4878 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4879 		if (encoder->pre_enable)
4880 			encoder->pre_enable(encoder);
4881 	}
4882 
4883 	if (intel_crtc->config->has_pch_encoder)
4884 		dev_priv->display.fdi_link_train(crtc);
4885 
4886 	if (!intel_crtc->config->has_dsi_encoder)
4887 		intel_ddi_enable_pipe_clock(intel_crtc);
4888 
4889 	if (INTEL_INFO(dev)->gen >= 9)
4890 		skylake_pfit_enable(intel_crtc);
4891 	else
4892 		ironlake_pfit_enable(intel_crtc);
4893 
4894 	/*
4895 	 * On ILK+ LUT must be loaded before the pipe is running but with
4896 	 * clocks enabled
4897 	 */
4898 	intel_color_load_luts(&pipe_config->base);
4899 
4900 	intel_ddi_set_pipe_settings(crtc);
4901 	if (!intel_crtc->config->has_dsi_encoder)
4902 		intel_ddi_enable_transcoder_func(crtc);
4903 
4904 	if (dev_priv->display.initial_watermarks != NULL)
4905 		dev_priv->display.initial_watermarks(pipe_config);
4906 	else
4907 		intel_update_watermarks(crtc);
4908 
4909 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
4910 	if (!intel_crtc->config->has_dsi_encoder)
4911 		intel_enable_pipe(intel_crtc);
4912 
4913 	if (intel_crtc->config->has_pch_encoder)
4914 		lpt_pch_enable(crtc);
4915 
4916 	if (intel_crtc->config->dp_encoder_is_mst)
4917 		intel_ddi_set_vc_payload_alloc(crtc, true);
4918 
4919 	assert_vblank_disabled(crtc);
4920 	drm_crtc_vblank_on(crtc);
4921 
4922 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4923 		encoder->enable(encoder);
4924 		intel_opregion_notify_encoder(encoder, true);
4925 	}
4926 
4927 	if (intel_crtc->config->has_pch_encoder) {
4928 		intel_wait_for_vblank(dev, pipe);
4929 		intel_wait_for_vblank(dev, pipe);
4930 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4931 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4932 						      true);
4933 	}
4934 
4935 	/* If we change the relative order between pipe/planes enabling, we need
4936 	 * to change the workaround. */
4937 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4938 	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4939 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4940 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4941 	}
4942 }
4943 
4944 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4945 {
4946 	struct drm_device *dev = crtc->base.dev;
4947 	struct drm_i915_private *dev_priv = dev->dev_private;
4948 	int pipe = crtc->pipe;
4949 
4950 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
4951 	 * it's in use. The hw state code will make sure we get this right. */
4952 	if (force || crtc->config->pch_pfit.enabled) {
4953 		I915_WRITE(PF_CTL(pipe), 0);
4954 		I915_WRITE(PF_WIN_POS(pipe), 0);
4955 		I915_WRITE(PF_WIN_SZ(pipe), 0);
4956 	}
4957 }
4958 
4959 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4960 {
4961 	struct drm_device *dev = crtc->dev;
4962 	struct drm_i915_private *dev_priv = dev->dev_private;
4963 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4964 	struct intel_encoder *encoder;
4965 	int pipe = intel_crtc->pipe;
4966 
4967 	/*
4968 	 * Sometimes spurious CPU pipe underruns happen when the
4969 	 * pipe is already disabled, but FDI RX/TX is still enabled.
4970 	 * Happens at least with VGA+HDMI cloning. Suppress them.
4971 	 */
4972 	if (intel_crtc->config->has_pch_encoder) {
4973 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4974 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4975 	}
4976 
4977 	for_each_encoder_on_crtc(dev, crtc, encoder)
4978 		encoder->disable(encoder);
4979 
4980 	drm_crtc_vblank_off(crtc);
4981 	assert_vblank_disabled(crtc);
4982 
4983 	intel_disable_pipe(intel_crtc);
4984 
4985 	ironlake_pfit_disable(intel_crtc, false);
4986 
4987 	if (intel_crtc->config->has_pch_encoder)
4988 		ironlake_fdi_disable(crtc);
4989 
4990 	for_each_encoder_on_crtc(dev, crtc, encoder)
4991 		if (encoder->post_disable)
4992 			encoder->post_disable(encoder);
4993 
4994 	if (intel_crtc->config->has_pch_encoder) {
4995 		ironlake_disable_pch_transcoder(dev_priv, pipe);
4996 
4997 		if (HAS_PCH_CPT(dev)) {
4998 			i915_reg_t reg;
4999 			u32 temp;
5000 
5001 			/* disable TRANS_DP_CTL */
5002 			reg = TRANS_DP_CTL(pipe);
5003 			temp = I915_READ(reg);
5004 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5005 				  TRANS_DP_PORT_SEL_MASK);
5006 			temp |= TRANS_DP_PORT_SEL_NONE;
5007 			I915_WRITE(reg, temp);
5008 
5009 			/* disable DPLL_SEL */
5010 			temp = I915_READ(PCH_DPLL_SEL);
5011 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5012 			I915_WRITE(PCH_DPLL_SEL, temp);
5013 		}
5014 
5015 		ironlake_fdi_pll_disable(intel_crtc);
5016 	}
5017 
5018 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5019 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5020 }
5021 
5022 static void haswell_crtc_disable(struct drm_crtc *crtc)
5023 {
5024 	struct drm_device *dev = crtc->dev;
5025 	struct drm_i915_private *dev_priv = dev->dev_private;
5026 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5027 	struct intel_encoder *encoder;
5028 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5029 
5030 	if (intel_crtc->config->has_pch_encoder)
5031 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5032 						      false);
5033 
5034 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5035 		intel_opregion_notify_encoder(encoder, false);
5036 		encoder->disable(encoder);
5037 	}
5038 
5039 	drm_crtc_vblank_off(crtc);
5040 	assert_vblank_disabled(crtc);
5041 
5042 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
5043 	if (!intel_crtc->config->has_dsi_encoder)
5044 		intel_disable_pipe(intel_crtc);
5045 
5046 	if (intel_crtc->config->dp_encoder_is_mst)
5047 		intel_ddi_set_vc_payload_alloc(crtc, false);
5048 
5049 	if (!intel_crtc->config->has_dsi_encoder)
5050 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5051 
5052 	if (INTEL_INFO(dev)->gen >= 9)
5053 		skylake_scaler_disable(intel_crtc);
5054 	else
5055 		ironlake_pfit_disable(intel_crtc, false);
5056 
5057 	if (!intel_crtc->config->has_dsi_encoder)
5058 		intel_ddi_disable_pipe_clock(intel_crtc);
5059 
5060 	for_each_encoder_on_crtc(dev, crtc, encoder)
5061 		if (encoder->post_disable)
5062 			encoder->post_disable(encoder);
5063 
5064 	if (intel_crtc->config->has_pch_encoder) {
5065 		lpt_disable_pch_transcoder(dev_priv);
5066 		lpt_disable_iclkip(dev_priv);
5067 		intel_ddi_fdi_disable(crtc);
5068 
5069 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5070 						      true);
5071 	}
5072 }
5073 
5074 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5075 {
5076 	struct drm_device *dev = crtc->base.dev;
5077 	struct drm_i915_private *dev_priv = dev->dev_private;
5078 	struct intel_crtc_state *pipe_config = crtc->config;
5079 
5080 	if (!pipe_config->gmch_pfit.control)
5081 		return;
5082 
5083 	/*
5084 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5085 	 * according to register description and PRM.
5086 	 */
5087 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5088 	assert_pipe_disabled(dev_priv, crtc->pipe);
5089 
5090 	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5091 	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5092 
5093 	/* Border color in case we don't scale up to the full screen. Black by
5094 	 * default, change to something else for debugging. */
5095 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5096 }
5097 
5098 static enum intel_display_power_domain port_to_power_domain(enum port port)
5099 {
5100 	switch (port) {
5101 	case PORT_A:
5102 		return POWER_DOMAIN_PORT_DDI_A_LANES;
5103 	case PORT_B:
5104 		return POWER_DOMAIN_PORT_DDI_B_LANES;
5105 	case PORT_C:
5106 		return POWER_DOMAIN_PORT_DDI_C_LANES;
5107 	case PORT_D:
5108 		return POWER_DOMAIN_PORT_DDI_D_LANES;
5109 	case PORT_E:
5110 		return POWER_DOMAIN_PORT_DDI_E_LANES;
5111 	default:
5112 		MISSING_CASE(port);
5113 		return POWER_DOMAIN_PORT_OTHER;
5114 	}
5115 }
5116 
5117 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5118 {
5119 	switch (port) {
5120 	case PORT_A:
5121 		return POWER_DOMAIN_AUX_A;
5122 	case PORT_B:
5123 		return POWER_DOMAIN_AUX_B;
5124 	case PORT_C:
5125 		return POWER_DOMAIN_AUX_C;
5126 	case PORT_D:
5127 		return POWER_DOMAIN_AUX_D;
5128 	case PORT_E:
5129 		/* FIXME: Check VBT for actual wiring of PORT E */
5130 		return POWER_DOMAIN_AUX_D;
5131 	default:
5132 		MISSING_CASE(port);
5133 		return POWER_DOMAIN_AUX_A;
5134 	}
5135 }
5136 
5137 enum intel_display_power_domain
5138 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5139 {
5140 	struct drm_device *dev = intel_encoder->base.dev;
5141 	struct intel_digital_port *intel_dig_port;
5142 
5143 	switch (intel_encoder->type) {
5144 	case INTEL_OUTPUT_UNKNOWN:
5145 		/* Only DDI platforms should ever use this output type */
5146 		WARN_ON_ONCE(!HAS_DDI(dev));
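		/* fall through */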
5147 	case INTEL_OUTPUT_DISPLAYPORT:
5148 	case INTEL_OUTPUT_HDMI:
5149 	case INTEL_OUTPUT_EDP:
5150 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5151 		return port_to_power_domain(intel_dig_port->port);
5152 	case INTEL_OUTPUT_DP_MST:
5153 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5154 		return port_to_power_domain(intel_dig_port->port);
5155 	case INTEL_OUTPUT_ANALOG:
5156 		return POWER_DOMAIN_PORT_CRT;
5157 	case INTEL_OUTPUT_DSI:
5158 		return POWER_DOMAIN_PORT_DSI;
5159 	default:
5160 		return POWER_DOMAIN_PORT_OTHER;
5161 	}
5162 }
5163 
5164 enum intel_display_power_domain
5165 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5166 {
5167 	struct drm_device *dev = intel_encoder->base.dev;
5168 	struct intel_digital_port *intel_dig_port;
5169 
5170 	switch (intel_encoder->type) {
5171 	case INTEL_OUTPUT_UNKNOWN:
5172 	case INTEL_OUTPUT_HDMI:
5173 		/*
5174 		 * Only DDI platforms should ever use these output types.
5175 		 * We can get here after the HDMI detect code has already set
5176 		 * the type of the shared encoder. Since we can't be sure
5177 		 * of the status of the given connectors, play it safe and
5178 		 * run the DP detection too.
5179 		 */
5180 		WARN_ON_ONCE(!HAS_DDI(dev));
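		/* fall through */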
5181 	case INTEL_OUTPUT_DISPLAYPORT:
5182 	case INTEL_OUTPUT_EDP:
5183 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5184 		return port_to_aux_power_domain(intel_dig_port->port);
5185 	case INTEL_OUTPUT_DP_MST:
5186 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5187 		return port_to_aux_power_domain(intel_dig_port->port);
5188 	default:
5189 		MISSING_CASE(intel_encoder->type);
5190 		return POWER_DOMAIN_AUX_A;
5191 	}
5192 }
5193 
5194 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5195 					    struct intel_crtc_state *crtc_state)
5196 {
5197 	struct drm_device *dev = crtc->dev;
5198 	struct drm_encoder *encoder;
5199 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5200 	enum i915_pipe pipe = intel_crtc->pipe;
5201 	unsigned long mask;
5202 	enum transcoder transcoder = crtc_state->cpu_transcoder;
5203 
5204 	if (!crtc_state->base.active)
5205 		return 0;
5206 
5207 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5208 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5209 	if (crtc_state->pch_pfit.enabled ||
5210 	    crtc_state->pch_pfit.force_thru)
5211 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5212 
5213 	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5214 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5215 
5216 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5217 	}
5218 
5219 	if (crtc_state->shared_dpll)
5220 		mask |= BIT(POWER_DOMAIN_PLLS);
5221 
5222 	return mask;
5223 }
5224 
5225 static unsigned long
5226 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5227 			       struct intel_crtc_state *crtc_state)
5228 {
5229 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5230 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5231 	enum intel_display_power_domain domain;
5232 	unsigned long domains, new_domains, old_domains;
5233 
5234 	old_domains = intel_crtc->enabled_power_domains;
5235 	intel_crtc->enabled_power_domains = new_domains =
5236 		get_crtc_power_domains(crtc, crtc_state);
5237 
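	/*
	 * Take references on the domains we are newly using; the stale
	 * domains (old & ~new) are returned so the caller can release
	 * them via modeset_put_power_domains().
	 */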
5238 	domains = new_domains & ~old_domains;
5239 
5240 	for_each_power_domain(domain, domains)
5241 		intel_display_power_get(dev_priv, domain);
5242 
5243 	return old_domains & ~new_domains;
5244 }
5245 
5246 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5247 				      unsigned long domains)
5248 {
5249 	enum intel_display_power_domain domain;
5250 
5251 	for_each_power_domain(domain, domains)
5252 		intel_display_power_put(dev_priv, domain);
5253 }
5254 
5255 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5256 {
5257 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5258 
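	/*
	 * On VLV and older the dotclock is guardbanded to 90% of the cdclk
	 * (95% on CHV), mirroring the limits used when picking the cdclk;
	 * gen < 4 parts can use double wide pipe mode, doubling the limit.
	 */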
5259 	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5260 	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5261 		return max_cdclk_freq;
5262 	else if (IS_CHERRYVIEW(dev_priv))
5263 		return max_cdclk_freq*95/100;
5264 	else if (INTEL_INFO(dev_priv)->gen < 4)
5265 		return 2*max_cdclk_freq*90/100;
5266 	else
5267 		return max_cdclk_freq*90/100;
5268 }
5269 
5270 static void intel_update_max_cdclk(struct drm_device *dev)
5271 {
5272 	struct drm_i915_private *dev_priv = dev->dev_private;
5273 
5274 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5275 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5276 
5277 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5278 			dev_priv->max_cdclk_freq = 675000;
5279 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5280 			dev_priv->max_cdclk_freq = 540000;
5281 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5282 			dev_priv->max_cdclk_freq = 450000;
5283 		else
5284 			dev_priv->max_cdclk_freq = 337500;
5285 	} else if (IS_BROXTON(dev)) {
5286 		dev_priv->max_cdclk_freq = 624000;
5287 	} else if (IS_BROADWELL(dev)) {
5288 		/*
5289 		 * FIXME with extra cooling we can allow
5290 		 * 540 MHz for ULX and 675 MHz for ULT.
5291 		 * How can we know if extra cooling is
5292 		 * available? PCI ID, VBT, something else?
5293 		 */
5294 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5295 			dev_priv->max_cdclk_freq = 450000;
5296 		else if (IS_BDW_ULX(dev))
5297 			dev_priv->max_cdclk_freq = 450000;
5298 		else if (IS_BDW_ULT(dev))
5299 			dev_priv->max_cdclk_freq = 540000;
5300 		else
5301 			dev_priv->max_cdclk_freq = 675000;
5302 	} else if (IS_CHERRYVIEW(dev)) {
5303 		dev_priv->max_cdclk_freq = 320000;
5304 	} else if (IS_VALLEYVIEW(dev)) {
5305 		dev_priv->max_cdclk_freq = 400000;
5306 	} else {
5307 		/* otherwise assume cdclk is fixed */
5308 		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5309 	}
5310 
5311 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5312 
5313 	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5314 			 dev_priv->max_cdclk_freq);
5315 
5316 	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5317 			 dev_priv->max_dotclk_freq);
5318 }
5319 
5320 static void intel_update_cdclk(struct drm_device *dev)
5321 {
5322 	struct drm_i915_private *dev_priv = dev->dev_private;
5323 
5324 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5325 	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5326 			 dev_priv->cdclk_freq);
5327 
5333 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5334 		/*
5335 		 * Program the gmbus_freq based on the cdclk frequency.
5336 		 * BSpec erroneously claims we should aim for 4MHz, but
5337 		 * in fact 1MHz is the correct frequency.
5338 		 */
5339 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5340 	}
5341 
5342 	if (dev_priv->max_cdclk_freq == 0)
5343 		intel_update_max_cdclk(dev);
5344 }
5345 
5346 static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5347 {
5348 	uint32_t divider;
5349 	uint32_t ratio;
5350 	uint32_t current_freq;
5351 	int ret;
5352 
5353 	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5354 	switch (frequency) {
5355 	case 144000:
5356 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5357 		ratio = BXT_DE_PLL_RATIO(60);
5358 		break;
5359 	case 288000:
5360 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5361 		ratio = BXT_DE_PLL_RATIO(60);
5362 		break;
5363 	case 384000:
5364 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5365 		ratio = BXT_DE_PLL_RATIO(60);
5366 		break;
5367 	case 576000:
5368 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5369 		ratio = BXT_DE_PLL_RATIO(60);
5370 		break;
5371 	case 624000:
5372 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5373 		ratio = BXT_DE_PLL_RATIO(65);
5374 		break;
5375 	case 19200:
5376 		/*
5377 		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5378 		 * to suppress GCC warning.
5379 		 */
5380 		ratio = 0;
5381 		divider = 0;
5382 		break;
5383 	default:
5384 		DRM_ERROR("unsupported CDCLK freq %d\n", frequency);
5385 
5386 		return;
5387 	}
5388 
5389 	mutex_lock(&dev_priv->rps.hw_lock);
5390 	/* Inform power controller of upcoming frequency change */
5391 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5392 				      0x80000000);
5393 	mutex_unlock(&dev_priv->rps.hw_lock);
5394 
5395 	if (ret) {
5396 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5397 			  ret, frequency);
5398 		return;
5399 	}
5400 
5401 	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5402 	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
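	/* e.g. a raw field value of 1246 decodes to 1246 * 500 + 1000 = 624000 kHz */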
5403 	current_freq = current_freq * 500 + 1000;
5404 
5405 	/*
5406 	 * DE PLL has to be disabled when
5407 	 * - setting to 19.2MHz (bypass, PLL isn't used)
5408 	 * - before setting to 624MHz (PLL needs toggling)
5409 	 * - before switching away from 624MHz (PLL needs toggling)
5410 	 */
5411 	if (frequency == 19200 || frequency == 624000 ||
5412 	    current_freq == 624000) {
5413 		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5414 		/* Timeout 200us */
5415 		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5416 			     1))
5417 			DRM_ERROR("timeout waiting for DE PLL unlock\n");
5418 	}
5419 
5420 	if (frequency != 19200) {
5421 		uint32_t val;
5422 
5423 		val = I915_READ(BXT_DE_PLL_CTL);
5424 		val &= ~BXT_DE_PLL_RATIO_MASK;
5425 		val |= ratio;
5426 		I915_WRITE(BXT_DE_PLL_CTL, val);
5427 
5428 		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5429 		/* Timeout 200us */
5430 		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5431 			DRM_ERROR("timeout waiting for DE PLL lock\n");
5432 
5433 		val = I915_READ(CDCLK_CTL);
5434 		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5435 		val |= divider;
5436 		/*
5437 		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5438 		 * enable otherwise.
5439 		 */
5440 		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5441 		if (frequency >= 500000)
5442 			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5443 
5444 		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5445 		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5446 		val |= (frequency - 1000) / 500;
5447 		I915_WRITE(CDCLK_CTL, val);
5448 	}
5449 
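	/* Inform the power controller of the new frequency, in 25 MHz units
	 * (e.g. 624000 kHz -> 25). */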
5450 	mutex_lock(&dev_priv->rps.hw_lock);
5451 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5452 				      DIV_ROUND_UP(frequency, 25000));
5453 	mutex_unlock(&dev_priv->rps.hw_lock);
5454 
5455 	if (ret) {
5456 		DRM_ERROR("PCode CDCLK freq set failed (err %d, freq %d)\n",
5457 			  ret, frequency);
5458 		return;
5459 	}
5460 
5461 	intel_update_cdclk(dev_priv->dev);
5462 }
5463 
5464 static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
5465 {
5466 	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
5467 		return false;
5468 
5469 	/* TODO: Check for a valid CDCLK rate */
5470 
5471 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
5472 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
5473 
5474 		return false;
5475 	}
5476 
5477 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
5478 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
5479 
5480 		return false;
5481 	}
5482 
5483 	return true;
5484 }
5485 
5486 bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
5487 {
5488 	return broxton_cdclk_is_enabled(dev_priv);
5489 }
5490 
5491 void broxton_init_cdclk(struct drm_i915_private *dev_priv)
5492 {
5493 	/* check if cd clock is enabled */
5494 	if (broxton_cdclk_is_enabled(dev_priv)) {
5495 		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5496 		return;
5497 	}
5498 
5499 	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
5500 
5501 	/*
5502 	 * FIXME:
5503 	 * - The initial CDCLK needs to be read from VBT.
5504 	 *   Need to make this change after VBT has changes for BXT.
5505 	 * - check if setting the max (or any) cdclk freq is really necessary
5506 	 *   here, it belongs to modeset time
5507 	 */
5508 	broxton_set_cdclk(dev_priv, 624000);
5509 
5510 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5511 	POSTING_READ(DBUF_CTL);
5512 
5513 	udelay(10);
5514 
5515 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5516 		DRM_ERROR("DBuf power enable timeout!\n");
5517 }
5518 
5519 void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
5520 {
5521 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5522 	POSTING_READ(DBUF_CTL);
5523 
5524 	udelay(10);
5525 
5526 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5527 		DRM_ERROR("DBuf power disable timeout!\n");
5528 
5529 	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5530 	broxton_set_cdclk(dev_priv, 19200);
5531 }
5532 
5533 static const struct skl_cdclk_entry {
5534 	unsigned int freq;
5535 	unsigned int vco;
5536 } skl_cdclk_frequencies[] = {
5537 	{ .freq = 308570, .vco = 8640 },
5538 	{ .freq = 337500, .vco = 8100 },
5539 	{ .freq = 432000, .vco = 8640 },
5540 	{ .freq = 450000, .vco = 8100 },
5541 	{ .freq = 540000, .vco = 8100 },
5542 	{ .freq = 617140, .vco = 8640 },
5543 	{ .freq = 675000, .vco = 8100 },
5544 };
5545 
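/*
 * Convert a cdclk frequency in kHz to the CDCLK_CTL decimal field:
 * .1 fixpoint MHz with a -1 MHz offset, e.g. 337500 kHz -> 673.
 */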
5546 static unsigned int skl_cdclk_decimal(unsigned int freq)
5547 {
5548 	return (freq - 1000) / 500;
5549 }
5550 
5551 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5552 {
5553 	unsigned int i;
5554 
5555 	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5556 		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5557 
5558 		if (e->freq == freq)
5559 			return e->vco;
5560 	}
5561 
5562 	return 8100;
5563 }
5564 
5565 static void
5566 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5567 {
5568 	unsigned int min_freq;
5569 	u32 val;
5570 
5571 	/* select the minimum CDCLK before enabling DPLL 0 */
5576 	if (required_vco == 8640)
5577 		min_freq = 308570;
5578 	else
5579 		min_freq = 337500;
5580 
5581 	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5582 
5583 	I915_WRITE(CDCLK_CTL, val);
5584 	POSTING_READ(CDCLK_CTL);
5585 
5586 	/*
5587 	 * We always enable DPLL0 with the lowest link rate possible, but still
5588 	 * taking into account the VCO required to operate the eDP panel at the
5589 	 * desired frequency. The usual DP link rates operate with a VCO of
5590 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5591 	 * The modeset code is responsible for the selection of the exact link
5592 	 * rate later on, with the constraint of choosing a frequency that
5593 	 * works with required_vco.
5594 	 */
5595 	val = I915_READ(DPLL_CTRL1);
5596 
5597 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5598 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5599 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5600 	if (required_vco == 8640)
5601 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5602 					    SKL_DPLL0);
5603 	else
5604 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5605 					    SKL_DPLL0);
5606 
5607 	I915_WRITE(DPLL_CTRL1, val);
5608 	POSTING_READ(DPLL_CTRL1);
5609 
5610 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5611 
5612 	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5613 		DRM_ERROR("DPLL0 not locked\n");
5614 }
5615 
5616 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5617 {
5618 	int ret;
5619 	u32 val;
5620 
5621 	/* inform PCU we want to change CDCLK */
5622 	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5623 	mutex_lock(&dev_priv->rps.hw_lock);
5624 	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5625 	mutex_unlock(&dev_priv->rps.hw_lock);
5626 
5627 	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5628 }
5629 
5630 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5631 {
5632 	unsigned int i;
5633 
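	/* Poll for up to ~150 us (15 tries, 10 us apart) for the PCU ack. */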
5634 	for (i = 0; i < 15; i++) {
5635 		if (skl_cdclk_pcu_ready(dev_priv))
5636 			return true;
5637 		udelay(10);
5638 	}
5639 
5640 	return false;
5641 }
5642 
5643 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5644 {
5645 	struct drm_device *dev = dev_priv->dev;
5646 	u32 freq_select, pcu_ack;
5647 
5648 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz\n", freq);
5649 
5650 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5651 		DRM_ERROR("failed to inform PCU about cdclk change\n");
5652 		return;
5653 	}
5654 
5655 	/* set CDCLK_CTL */
5656 	switch (freq) {
5657 	case 450000:
5658 	case 432000:
5659 		freq_select = CDCLK_FREQ_450_432;
5660 		pcu_ack = 1;
5661 		break;
5662 	case 540000:
5663 		freq_select = CDCLK_FREQ_540;
5664 		pcu_ack = 2;
5665 		break;
5666 	case 308570:
5667 	case 337500:
5668 	default:
5669 		freq_select = CDCLK_FREQ_337_308;
5670 		pcu_ack = 0;
5671 		break;
5672 	case 617140:
5673 	case 675000:
5674 		freq_select = CDCLK_FREQ_675_617;
5675 		pcu_ack = 3;
5676 		break;
5677 	}
5678 
5679 	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5680 	POSTING_READ(CDCLK_CTL);
5681 
5682 	/* inform PCU of the change */
5683 	mutex_lock(&dev_priv->rps.hw_lock);
5684 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5685 	mutex_unlock(&dev_priv->rps.hw_lock);
5686 
5687 	intel_update_cdclk(dev);
5688 }
5689 
5690 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5691 {
5692 	/* disable DBUF power */
5693 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5694 	POSTING_READ(DBUF_CTL);
5695 
5696 	udelay(10);
5697 
5698 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5699 		DRM_ERROR("DBuf power disable timeout\n");
5700 
5701 	/* disable DPLL0 */
5702 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5703 	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5704 		DRM_ERROR("Couldn't disable DPLL0\n");
5705 }
5706 
5707 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5708 {
5709 	unsigned int required_vco;
5710 
5711 	/* DPLL0 not enabled (happens on early BIOS versions) */
5712 	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5713 		/* enable DPLL0 */
5714 		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5715 		skl_dpll0_enable(dev_priv, required_vco);
5716 	}
5717 
5718 	/* set CDCLK to the frequency the BIOS chose */
5719 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5720 
5721 	/* enable DBUF power */
5722 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5723 	POSTING_READ(DBUF_CTL);
5724 
5725 	udelay(10);
5726 
5727 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5728 		DRM_ERROR("DBuf power enable timeout\n");
5729 }
5730 
5731 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5732 {
5733 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5734 	uint32_t cdctl = I915_READ(CDCLK_CTL);
5735 	int freq = dev_priv->skl_boot_cdclk;
5736 
5737 	/*
5738 	 * Check if the pre-OS initialized the display.
5739 	 * The pre-OS sets the SWF18 scratchpad register, which OS
5740 	 * drivers can read to check the status.
5741 	 */
5742 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5743 		goto sanitize;
5744 
5745 	/* Is PLL enabled and locked ? */
5746 	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5747 		goto sanitize;
5748 
5749 	/* DPLL okay; verify the cdclock
5750 	 *
5751 	 * In some instances the BIOS programs the frequency selection
5752 	 * correctly but gets the decimal part wrong when the pre-OS does
5753 	 * not enable the display. Verify that as well.
5754 	 */
5755 	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5756 		/* All well; nothing to sanitize */
5757 		return false;
5758 sanitize:
5759 	/*
5760 	 * For now initialize with the max cdclk until
5761 	 * we get dynamic cdclk support.
5762 	 */
5763 	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5764 	skl_init_cdclk(dev_priv);
5765 
5766 	/* we did have to sanitize */
5767 	return true;
5768 }
5769 
5770 /* Adjust CDclk dividers to allow high res or save power if possible */
5771 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5772 {
5773 	struct drm_i915_private *dev_priv = dev->dev_private;
5774 	u32 val, cmd;
5775 
5776 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5777 					!= dev_priv->cdclk_freq);
5778 
5779 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5780 		cmd = 2;
5781 	else if (cdclk == 266667)
5782 		cmd = 1;
5783 	else
5784 		cmd = 0;
5785 
5786 	mutex_lock(&dev_priv->rps.hw_lock);
5787 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5788 	val &= ~DSPFREQGUAR_MASK;
5789 	val |= (cmd << DSPFREQGUAR_SHIFT);
5790 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5791 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5792 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5793 		     50)) {
5794 		DRM_ERROR("timed out waiting for CDclk change\n");
5795 	}
5796 	mutex_unlock(&dev_priv->rps.hw_lock);
5797 
5798 	mutex_lock(&dev_priv->sb_lock);
5799 
5800 	if (cdclk == 400000) {
5801 		u32 divider;
5802 
5803 		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5804 
5805 		/* adjust cdclk divider */
5806 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5807 		val &= ~CCK_FREQUENCY_VALUES;
5808 		val |= divider;
5809 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5810 
5811 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5812 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5813 			     50))
5814 			DRM_ERROR("timed out waiting for CDclk change\n");
5815 	}
5816 
5817 	/* adjust self-refresh exit latency value */
5818 	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5819 	val &= ~0x7f;
5820 
5821 	/*
5822 	 * For high bandwidth configs, we set a higher latency in the bunit
5823 	 * so that the core display fetch happens in time to avoid underruns.
5824 	 */
5825 	if (cdclk == 400000)
5826 		val |= 4500 / 250; /* 4.5 usec */
5827 	else
5828 		val |= 3000 / 250; /* 3.0 usec */
5829 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5830 
5831 	mutex_unlock(&dev_priv->sb_lock);
5832 
5833 	intel_update_cdclk(dev);
5834 }
5835 
5836 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5837 {
5838 	struct drm_i915_private *dev_priv = dev->dev_private;
5839 	u32 val, cmd;
5840 
5841 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5842 						!= dev_priv->cdclk_freq);
5843 
5844 	switch (cdclk) {
5845 	case 333333:
5846 	case 320000:
5847 	case 266667:
5848 	case 200000:
5849 		break;
5850 	default:
5851 		MISSING_CASE(cdclk);
5852 		return;
5853 	}
5854 
5855 	/*
5856 	 * Specs are full of misinformation, but testing on actual
5857 	 * hardware has shown that we just need to write the desired
5858 	 * CCK divider into the Punit register.
5859 	 */
5860 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5861 
5862 	mutex_lock(&dev_priv->rps.hw_lock);
5863 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5864 	val &= ~DSPFREQGUAR_MASK_CHV;
5865 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5866 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5867 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5868 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5869 		     50)) {
5870 		DRM_ERROR("timed out waiting for CDclk change\n");
5871 	}
5872 	mutex_unlock(&dev_priv->rps.hw_lock);
5873 
5874 	intel_update_cdclk(dev);
5875 }
5876 
5877 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5878 				 int max_pixclk)
5879 {
5880 	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
5881 	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5882 
5883 	/*
5884 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5885 	 *   200MHz
5886 	 *   267MHz
5887 	 *   320/333MHz (depends on HPLL freq)
5888 	 *   400MHz (VLV only)
5889 	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5890 	 * of the lower bin and adjust if needed.
5891 	 *
5892 	 * We seem to get an unstable or solid color picture at 200MHz.
5893 	 * Not sure what's wrong. For now use 200MHz only when all pipes
5894 	 * are off.
5895 	 */
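	/*
	 * e.g. on VLV a 250 MHz pixel clock exceeds 266667 * 90 / 100 =
	 * 240000, but stays below the 320/333 MHz bin's guardband, so the
	 * 320/333 MHz cdclk is selected.
	 */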
5896 	if (!IS_CHERRYVIEW(dev_priv) &&
5897 	    max_pixclk > freq_320*limit/100)
5898 		return 400000;
5899 	else if (max_pixclk > 266667*limit/100)
5900 		return freq_320;
5901 	else if (max_pixclk > 0)
5902 		return 266667;
5903 	else
5904 		return 200000;
5905 }
5906 
5907 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
5908 			      int max_pixclk)
5909 {
5910 	/*
5911 	 * FIXME:
5912 	 * - remove the guardband, it's not needed on BXT
5913 	 * - set 19.2MHz bypass frequency if there are no active pipes
5914 	 */
5915 	if (max_pixclk > 576000*9/10)
5916 		return 624000;
5917 	else if (max_pixclk > 384000*9/10)
5918 		return 576000;
5919 	else if (max_pixclk > 288000*9/10)
5920 		return 384000;
5921 	else if (max_pixclk > 144000*9/10)
5922 		return 288000;
5923 	else
5924 		return 144000;
5925 }
5926 
5927 /* Compute the max pixel clock for new configuration. */
5928 static int intel_mode_max_pixclk(struct drm_device *dev,
5929 				 struct drm_atomic_state *state)
5930 {
5931 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5932 	struct drm_i915_private *dev_priv = dev->dev_private;
5933 	struct drm_crtc *crtc;
5934 	struct drm_crtc_state *crtc_state;
5935 	unsigned max_pixclk = 0, i;
5936 	enum i915_pipe pipe;
5937 
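	/* Start from the current per-pipe values; CRTCs not touched by this
	 * state keep their previous pixclk. */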
5938 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
5939 	       sizeof(intel_state->min_pixclk));
5940 
5941 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
5942 		int pixclk = 0;
5943 
5944 		if (crtc_state->enable)
5945 			pixclk = crtc_state->adjusted_mode.crtc_clock;
5946 
5947 		intel_state->min_pixclk[i] = pixclk;
5948 	}
5949 
5950 	for_each_pipe(dev_priv, pipe)
5951 		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
5952 
5953 	return max_pixclk;
5954 }
5955 
5956 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5957 {
5958 	struct drm_device *dev = state->dev;
5959 	struct drm_i915_private *dev_priv = dev->dev_private;
5960 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5961 	struct intel_atomic_state *intel_state =
5962 		to_intel_atomic_state(state);
5963 
5964 	if (max_pixclk < 0)
5965 		return max_pixclk;
5966 
5967 	intel_state->cdclk = intel_state->dev_cdclk =
5968 		valleyview_calc_cdclk(dev_priv, max_pixclk);
5969 
5970 	if (!intel_state->active_crtcs)
5971 		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
5972 
5973 	return 0;
5974 }
5975 
5976 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5977 {
5978 	struct drm_device *dev = state->dev;
5979 	struct drm_i915_private *dev_priv = dev->dev_private;
5980 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5981 	struct intel_atomic_state *intel_state =
5982 		to_intel_atomic_state(state);
5983 
5984 	if (max_pixclk < 0)
5985 		return max_pixclk;
5986 
5987 	intel_state->cdclk = intel_state->dev_cdclk =
5988 		broxton_calc_cdclk(dev_priv, max_pixclk);
5989 
5990 	if (!intel_state->active_crtcs)
5991 		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
5992 
5993 	return 0;
5994 }
5995 
5996 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5997 {
5998 	unsigned int credits, default_credits;
5999 
6000 	if (IS_CHERRYVIEW(dev_priv))
6001 		default_credits = PFI_CREDIT(12);
6002 	else
6003 		default_credits = PFI_CREDIT(8);
6004 
6005 	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6006 		/* CHV suggested value is 31 or 63 */
6007 		if (IS_CHERRYVIEW(dev_priv))
6008 			credits = PFI_CREDIT_63;
6009 		else
6010 			credits = PFI_CREDIT(15);
6011 	} else {
6012 		credits = default_credits;
6013 	}
6014 
6015 	/*
6016 	 * WA - write default credits before re-programming
6017 	 * FIXME: should we also set the resend bit here?
6018 	 */
6019 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6020 		   default_credits);
6021 
6022 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6023 		   credits | PFI_CREDIT_RESEND);
6024 
6025 	/*
6026 	 * FIXME is this guaranteed to clear
6027 	 * immediately or should we poll for it?
6028 	 */
6029 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6030 }
6031 
6032 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6033 {
6034 	struct drm_device *dev = old_state->dev;
6035 	struct drm_i915_private *dev_priv = dev->dev_private;
6036 	struct intel_atomic_state *old_intel_state =
6037 		to_intel_atomic_state(old_state);
6038 	unsigned req_cdclk = old_intel_state->dev_cdclk;
6039 
6040 	/*
6041 	 * FIXME: We can end up here with all power domains off, yet
6042 	 * with a CDCLK frequency other than the minimum. To account
6043 	 * for this take the PIPE-A power domain, which covers the HW
6044 	 * blocks needed for the following programming. This can be
6045 	 * removed once it's guaranteed that we get here either with
6046 	 * the minimum CDCLK set, or the required power domains
6047 	 * enabled.
6048 	 */
6049 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6050 
6051 	if (IS_CHERRYVIEW(dev))
6052 		cherryview_set_cdclk(dev, req_cdclk);
6053 	else
6054 		valleyview_set_cdclk(dev, req_cdclk);
6055 
6056 	vlv_program_pfi_credits(dev_priv);
6057 
6058 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6059 }
6060 
6061 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6062 {
6063 	struct drm_device *dev = crtc->dev;
6064 	struct drm_i915_private *dev_priv = to_i915(dev);
6065 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6066 	struct intel_encoder *encoder;
6067 	struct intel_crtc_state *pipe_config =
6068 		to_intel_crtc_state(crtc->state);
6069 	int pipe = intel_crtc->pipe;
6070 
6071 	if (WARN_ON(intel_crtc->active))
6072 		return;
6073 
6074 	if (intel_crtc->config->has_dp_encoder)
6075 		intel_dp_set_m_n(intel_crtc, M1_N1);
6076 
6077 	intel_set_pipe_timings(intel_crtc);
6078 	intel_set_pipe_src_size(intel_crtc);
6079 
6080 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6081 		struct drm_i915_private *dev_priv = dev->dev_private;
6082 
6083 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6084 		I915_WRITE(CHV_CANVAS(pipe), 0);
6085 	}
6086 
6087 	i9xx_set_pipeconf(intel_crtc);
6088 
6089 	intel_crtc->active = true;
6090 
6091 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6092 
6093 	for_each_encoder_on_crtc(dev, crtc, encoder)
6094 		if (encoder->pre_pll_enable)
6095 			encoder->pre_pll_enable(encoder);
6096 
6097 	if (IS_CHERRYVIEW(dev)) {
6098 		chv_prepare_pll(intel_crtc, intel_crtc->config);
6099 		chv_enable_pll(intel_crtc, intel_crtc->config);
6100 	} else {
6101 		vlv_prepare_pll(intel_crtc, intel_crtc->config);
6102 		vlv_enable_pll(intel_crtc, intel_crtc->config);
6103 	}
6104 
6105 	for_each_encoder_on_crtc(dev, crtc, encoder)
6106 		if (encoder->pre_enable)
6107 			encoder->pre_enable(encoder);
6108 
6109 	i9xx_pfit_enable(intel_crtc);
6110 
6111 	intel_color_load_luts(&pipe_config->base);
6112 
6113 	intel_update_watermarks(crtc);
6114 	intel_enable_pipe(intel_crtc);
6115 
6116 	assert_vblank_disabled(crtc);
6117 	drm_crtc_vblank_on(crtc);
6118 
6119 	for_each_encoder_on_crtc(dev, crtc, encoder)
6120 		encoder->enable(encoder);
6121 }
6122 
6123 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6124 {
6125 	struct drm_device *dev = crtc->base.dev;
6126 	struct drm_i915_private *dev_priv = dev->dev_private;
6127 
6128 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6129 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6130 }
6131 
6132 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6133 {
6134 	struct drm_device *dev = crtc->dev;
6135 	struct drm_i915_private *dev_priv = to_i915(dev);
6136 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6137 	struct intel_encoder *encoder;
6138 	struct intel_crtc_state *pipe_config =
6139 		to_intel_crtc_state(crtc->state);
6140 	enum i915_pipe pipe = intel_crtc->pipe;
6141 
6142 	if (WARN_ON(intel_crtc->active))
6143 		return;
6144 
6145 	i9xx_set_pll_dividers(intel_crtc);
6146 
6147 	if (intel_crtc->config->has_dp_encoder)
6148 		intel_dp_set_m_n(intel_crtc, M1_N1);
6149 
6150 	intel_set_pipe_timings(intel_crtc);
6151 	intel_set_pipe_src_size(intel_crtc);
6152 
6153 	i9xx_set_pipeconf(intel_crtc);
6154 
6155 	intel_crtc->active = true;
6156 
6157 	if (!IS_GEN2(dev))
6158 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6159 
6160 	for_each_encoder_on_crtc(dev, crtc, encoder)
6161 		if (encoder->pre_enable)
6162 			encoder->pre_enable(encoder);
6163 
6164 	i9xx_enable_pll(intel_crtc);
6165 
6166 	i9xx_pfit_enable(intel_crtc);
6167 
6168 	intel_color_load_luts(&pipe_config->base);
6169 
6170 	intel_update_watermarks(crtc);
6171 	intel_enable_pipe(intel_crtc);
6172 
6173 	assert_vblank_disabled(crtc);
6174 	drm_crtc_vblank_on(crtc);
6175 
6176 	for_each_encoder_on_crtc(dev, crtc, encoder)
6177 		encoder->enable(encoder);
6178 }
6179 
6180 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6181 {
6182 	struct drm_device *dev = crtc->base.dev;
6183 	struct drm_i915_private *dev_priv = dev->dev_private;
6184 
6185 	if (!crtc->config->gmch_pfit.control)
6186 		return;
6187 
6188 	assert_pipe_disabled(dev_priv, crtc->pipe);
6189 
6190 	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6191 			 I915_READ(PFIT_CONTROL));
6192 	I915_WRITE(PFIT_CONTROL, 0);
6193 }
6194 
6195 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6196 {
6197 	struct drm_device *dev = crtc->dev;
6198 	struct drm_i915_private *dev_priv = dev->dev_private;
6199 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6200 	struct intel_encoder *encoder;
6201 	int pipe = intel_crtc->pipe;
6202 
6203 	/*
6204 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6205 	 * wait for planes to fully turn off before disabling the pipe.
6206 	 */
6207 	if (IS_GEN2(dev))
6208 		intel_wait_for_vblank(dev, pipe);
6209 
6210 	for_each_encoder_on_crtc(dev, crtc, encoder)
6211 		encoder->disable(encoder);
6212 
6213 	drm_crtc_vblank_off(crtc);
6214 	assert_vblank_disabled(crtc);
6215 
6216 	intel_disable_pipe(intel_crtc);
6217 
6218 	i9xx_pfit_disable(intel_crtc);
6219 
6220 	for_each_encoder_on_crtc(dev, crtc, encoder)
6221 		if (encoder->post_disable)
6222 			encoder->post_disable(encoder);
6223 
6224 	if (!intel_crtc->config->has_dsi_encoder) {
6225 		if (IS_CHERRYVIEW(dev))
6226 			chv_disable_pll(dev_priv, pipe);
6227 		else if (IS_VALLEYVIEW(dev))
6228 			vlv_disable_pll(dev_priv, pipe);
6229 		else
6230 			i9xx_disable_pll(intel_crtc);
6231 	}
6232 
6233 	for_each_encoder_on_crtc(dev, crtc, encoder)
6234 		if (encoder->post_pll_disable)
6235 			encoder->post_pll_disable(encoder);
6236 
6237 	if (!IS_GEN2(dev))
6238 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6239 }
6240 
6241 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6242 {
6243 	struct intel_encoder *encoder;
6244 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6245 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6246 	enum intel_display_power_domain domain;
6247 	unsigned long domains;
6248 
6249 	if (!intel_crtc->active)
6250 		return;
6251 
6252 	if (to_intel_plane_state(crtc->primary->state)->visible) {
6253 		WARN_ON(intel_crtc->unpin_work);
6254 
6255 		intel_pre_disable_primary_noatomic(crtc);
6256 
6257 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6258 		to_intel_plane_state(crtc->primary->state)->visible = false;
6259 	}
6260 
6261 	dev_priv->display.crtc_disable(crtc);
6262 
6263 	DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
6264 		      crtc->base.id);
6265 
6266 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6267 	crtc->state->active = false;
6268 	intel_crtc->active = false;
6269 	crtc->enabled = false;
6270 	crtc->state->connector_mask = 0;
6271 	crtc->state->encoder_mask = 0;
6272 
6273 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6274 		encoder->base.crtc = NULL;
6275 
6276 	intel_fbc_disable(intel_crtc);
6277 	intel_update_watermarks(crtc);
6278 	intel_disable_shared_dpll(intel_crtc);
6279 
6280 	domains = intel_crtc->enabled_power_domains;
6281 	for_each_power_domain(domain, domains)
6282 		intel_display_power_put(dev_priv, domain);
6283 	intel_crtc->enabled_power_domains = 0;
6284 
6285 	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6286 	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6287 }
6288 
6289 /*
6290  * Turn all CRTCs off, but do not adjust state.
6291  * This has to be paired with a call to intel_modeset_setup_hw_state.
6292  */
6293 int intel_display_suspend(struct drm_device *dev)
6294 {
6295 	struct drm_i915_private *dev_priv = to_i915(dev);
6296 	struct drm_atomic_state *state;
6297 	int ret;
6298 
6299 	state = drm_atomic_helper_suspend(dev);
6300 	ret = PTR_ERR_OR_ZERO(state);
6301 	if (ret)
6302 		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
6303 	else
6304 		dev_priv->modeset_restore_state = state;
6305 	return ret;
6306 }
6307 
6308 void intel_encoder_destroy(struct drm_encoder *encoder)
6309 {
6310 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6311 
6312 	drm_encoder_cleanup(encoder);
6313 	kfree(intel_encoder);
6314 }
6315 
6316 /* Cross check the actual hw state with our own modeset state tracking (and its
6317  * internal consistency). */
6318 static void intel_connector_verify_state(struct intel_connector *connector)
6319 {
6320 	struct drm_crtc *crtc = connector->base.state->crtc;
6321 
6322 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6323 		      connector->base.base.id,
6324 		      connector->base.name);
6325 
6326 	if (connector->get_hw_state(connector)) {
6327 		struct intel_encoder *encoder = connector->encoder;
6328 		struct drm_connector_state *conn_state = connector->base.state;
6329 
6330 		I915_STATE_WARN(!crtc,
6331 			 "connector enabled without attached crtc\n");
6332 
6333 		if (!crtc)
6334 			return;
6335 
6336 		I915_STATE_WARN(!crtc->state->active,
6337 		      "connector is active, but attached crtc isn't\n");
6338 
6339 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6340 			return;
6341 
6342 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6343 			"atomic encoder doesn't match attached encoder\n");
6344 
6345 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6346 			"attached encoder crtc differs from connector crtc\n");
6347 	} else {
6348 		I915_STATE_WARN(crtc && crtc->state->active,
6349 			"attached crtc is active, but connector isn't\n");
6350 		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6351 			"best encoder set without crtc!\n");
6352 	}
6353 }
6354 
6355 int intel_connector_init(struct intel_connector *connector)
6356 {
6357 	drm_atomic_helper_connector_reset(&connector->base);
6358 
6359 	if (!connector->base.state)
6360 		return -ENOMEM;
6361 
6362 	return 0;
6363 }
6364 
6365 struct intel_connector *intel_connector_alloc(void)
6366 {
6367 	struct intel_connector *connector;
6368 
6369 	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6370 	if (!connector)
6371 		return NULL;
6372 
6373 	if (intel_connector_init(connector) < 0) {
6374 		kfree(connector);
6375 		return NULL;
6376 	}
6377 
6378 	return connector;
6379 }
6380 
6381 /* Simple connector->get_hw_state implementation for encoders that support only
6382  * one connector and no cloning and hence the encoder state determines the state
6383  * of the connector. */
6384 bool intel_connector_get_hw_state(struct intel_connector *connector)
6385 {
6386 	enum i915_pipe pipe = 0;
6387 	struct intel_encoder *encoder = connector->encoder;
6388 
6389 	return encoder->get_hw_state(encoder, &pipe);
6390 }
6391 
6392 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6393 {
6394 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6395 		return crtc_state->fdi_lanes;
6396 
6397 	return 0;
6398 }
6399 
6400 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
6401 				     struct intel_crtc_state *pipe_config)
6402 {
6403 	struct drm_atomic_state *state = pipe_config->base.state;
6404 	struct intel_crtc *other_crtc;
6405 	struct intel_crtc_state *other_crtc_state;
6406 
6407 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6408 		      pipe_name(pipe), pipe_config->fdi_lanes);
6409 	if (pipe_config->fdi_lanes > 4) {
6410 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6411 			      pipe_name(pipe), pipe_config->fdi_lanes);
6412 		return -EINVAL;
6413 	}
6414 
6415 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6416 		if (pipe_config->fdi_lanes > 2) {
6417 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6418 				      pipe_config->fdi_lanes);
6419 			return -EINVAL;
6420 		} else {
6421 			return 0;
6422 		}
6423 	}
6424 
6425 	if (INTEL_INFO(dev)->num_pipes == 2)
6426 		return 0;
6427 
6428 	/* Ivybridge 3 pipe is really complicated */
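	/*
	 * Pipes B and C share the FDI lanes: pipe B may use more than 2
	 * lanes only while pipe C is off, and pipe C (2 lanes max) is only
	 * usable while pipe B uses at most 2 lanes.
	 */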
6429 	switch (pipe) {
6430 	case PIPE_A:
6431 		return 0;
6432 	case PIPE_B:
6433 		if (pipe_config->fdi_lanes <= 2)
6434 			return 0;
6435 
6436 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6437 		other_crtc_state =
6438 			intel_atomic_get_crtc_state(state, other_crtc);
6439 		if (IS_ERR(other_crtc_state))
6440 			return PTR_ERR(other_crtc_state);
6441 
6442 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6443 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6444 				      pipe_name(pipe), pipe_config->fdi_lanes);
6445 			return -EINVAL;
6446 		}
6447 		return 0;
6448 	case PIPE_C:
6449 		if (pipe_config->fdi_lanes > 2) {
6450 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6451 				      pipe_name(pipe), pipe_config->fdi_lanes);
6452 			return -EINVAL;
6453 		}
6454 
6455 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6456 		other_crtc_state =
6457 			intel_atomic_get_crtc_state(state, other_crtc);
6458 		if (IS_ERR(other_crtc_state))
6459 			return PTR_ERR(other_crtc_state);
6460 
6461 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6462 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6463 			return -EINVAL;
6464 		}
6465 		return 0;
6466 	default:
6467 		BUG();
6468 	}
6469 }
6470 
6471 #define RETRY 1
6472 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6473 				       struct intel_crtc_state *pipe_config)
6474 {
6475 	struct drm_device *dev = intel_crtc->base.dev;
6476 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6477 	int lane, link_bw, fdi_dotclock, ret;
6478 	bool needs_recompute = false;
6479 
6480 retry:
6481 	/* FDI is a binary signal running at ~2.7GHz, encoding
6482 	 * each output octet as 10 bits. The actual frequency
6483 	 * is stored as a divider into a 100MHz clock, and the
6484 	 * mode pixel clock is stored in units of 1KHz.
6485 	 * Hence the bw of each lane in terms of the mode signal
6486 	 * is:
6487 	 */
6488 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6489 
6490 	fdi_dotclock = adjusted_mode->crtc_clock;
6491 
6492 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6493 					   pipe_config->pipe_bpp);
6494 
6495 	pipe_config->fdi_lanes = lane;
6496 
6497 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6498 			       link_bw, &pipe_config->fdi_m_n);
6499 
6500 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
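	/* If the lane config is invalid, drop 2 bits per channel and retry,
	 * down to a floor of 6 bpc. */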
6501 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6502 		pipe_config->pipe_bpp -= 2*3;
6503 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6504 			      pipe_config->pipe_bpp);
6505 		needs_recompute = true;
6506 		pipe_config->bw_constrained = true;
6507 
6508 		goto retry;
6509 	}
6510 
6511 	if (needs_recompute)
6512 		return RETRY;
6513 
6514 	return ret;
6515 }
6516 
6517 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6518 				     struct intel_crtc_state *pipe_config)
6519 {
6520 	if (pipe_config->pipe_bpp > 24)
6521 		return false;
6522 
6523 	/* HSW can handle pixel rate up to cdclk? */
6524 	if (IS_HASWELL(dev_priv))
6525 		return true;
6526 
6527 	/*
6528 	 * We compare against max which means we must take
6529 	 * the increased cdclk requirement into account when
6530 	 * calculating the new cdclk.
6531 	 *
6532 	 * Should measure whether using a lower cdclk w/o IPS would be a win.
6533 	 */
6534 	return ilk_pipe_pixel_rate(pipe_config) <=
6535 		dev_priv->max_cdclk_freq * 95 / 100;
6536 }
6537 
6538 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6539 				   struct intel_crtc_state *pipe_config)
6540 {
6541 	struct drm_device *dev = crtc->base.dev;
6542 	struct drm_i915_private *dev_priv = dev->dev_private;
6543 
6544 	pipe_config->ips_enabled = i915.enable_ips &&
6545 		hsw_crtc_supports_ips(crtc) &&
6546 		pipe_config_supports_ips(dev_priv, pipe_config);
6547 }
6548 
6549 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6550 {
6551 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6552 
6553 	/* GDG double wide on either pipe, otherwise pipe A only */
6554 	return INTEL_INFO(dev_priv)->gen < 4 &&
6555 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6556 }
6557 
6558 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6559 				     struct intel_crtc_state *pipe_config)
6560 {
6561 	struct drm_device *dev = crtc->base.dev;
6562 	struct drm_i915_private *dev_priv = dev->dev_private;
6563 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6564 
6565 	/* FIXME should check pixel clock limits on all platforms */
6566 	if (INTEL_INFO(dev)->gen < 4) {
6567 		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6568 
6569 		/*
6570 		 * Enable double wide mode when the dot clock
6571 		 * is > 90% of the (display) core speed.
6572 		 */
6573 		if (intel_crtc_supports_double_wide(crtc) &&
6574 		    adjusted_mode->crtc_clock > clock_limit) {
6575 			clock_limit *= 2;
6576 			pipe_config->double_wide = true;
6577 		}
6578 
6579 		if (adjusted_mode->crtc_clock > clock_limit) {
6580 			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6581 				      adjusted_mode->crtc_clock, clock_limit,
6582 				      yesno(pipe_config->double_wide));
6583 			return -EINVAL;
6584 		}
6585 	}
6586 
6587 	/*
6588 	 * Pipe horizontal size must be even in:
6589 	 * - DVO ganged mode
6590 	 * - LVDS dual channel mode
6591 	 * - Double wide pipe
6592 	 */
6593 	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6594 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6595 		pipe_config->pipe_src_w &= ~1;
6596 
6597 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6598 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6599 	 */
6600 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6601 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6602 		return -EINVAL;
6603 
6604 	if (HAS_IPS(dev))
6605 		hsw_compute_ips_config(crtc, pipe_config);
6606 
6607 	if (pipe_config->has_pch_encoder)
6608 		return ironlake_fdi_compute_config(crtc, pipe_config);
6609 
6610 	return 0;
6611 }
6612 
6613 static int skylake_get_display_clock_speed(struct drm_device *dev)
6614 {
6615 	struct drm_i915_private *dev_priv = to_i915(dev);
6616 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6617 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6618 	uint32_t linkrate;
6619 
6620 	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6621 		return 24000; /* 24MHz is the cd freq with NSSC ref */
6622 
6623 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6624 		return 540000;
6625 
6626 	linkrate = (I915_READ(DPLL_CTRL1) &
6627 		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6628 
6629 	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6630 	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6631 		/* vco 8640 */
6632 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6633 		case CDCLK_FREQ_450_432:
6634 			return 432000;
6635 		case CDCLK_FREQ_337_308:
6636 			return 308570;
6637 		case CDCLK_FREQ_675_617:
6638 			return 617140;
6639 		default:
6640 			WARN(1, "Unknown cd freq selection\n");
6641 		}
6642 	} else {
6643 		/* vco 8100 */
6644 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6645 		case CDCLK_FREQ_450_432:
6646 			return 450000;
6647 		case CDCLK_FREQ_337_308:
6648 			return 337500;
6649 		case CDCLK_FREQ_675_617:
6650 			return 675000;
6651 		default:
6652 			WARN(1, "Unknown cd freq selection\n");
6653 		}
6654 	}
6655 
6656 	/* error case, do as if DPLL0 isn't enabled */
6657 	return 24000;
6658 }
6659 
6660 static int broxton_get_display_clock_speed(struct drm_device *dev)
6661 {
6662 	struct drm_i915_private *dev_priv = to_i915(dev);
6663 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6664 	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6665 	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6666 	int cdclk;
6667 
6668 	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6669 		return 19200;
6670 
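	/* DE PLL output: 19.2 MHz * ratio / 2 (cf. broxton_set_cdclk()) */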
6671 	cdclk = 19200 * pll_ratio / 2;
6672 
6673 	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6674 	case BXT_CDCLK_CD2X_DIV_SEL_1:
6675 		return cdclk;  /* 576MHz or 624MHz */
6676 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6677 		return cdclk * 2 / 3; /* 384MHz */
6678 	case BXT_CDCLK_CD2X_DIV_SEL_2:
6679 		return cdclk / 2; /* 288MHz */
6680 	case BXT_CDCLK_CD2X_DIV_SEL_4:
6681 		return cdclk / 4; /* 144MHz */
6682 	}
6683 
6684 	/* error case, do as if DE PLL isn't enabled */
6685 	return 19200;
6686 }
6687 
6688 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6689 {
6690 	struct drm_i915_private *dev_priv = dev->dev_private;
6691 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6692 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6693 
6694 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6695 		return 800000;
6696 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6697 		return 450000;
6698 	else if (freq == LCPLL_CLK_FREQ_450)
6699 		return 450000;
6700 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6701 		return 540000;
6702 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6703 		return 337500;
6704 	else
6705 		return 675000;
6706 }
6707 
6708 static int haswell_get_display_clock_speed(struct drm_device *dev)
6709 {
6710 	struct drm_i915_private *dev_priv = dev->dev_private;
6711 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6712 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6713 
6714 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6715 		return 800000;
6716 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6717 		return 450000;
6718 	else if (freq == LCPLL_CLK_FREQ_450)
6719 		return 450000;
6720 	else if (IS_HSW_ULT(dev))
6721 		return 337500;
6722 	else
6723 		return 540000;
6724 }
6725 
6726 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6727 {
6728 	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6729 				      CCK_DISPLAY_CLOCK_CONTROL);
6730 }
6731 
6732 static int ilk_get_display_clock_speed(struct drm_device *dev)
6733 {
6734 	return 450000;
6735 }
6736 
6737 static int i945_get_display_clock_speed(struct drm_device *dev)
6738 {
6739 	return 400000;
6740 }
6741 
6742 static int i915_get_display_clock_speed(struct drm_device *dev)
6743 {
6744 	return 333333;
6745 }
6746 
6747 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6748 {
6749 	return 200000;
6750 }
6751 
6752 static int pnv_get_display_clock_speed(struct drm_device *dev)
6753 {
6754 	u16 gcfgc = 0;
6755 
6756 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6757 
6758 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6759 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6760 		return 266667;
6761 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6762 		return 333333;
6763 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6764 		return 444444;
6765 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6766 		return 200000;
6767 	default:
6768 		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6769 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6770 		return 133333;
6771 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6772 		return 166667;
6773 	}
6774 }
6775 
6776 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6777 {
6778 	u16 gcfgc = 0;
6779 
6780 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6781 
6782 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6783 		return 133333;
6784 	else {
6785 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6786 		case GC_DISPLAY_CLOCK_333_MHZ:
6787 			return 333333;
6788 		default:
6789 		case GC_DISPLAY_CLOCK_190_200_MHZ:
6790 			return 190000;
6791 		}
6792 	}
6793 }
6794 
6795 static int i865_get_display_clock_speed(struct drm_device *dev)
6796 {
6797 	return 266667;
6798 }
6799 
6800 static int i85x_get_display_clock_speed(struct drm_device *dev)
6801 {
6802 	u16 hpllcc = 0;
6803 
6804 	/*
6805 	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6806 	 * encoding is different :(
6807 	 * FIXME is this the right way to detect 852GM/852GMV?
6808 	 */
6809 	if (dev->pdev->revision == 0x1)
6810 		return 133333;
6811 
6812 #if 0
6813 	pci_bus_read_config_word(dev->pdev->bus,
6814 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6815 #endif
6816 
6817 	/* Assume that the hardware is in the high speed state.  This
6818 	 * should be the default.
6819 	 */
6820 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6821 	case GC_CLOCK_133_200:
6822 	case GC_CLOCK_133_200_2:
6823 	case GC_CLOCK_100_200:
6824 		return 200000;
6825 	case GC_CLOCK_166_250:
6826 		return 250000;
6827 	case GC_CLOCK_100_133:
6828 		return 133333;
6829 	case GC_CLOCK_133_266:
6830 	case GC_CLOCK_133_266_2:
6831 	case GC_CLOCK_166_266:
6832 		return 266667;
6833 	}
6834 
6835 	/* Shouldn't happen */
6836 	return 0;
6837 }
6838 
6839 static int i830_get_display_clock_speed(struct drm_device *dev)
6840 {
6841 	return 133333;
6842 }
6843 
6844 static unsigned int intel_hpll_vco(struct drm_device *dev)
6845 {
6846 	struct drm_i915_private *dev_priv = dev->dev_private;
6847 	static const unsigned int blb_vco[8] = {
6848 		[0] = 3200000,
6849 		[1] = 4000000,
6850 		[2] = 5333333,
6851 		[3] = 4800000,
6852 		[4] = 6400000,
6853 	};
6854 	static const unsigned int pnv_vco[8] = {
6855 		[0] = 3200000,
6856 		[1] = 4000000,
6857 		[2] = 5333333,
6858 		[3] = 4800000,
6859 		[4] = 2666667,
6860 	};
6861 	static const unsigned int cl_vco[8] = {
6862 		[0] = 3200000,
6863 		[1] = 4000000,
6864 		[2] = 5333333,
6865 		[3] = 6400000,
6866 		[4] = 3333333,
6867 		[5] = 3566667,
6868 		[6] = 4266667,
6869 	};
6870 	static const unsigned int elk_vco[8] = {
6871 		[0] = 3200000,
6872 		[1] = 4000000,
6873 		[2] = 5333333,
6874 		[3] = 4800000,
6875 	};
6876 	static const unsigned int ctg_vco[8] = {
6877 		[0] = 3200000,
6878 		[1] = 4000000,
6879 		[2] = 5333333,
6880 		[3] = 6400000,
6881 		[4] = 2666667,
6882 		[5] = 4266667,
6883 	};
6884 	const unsigned int *vco_table;
6885 	unsigned int vco;
6886 	uint8_t tmp = 0;
6887 
6888 	/* FIXME other chipsets? */
6889 	if (IS_GM45(dev))
6890 		vco_table = ctg_vco;
6891 	else if (IS_G4X(dev))
6892 		vco_table = elk_vco;
6893 	else if (IS_CRESTLINE(dev))
6894 		vco_table = cl_vco;
6895 	else if (IS_PINEVIEW(dev))
6896 		vco_table = pnv_vco;
6897 	else if (IS_G33(dev))
6898 		vco_table = blb_vco;
6899 	else
6900 		return 0;
6901 
6902 	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6903 
6904 	vco = vco_table[tmp & 0x7];
6905 	if (vco == 0)
6906 		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6907 	else
6908 		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6909 
6910 	return vco;
6911 }
6912 
6913 static int gm45_get_display_clock_speed(struct drm_device *dev)
6914 {
6915 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6916 	uint16_t tmp = 0;
6917 
6918 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6919 
6920 	cdclk_sel = (tmp >> 12) & 0x1;
6921 
6922 	switch (vco) {
6923 	case 2666667:
6924 	case 4000000:
6925 	case 5333333:
6926 		return cdclk_sel ? 333333 : 222222;
6927 	case 3200000:
6928 		return cdclk_sel ? 320000 : 228571;
6929 	default:
6930 		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
6931 		return 222222;
6932 	}
6933 }
6934 
6935 static int i965gm_get_display_clock_speed(struct drm_device *dev)
6936 {
6937 	static const uint8_t div_3200[] = { 16, 10,  8 };
6938 	static const uint8_t div_4000[] = { 20, 12, 10 };
6939 	static const uint8_t div_5333[] = { 24, 16, 14 };
6940 	const uint8_t *div_table;
6941 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6942 	uint16_t tmp = 0;
6943 
6944 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6945 
6946 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
6947 
6948 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6949 		goto fail;
6950 
6951 	switch (vco) {
6952 	case 3200000:
6953 		div_table = div_3200;
6954 		break;
6955 	case 4000000:
6956 		div_table = div_4000;
6957 		break;
6958 	case 5333333:
6959 		div_table = div_5333;
6960 		break;
6961 	default:
6962 		goto fail;
6963 	}
6964 
6965 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6966 
6967 fail:
6968 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6969 	return 200000;
6970 }
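/*
 * Illustrative example for the tables above: a 3200000 kHz HPLL VCO with
 * GCFGC selecting the first div_3200 entry (divider 16) yields
 * DIV_ROUND_CLOSEST(3200000, 16) = 200000 kHz, i.e. a 200 MHz cdclk.
 */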
6971 
6972 static int g33_get_display_clock_speed(struct drm_device *dev)
6973 {
6974 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
6975 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
6976 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
6977 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
6978 	const uint8_t *div_table;
6979 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6980 	uint16_t tmp = 0;
6981 
6982 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6983 
6984 	cdclk_sel = (tmp >> 4) & 0x7;
6985 
6986 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6987 		goto fail;
6988 
6989 	switch (vco) {
6990 	case 3200000:
6991 		div_table = div_3200;
6992 		break;
6993 	case 4000000:
6994 		div_table = div_4000;
6995 		break;
6996 	case 4800000:
6997 		div_table = div_4800;
6998 		break;
6999 	case 5333333:
7000 		div_table = div_5333;
7001 		break;
7002 	default:
7003 		goto fail;
7004 	}
7005 
7006 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7007 
7008 fail:
7009 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7010 	return 190476;
7011 }
7012 
7013 static void
7014 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7015 {
7016 	while (*num > DATA_LINK_M_N_MASK ||
7017 	       *den > DATA_LINK_M_N_MASK) {
7018 		*num >>= 1;
7019 		*den >>= 1;
7020 	}
7021 }
7022 
7023 static void compute_m_n(unsigned int m, unsigned int n,
7024 			uint32_t *ret_m, uint32_t *ret_n)
7025 {
7026 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7027 	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7028 	intel_reduce_m_n_ratio(ret_m, ret_n);
7029 }
7030 
7031 void
7032 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7033 		       int pixel_clock, int link_clock,
7034 		       struct intel_link_m_n *m_n)
7035 {
7036 	m_n->tu = 64;
7037 
7038 	compute_m_n(bits_per_pixel * pixel_clock,
7039 		    link_clock * nlanes * 8,
7040 		    &m_n->gmch_m, &m_n->gmch_n);
7041 
7042 	compute_m_n(pixel_clock, link_clock,
7043 		    &m_n->link_m, &m_n->link_n);
7044 }
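/*
 * Worked example for the helpers above (illustrative numbers, not from
 * any spec): a 148500 kHz pixel clock at 24 bpp over 4 lanes running at
 * a 270000 kHz link clock gives a data ratio of
 * (24 * 148500) / (270000 * 4 * 8) = 3564000 / 8640000 and a link ratio
 * of 148500 / 270000. compute_m_n() rounds N up to a power of two
 * (capped at DATA_LINK_N_MAX), rescales M to preserve the ratio, and
 * intel_reduce_m_n_ratio() then shifts both down until they fit within
 * DATA_LINK_M_N_MASK.
 */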
7045 
7046 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7047 {
7048 	if (i915.panel_use_ssc >= 0)
7049 		return i915.panel_use_ssc != 0;
7050 	return dev_priv->vbt.lvds_use_ssc
7051 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7052 }
7053 
7054 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7055 {
7056 	return (1 << dpll->n) << 16 | dpll->m2;
7057 }
7058 
7059 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7060 {
7061 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7062 }
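/*
 * The FP register packs the feedback dividers as N << 16 | M1 << 8 | M2;
 * Pineview uses a unit-shifted (1 << n) encoding for N and has no M1
 * divider, hence the separate helper above.
 */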
7063 
7064 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7065 				     struct intel_crtc_state *crtc_state,
7066 				     intel_clock_t *reduced_clock)
7067 {
7068 	struct drm_device *dev = crtc->base.dev;
7069 	u32 fp, fp2 = 0;
7070 
7071 	if (IS_PINEVIEW(dev)) {
7072 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7073 		if (reduced_clock)
7074 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7075 	} else {
7076 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7077 		if (reduced_clock)
7078 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7079 	}
7080 
7081 	crtc_state->dpll_hw_state.fp0 = fp;
7082 
7083 	crtc->lowfreq_avail = false;
7084 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7085 	    reduced_clock) {
7086 		crtc_state->dpll_hw_state.fp1 = fp2;
7087 		crtc->lowfreq_avail = true;
7088 	} else {
7089 		crtc_state->dpll_hw_state.fp1 = fp;
7090 	}
7091 }
7092 
7093 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
7094 				 enum i915_pipe pipe)
7095 {
7096 	u32 reg_val;
7097 
7098 	/*
7099 	 * The PLLB opamp always calibrates to the max value of 0x3f; force
7100 	 * enable it and set it to a reasonable value instead.
7101 	 */
7102 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7103 	reg_val &= 0xffffff00;
7104 	reg_val |= 0x00000030;
7105 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7106 
7107 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7108 	reg_val &= 0x00ffffff;
7109 	reg_val |= 0x8c000000;
7110 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7111 
7112 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7113 	reg_val &= 0xffffff00;
7114 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7115 
7116 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7117 	reg_val &= 0x00ffffff;
7118 	reg_val |= 0xb0000000;
7119 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7120 }
7121 
7122 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7123 					 struct intel_link_m_n *m_n)
7124 {
7125 	struct drm_device *dev = crtc->base.dev;
7126 	struct drm_i915_private *dev_priv = dev->dev_private;
7127 	int pipe = crtc->pipe;
7128 
7129 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7130 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7131 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7132 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7133 }
7134 
7135 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7136 					 struct intel_link_m_n *m_n,
7137 					 struct intel_link_m_n *m2_n2)
7138 {
7139 	struct drm_device *dev = crtc->base.dev;
7140 	struct drm_i915_private *dev_priv = dev->dev_private;
7141 	int pipe = crtc->pipe;
7142 	enum transcoder transcoder = crtc->config->cpu_transcoder;
7143 
7144 	if (INTEL_INFO(dev)->gen >= 5) {
7145 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7146 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7147 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7148 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7149 		/* The M2_N2 registers exist only on gen < 8 (and on CHV);
7150 		 * program them only when DRRS is also supported, so that
7151 		 * they are not accessed unnecessarily.
7152 		 */
7153 		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7154 			crtc->config->has_drrs) {
7155 			I915_WRITE(PIPE_DATA_M2(transcoder),
7156 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7157 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7158 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7159 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7160 		}
7161 	} else {
7162 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7163 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7164 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7165 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7166 	}
7167 }
7168 
7169 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7170 {
7171 	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7172 
7173 	if (m_n == M1_N1) {
7174 		dp_m_n = &crtc->config->dp_m_n;
7175 		dp_m2_n2 = &crtc->config->dp_m2_n2;
7176 	} else if (m_n == M2_N2) {
7177 
7178 		/*
7179 		 * M2_N2 registers are not supported, so the m2_n2 divider
7180 		 * value has to be programmed into M1_N1 instead.
7181 		 */
7182 		dp_m_n = &crtc->config->dp_m2_n2;
7183 	} else {
7184 		DRM_ERROR("Unsupported divider value\n");
7185 		return;
7186 	}
7187 
7188 	if (crtc->config->has_pch_encoder)
7189 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7190 	else
7191 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7192 }
7193 
7194 static void vlv_compute_dpll(struct intel_crtc *crtc,
7195 			     struct intel_crtc_state *pipe_config)
7196 {
7197 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7198 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7199 	if (crtc->pipe != PIPE_A)
7200 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7201 
7202 	/* DPLL not used with DSI, but still need the rest set up */
7203 	if (!pipe_config->has_dsi_encoder)
7204 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7205 			DPLL_EXT_BUFFER_ENABLE_VLV;
7206 
7207 	pipe_config->dpll_hw_state.dpll_md =
7208 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7209 }
7210 
7211 static void chv_compute_dpll(struct intel_crtc *crtc,
7212 			     struct intel_crtc_state *pipe_config)
7213 {
7214 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7215 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7216 	if (crtc->pipe != PIPE_A)
7217 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7218 
7219 	/* DPLL not used with DSI, but still need the rest set up */
7220 	if (!pipe_config->has_dsi_encoder)
7221 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7222 
7223 	pipe_config->dpll_hw_state.dpll_md =
7224 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7225 }
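/*
 * Note that both helpers above program the UDI multiplier field as
 * pixel_multiplier - 1, so the common multiplier of 1 results in a zero
 * DPLL_MD value.
 */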
7226 
7227 static void vlv_prepare_pll(struct intel_crtc *crtc,
7228 			    const struct intel_crtc_state *pipe_config)
7229 {
7230 	struct drm_device *dev = crtc->base.dev;
7231 	struct drm_i915_private *dev_priv = dev->dev_private;
7232 	enum i915_pipe pipe = crtc->pipe;
7233 	u32 mdiv;
7234 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7235 	u32 coreclk, reg_val;
7236 
7237 	/* Enable Refclk */
7238 	I915_WRITE(DPLL(pipe),
7239 		   pipe_config->dpll_hw_state.dpll &
7240 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7241 
7242 	/* No need to actually set up the DPLL with DSI */
7243 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7244 		return;
7245 
7246 	mutex_lock(&dev_priv->sb_lock);
7247 
7248 	bestn = pipe_config->dpll.n;
7249 	bestm1 = pipe_config->dpll.m1;
7250 	bestm2 = pipe_config->dpll.m2;
7251 	bestp1 = pipe_config->dpll.p1;
7252 	bestp2 = pipe_config->dpll.p2;
7253 
7254 	/* See eDP HDMI DPIO driver vbios notes doc */
7255 
7256 	/* PLL B needs special handling */
7257 	if (pipe == PIPE_B)
7258 		vlv_pllb_recal_opamp(dev_priv, pipe);
7259 
7260 	/* Set up Tx target for periodic Rcomp update */
7261 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7262 
7263 	/* Disable target IRef on PLL */
7264 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7265 	reg_val &= 0x00ffffff;
7266 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7267 
7268 	/* Disable fast lock */
7269 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7270 
7271 	/* Set idtafcrecal before PLL is enabled */
7272 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7273 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7274 	mdiv |= ((bestn << DPIO_N_SHIFT));
7275 	mdiv |= (1 << DPIO_K_SHIFT);
7276 
7277 	/*
7278 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7279 	 * but we don't support that).
7280 	 * Note: don't use the DAC post divider as it seems unstable.
7281 	 */
7282 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7283 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7284 
7285 	mdiv |= DPIO_ENABLE_CALIBRATION;
7286 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7287 
7288 	/* Set HBR and RBR LPF coefficients */
7289 	if (pipe_config->port_clock == 162000 ||
7290 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7291 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7292 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7293 				 0x009f0003);
7294 	else
7295 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7296 				 0x00d0000f);
7297 
7298 	if (pipe_config->has_dp_encoder) {
7299 		/* Use SSC source */
7300 		if (pipe == PIPE_A)
7301 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7302 					 0x0df40000);
7303 		else
7304 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7305 					 0x0df70000);
7306 	} else { /* HDMI or VGA */
7307 		/* Use bend source */
7308 		if (pipe == PIPE_A)
7309 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7310 					 0x0df70000);
7311 		else
7312 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7313 					 0x0df40000);
7314 	}
7315 
7316 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7317 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7318 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7319 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7320 		coreclk |= 0x01000000;
7321 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7322 
7323 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7324 	mutex_unlock(&dev_priv->sb_lock);
7325 }
7326 
7327 static void chv_prepare_pll(struct intel_crtc *crtc,
7328 			    const struct intel_crtc_state *pipe_config)
7329 {
7330 	struct drm_device *dev = crtc->base.dev;
7331 	struct drm_i915_private *dev_priv = dev->dev_private;
7332 	enum i915_pipe pipe = crtc->pipe;
7333 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7334 	u32 loopfilter, tribuf_calcntr;
7335 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7336 	u32 dpio_val;
7337 	int vco;
7338 
7339 	/* Enable Refclk and SSC */
7340 	I915_WRITE(DPLL(pipe),
7341 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7342 
7343 	/* No need to actually set up the DPLL with DSI */
7344 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7345 		return;
7346 
7347 	bestn = pipe_config->dpll.n;
7348 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7349 	bestm1 = pipe_config->dpll.m1;
7350 	bestm2 = pipe_config->dpll.m2 >> 22;
7351 	bestp1 = pipe_config->dpll.p1;
7352 	bestp2 = pipe_config->dpll.p2;
7353 	vco = pipe_config->dpll.vco;
7354 	dpio_val = 0;
7355 	loopfilter = 0;
7356 
7357 	mutex_lock(&dev_priv->sb_lock);
7358 
7359 	/* p1 and p2 divider */
7360 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7361 			5 << DPIO_CHV_S1_DIV_SHIFT |
7362 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7363 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7364 			1 << DPIO_CHV_K_DIV_SHIFT);
7365 
7366 	/* Feedback post-divider - m2 */
7367 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7368 
7369 	/* Feedback refclk divider - n and m1 */
7370 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7371 			DPIO_CHV_M1_DIV_BY_2 |
7372 			1 << DPIO_CHV_N_DIV_SHIFT);
7373 
7374 	/* M2 fraction division */
7375 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7376 
7377 	/* M2 fraction division enable */
7378 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7379 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7380 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7381 	if (bestm2_frac)
7382 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7383 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7384 
7385 	/* Program digital lock detect threshold */
7386 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7387 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7388 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7389 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7390 	if (!bestm2_frac)
7391 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7392 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7393 
7394 	/* Loop filter */
7395 	if (vco == 5400000) {
7396 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7397 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7398 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7399 		tribuf_calcntr = 0x9;
7400 	} else if (vco <= 6200000) {
7401 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7402 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7403 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7404 		tribuf_calcntr = 0x9;
7405 	} else if (vco <= 6480000) {
7406 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7407 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7408 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7409 		tribuf_calcntr = 0x8;
7410 	} else {
7411 		/* Not supported. Apply the same limits as in the max case */
7412 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7413 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7414 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7415 		tribuf_calcntr = 0;
7416 	}
7417 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7418 
7419 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7420 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7421 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7422 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7423 
7424 	/* AFC Recal */
7425 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7426 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7427 			DPIO_AFC_RECAL);
7428 
7429 	mutex_unlock(&dev_priv->sb_lock);
7430 }
7431 
7432 /**
7433  * vlv_force_pll_on - forcibly enable just the PLL
7434  * @dev: drm device
7435  * @pipe: pipe PLL to enable
7436  * @dpll: PLL configuration
7437  *
7438  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7439  * in cases where we need the PLL enabled even when @pipe is not going to
7440  * be enabled.
7441  */
7442 int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
7443 		     const struct dpll *dpll)
7444 {
7445 	struct intel_crtc *crtc =
7446 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7447 	struct intel_crtc_state *pipe_config;
7448 
7449 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7450 	if (!pipe_config)
7451 		return -ENOMEM;
7452 
7453 	pipe_config->base.crtc = &crtc->base;
7454 	pipe_config->pixel_multiplier = 1;
7455 	pipe_config->dpll = *dpll;
7456 
7457 	if (IS_CHERRYVIEW(dev)) {
7458 		chv_compute_dpll(crtc, pipe_config);
7459 		chv_prepare_pll(crtc, pipe_config);
7460 		chv_enable_pll(crtc, pipe_config);
7461 	} else {
7462 		vlv_compute_dpll(crtc, pipe_config);
7463 		vlv_prepare_pll(crtc, pipe_config);
7464 		vlv_enable_pll(crtc, pipe_config);
7465 	}
7466 
7467 	kfree(pipe_config);
7468 
7469 	return 0;
7470 }
7471 
7472 /**
7473  * vlv_force_pll_off - forcibly disable just the PLL
7474  * @dev: drm device
7475  * @pipe: pipe PLL to disable
7476  *
7477  * Disable the PLL for @pipe, as the counterpart to vlv_force_pll_on()
7478  * for cases where the PLL was enabled without enabling @pipe itself.
7479  */
7480 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
7481 {
7482 	if (IS_CHERRYVIEW(dev))
7483 		chv_disable_pll(to_i915(dev), pipe);
7484 	else
7485 		vlv_disable_pll(to_i915(dev), pipe);
7486 }
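/*
 * Illustrative use of the pair above (hypothetical divider values), for
 * cases where the PLL must run while the pipe itself stays off:
 *
 *	static const struct dpll pll = {
 *		.m1 = 2, .m2 = 8, .n = 1, .p1 = 3, .p2 = 5,
 *	};
 *
 *	vlv_force_pll_on(dev, PIPE_A, &pll);
 *	... touch hardware that needs a running PLL ...
 *	vlv_force_pll_off(dev, PIPE_A);
 */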
7487 
7488 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7489 			      struct intel_crtc_state *crtc_state,
7490 			      intel_clock_t *reduced_clock)
7491 {
7492 	struct drm_device *dev = crtc->base.dev;
7493 	struct drm_i915_private *dev_priv = dev->dev_private;
7494 	u32 dpll;
7495 	bool is_sdvo;
7496 	struct dpll *clock = &crtc_state->dpll;
7497 
7498 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7499 
7500 	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7501 		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7502 
7503 	dpll = DPLL_VGA_MODE_DIS;
7504 
7505 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7506 		dpll |= DPLLB_MODE_LVDS;
7507 	else
7508 		dpll |= DPLLB_MODE_DAC_SERIAL;
7509 
7510 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7511 		dpll |= (crtc_state->pixel_multiplier - 1)
7512 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7513 	}
7514 
7515 	if (is_sdvo)
7516 		dpll |= DPLL_SDVO_HIGH_SPEED;
7517 
7518 	if (crtc_state->has_dp_encoder)
7519 		dpll |= DPLL_SDVO_HIGH_SPEED;
7520 
7521 	/* compute the one-hot P1 bitmask from the p1 divider, e.g. p1 == 3 sets bit 2 */
7522 	if (IS_PINEVIEW(dev))
7523 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7524 	else {
7525 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7526 		if (IS_G4X(dev) && reduced_clock)
7527 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7528 	}
7529 	switch (clock->p2) {
7530 	case 5:
7531 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7532 		break;
7533 	case 7:
7534 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7535 		break;
7536 	case 10:
7537 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7538 		break;
7539 	case 14:
7540 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7541 		break;
7542 	}
7543 	if (INTEL_INFO(dev)->gen >= 4)
7544 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7545 
7546 	if (crtc_state->sdvo_tv_clock)
7547 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7548 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7549 		 intel_panel_use_ssc(dev_priv))
7550 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7551 	else
7552 		dpll |= PLL_REF_INPUT_DREFCLK;
7553 
7554 	dpll |= DPLL_VCO_ENABLE;
7555 	crtc_state->dpll_hw_state.dpll = dpll;
7556 
7557 	if (INTEL_INFO(dev)->gen >= 4) {
7558 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7559 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7560 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7561 	}
7562 }
7563 
7564 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7565 			      struct intel_crtc_state *crtc_state,
7566 			      intel_clock_t *reduced_clock)
7567 {
7568 	struct drm_device *dev = crtc->base.dev;
7569 	struct drm_i915_private *dev_priv = dev->dev_private;
7570 	u32 dpll;
7571 	struct dpll *clock = &crtc_state->dpll;
7572 
7573 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7574 
7575 	dpll = DPLL_VGA_MODE_DIS;
7576 
7577 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7578 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7579 	} else {
7580 		if (clock->p1 == 2)
7581 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7582 		else
7583 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7584 		if (clock->p2 == 4)
7585 			dpll |= PLL_P2_DIVIDE_BY_4;
7586 	}
7587 
7588 	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7589 		dpll |= DPLL_DVO_2X_MODE;
7590 
7591 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7592 	    intel_panel_use_ssc(dev_priv))
7593 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7594 	else
7595 		dpll |= PLL_REF_INPUT_DREFCLK;
7596 
7597 	dpll |= DPLL_VCO_ENABLE;
7598 	crtc_state->dpll_hw_state.dpll = dpll;
7599 }
7600 
7601 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7602 {
7603 	struct drm_device *dev = intel_crtc->base.dev;
7604 	struct drm_i915_private *dev_priv = dev->dev_private;
7605 	enum i915_pipe pipe = intel_crtc->pipe;
7606 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7607 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7608 	uint32_t crtc_vtotal, crtc_vblank_end;
7609 	int vsyncshift = 0;
7610 
7611 	/* We need to be careful not to change the adjusted mode, otherwise
7612 	 * the hw state checker will get angry at the mismatch. */
7613 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7614 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7615 
7616 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7617 		/* the chip adds 2 halflines automatically */
7618 		crtc_vtotal -= 1;
7619 		crtc_vblank_end -= 1;
7620 
7621 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7622 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7623 		else
7624 			vsyncshift = adjusted_mode->crtc_hsync_start -
7625 				adjusted_mode->crtc_htotal / 2;
7626 		if (vsyncshift < 0)
7627 			vsyncshift += adjusted_mode->crtc_htotal;
7628 	}
7629 
7630 	if (INTEL_INFO(dev)->gen > 3)
7631 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7632 
7633 	I915_WRITE(HTOTAL(cpu_transcoder),
7634 		   (adjusted_mode->crtc_hdisplay - 1) |
7635 		   ((adjusted_mode->crtc_htotal - 1) << 16));
7636 	I915_WRITE(HBLANK(cpu_transcoder),
7637 		   (adjusted_mode->crtc_hblank_start - 1) |
7638 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7639 	I915_WRITE(HSYNC(cpu_transcoder),
7640 		   (adjusted_mode->crtc_hsync_start - 1) |
7641 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7642 
7643 	I915_WRITE(VTOTAL(cpu_transcoder),
7644 		   (adjusted_mode->crtc_vdisplay - 1) |
7645 		   ((crtc_vtotal - 1) << 16));
7646 	I915_WRITE(VBLANK(cpu_transcoder),
7647 		   (adjusted_mode->crtc_vblank_start - 1) |
7648 		   ((crtc_vblank_end - 1) << 16));
7649 	I915_WRITE(VSYNC(cpu_transcoder),
7650 		   (adjusted_mode->crtc_vsync_start - 1) |
7651 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7652 
7653 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7654 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7655 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7656 	 * bits. */
7657 	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7658 	    (pipe == PIPE_B || pipe == PIPE_C))
7659 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7660 
7661 }
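/*
 * All of the timing registers above pack two fields as
 * (start - 1) | ((end - 1) << 16). For an illustrative 1920-wide mode
 * with an htotal of 2200, HTOTAL becomes (1920 - 1) | ((2200 - 1) << 16)
 * = 0x0897077f.
 */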
7662 
7663 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7664 {
7665 	struct drm_device *dev = intel_crtc->base.dev;
7666 	struct drm_i915_private *dev_priv = dev->dev_private;
7667 	enum i915_pipe pipe = intel_crtc->pipe;
7668 
7669 	/* pipesrc controls the size that is scaled from, which should
7670 	 * always be the user's requested size.
7671 	 */
7672 	I915_WRITE(PIPESRC(pipe),
7673 		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7674 		   (intel_crtc->config->pipe_src_h - 1));
7675 }
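/*
 * Note that PIPESRC packs the width in the high half and the height in
 * the low half, the reverse of the timing registers above; the readout
 * in intel_get_pipe_src_size() below decodes it accordingly.
 */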
7676 
7677 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7678 				   struct intel_crtc_state *pipe_config)
7679 {
7680 	struct drm_device *dev = crtc->base.dev;
7681 	struct drm_i915_private *dev_priv = dev->dev_private;
7682 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7683 	uint32_t tmp;
7684 
7685 	tmp = I915_READ(HTOTAL(cpu_transcoder));
7686 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7687 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7688 	tmp = I915_READ(HBLANK(cpu_transcoder));
7689 	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7690 	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7691 	tmp = I915_READ(HSYNC(cpu_transcoder));
7692 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7693 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7694 
7695 	tmp = I915_READ(VTOTAL(cpu_transcoder));
7696 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7697 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7698 	tmp = I915_READ(VBLANK(cpu_transcoder));
7699 	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7700 	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7701 	tmp = I915_READ(VSYNC(cpu_transcoder));
7702 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7703 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7704 
7705 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7706 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7707 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7708 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7709 	}
7710 }
7711 
7712 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7713 				    struct intel_crtc_state *pipe_config)
7714 {
7715 	struct drm_device *dev = crtc->base.dev;
7716 	struct drm_i915_private *dev_priv = dev->dev_private;
7717 	u32 tmp;
7718 
7719 	tmp = I915_READ(PIPESRC(crtc->pipe));
7720 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7721 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7722 
7723 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7724 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7725 }
7726 
7727 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7728 				 struct intel_crtc_state *pipe_config)
7729 {
7730 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7731 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7732 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7733 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7734 
7735 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7736 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7737 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7738 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7739 
7740 	mode->flags = pipe_config->base.adjusted_mode.flags;
7741 	mode->type = DRM_MODE_TYPE_DRIVER;
7742 
7743 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7745 
7746 	mode->hsync = drm_mode_hsync(mode);
7747 	mode->vrefresh = drm_mode_vrefresh(mode);
7748 	drm_mode_set_name(mode);
7749 }
7750 
7751 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7752 {
7753 	struct drm_device *dev = intel_crtc->base.dev;
7754 	struct drm_i915_private *dev_priv = dev->dev_private;
7755 	uint32_t pipeconf;
7756 
7757 	pipeconf = 0;
7758 
7759 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7760 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7761 		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7762 
7763 	if (intel_crtc->config->double_wide)
7764 		pipeconf |= PIPECONF_DOUBLE_WIDE;
7765 
7766 	/* only g4x and later have fancy bpc/dither controls */
7767 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7768 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7769 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7770 			pipeconf |= PIPECONF_DITHER_EN |
7771 				    PIPECONF_DITHER_TYPE_SP;
7772 
7773 		switch (intel_crtc->config->pipe_bpp) {
7774 		case 18:
7775 			pipeconf |= PIPECONF_6BPC;
7776 			break;
7777 		case 24:
7778 			pipeconf |= PIPECONF_8BPC;
7779 			break;
7780 		case 30:
7781 			pipeconf |= PIPECONF_10BPC;
7782 			break;
7783 		default:
7784 			/* Case prevented by intel_choose_pipe_bpp_dither. */
7785 			BUG();
7786 		}
7787 	}
7788 
7789 	if (HAS_PIPE_CXSR(dev)) {
7790 		if (intel_crtc->lowfreq_avail) {
7791 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7792 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7793 		} else {
7794 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7795 		}
7796 	}
7797 
7798 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7799 		if (INTEL_INFO(dev)->gen < 4 ||
7800 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7801 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7802 		else
7803 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7804 	} else
7805 		pipeconf |= PIPECONF_PROGRESSIVE;
7806 
7807 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7808 	     intel_crtc->config->limited_color_range)
7809 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7810 
7811 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7812 	POSTING_READ(PIPECONF(intel_crtc->pipe));
7813 }
7814 
7815 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7816 				   struct intel_crtc_state *crtc_state)
7817 {
7818 	struct drm_device *dev = crtc->base.dev;
7819 	struct drm_i915_private *dev_priv = dev->dev_private;
7820 	const intel_limit_t *limit;
7821 	int refclk = 48000;
7822 
7823 	memset(&crtc_state->dpll_hw_state, 0,
7824 	       sizeof(crtc_state->dpll_hw_state));
7825 
7826 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7827 		if (intel_panel_use_ssc(dev_priv)) {
7828 			refclk = dev_priv->vbt.lvds_ssc_freq;
7829 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7830 		}
7831 
7832 		limit = &intel_limits_i8xx_lvds;
7833 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7834 		limit = &intel_limits_i8xx_dvo;
7835 	} else {
7836 		limit = &intel_limits_i8xx_dac;
7837 	}
7838 
7839 	if (!crtc_state->clock_set &&
7840 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7841 				 refclk, NULL, &crtc_state->dpll)) {
7842 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7843 		return -EINVAL;
7844 	}
7845 
7846 	i8xx_compute_dpll(crtc, crtc_state, NULL);
7847 
7848 	return 0;
7849 }
7850 
7851 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7852 				  struct intel_crtc_state *crtc_state)
7853 {
7854 	struct drm_device *dev = crtc->base.dev;
7855 	struct drm_i915_private *dev_priv = dev->dev_private;
7856 	const intel_limit_t *limit;
7857 	int refclk = 96000;
7858 
7859 	memset(&crtc_state->dpll_hw_state, 0,
7860 	       sizeof(crtc_state->dpll_hw_state));
7861 
7862 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7863 		if (intel_panel_use_ssc(dev_priv)) {
7864 			refclk = dev_priv->vbt.lvds_ssc_freq;
7865 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7866 		}
7867 
7868 		if (intel_is_dual_link_lvds(dev))
7869 			limit = &intel_limits_g4x_dual_channel_lvds;
7870 		else
7871 			limit = &intel_limits_g4x_single_channel_lvds;
7872 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7873 		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7874 		limit = &intel_limits_g4x_hdmi;
7875 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7876 		limit = &intel_limits_g4x_sdvo;
7877 	} else {
7878 		/* Fallback limits for all other output types */
7879 		limit = &intel_limits_i9xx_sdvo;
7880 	}
7881 
7882 	if (!crtc_state->clock_set &&
7883 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7884 				refclk, NULL, &crtc_state->dpll)) {
7885 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7886 		return -EINVAL;
7887 	}
7888 
7889 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7890 
7891 	return 0;
7892 }
7893 
7894 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7895 				  struct intel_crtc_state *crtc_state)
7896 {
7897 	struct drm_device *dev = crtc->base.dev;
7898 	struct drm_i915_private *dev_priv = dev->dev_private;
7899 	const intel_limit_t *limit;
7900 	int refclk = 96000;
7901 
7902 	memset(&crtc_state->dpll_hw_state, 0,
7903 	       sizeof(crtc_state->dpll_hw_state));
7904 
7905 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7906 		if (intel_panel_use_ssc(dev_priv)) {
7907 			refclk = dev_priv->vbt.lvds_ssc_freq;
7908 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7909 		}
7910 
7911 		limit = &intel_limits_pineview_lvds;
7912 	} else {
7913 		limit = &intel_limits_pineview_sdvo;
7914 	}
7915 
7916 	if (!crtc_state->clock_set &&
7917 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7918 				refclk, NULL, &crtc_state->dpll)) {
7919 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7920 		return -EINVAL;
7921 	}
7922 
7923 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7924 
7925 	return 0;
7926 }
7927 
7928 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7929 				   struct intel_crtc_state *crtc_state)
7930 {
7931 	struct drm_device *dev = crtc->base.dev;
7932 	struct drm_i915_private *dev_priv = dev->dev_private;
7933 	const intel_limit_t *limit;
7934 	int refclk = 96000;
7935 
7936 	memset(&crtc_state->dpll_hw_state, 0,
7937 	       sizeof(crtc_state->dpll_hw_state));
7938 
7939 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7940 		if (intel_panel_use_ssc(dev_priv)) {
7941 			refclk = dev_priv->vbt.lvds_ssc_freq;
7942 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7943 		}
7944 
7945 		limit = &intel_limits_i9xx_lvds;
7946 	} else {
7947 		limit = &intel_limits_i9xx_sdvo;
7948 	}
7949 
7950 	if (!crtc_state->clock_set &&
7951 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7952 				 refclk, NULL, &crtc_state->dpll)) {
7953 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7954 		return -EINVAL;
7955 	}
7956 
7957 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7958 
7959 	return 0;
7960 }
7961 
7962 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7963 				  struct intel_crtc_state *crtc_state)
7964 {
7965 	int refclk = 100000;
7966 	const intel_limit_t *limit = &intel_limits_chv;
7967 
7968 	memset(&crtc_state->dpll_hw_state, 0,
7969 	       sizeof(crtc_state->dpll_hw_state));
7970 
7971 	if (!crtc_state->clock_set &&
7972 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7973 				refclk, NULL, &crtc_state->dpll)) {
7974 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7975 		return -EINVAL;
7976 	}
7977 
7978 	chv_compute_dpll(crtc, crtc_state);
7979 
7980 	return 0;
7981 }
7982 
7983 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7984 				  struct intel_crtc_state *crtc_state)
7985 {
7986 	int refclk = 100000;
7987 	const intel_limit_t *limit = &intel_limits_vlv;
7988 
7989 	memset(&crtc_state->dpll_hw_state, 0,
7990 	       sizeof(crtc_state->dpll_hw_state));
7991 
7992 	if (!crtc_state->clock_set &&
7993 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7994 				refclk, NULL, &crtc_state->dpll)) {
7995 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7996 		return -EINVAL;
7997 	}
7998 
7999 	vlv_compute_dpll(crtc, crtc_state);
8000 
8001 	return 0;
8002 }
8003 
8004 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8005 				 struct intel_crtc_state *pipe_config)
8006 {
8007 	struct drm_device *dev = crtc->base.dev;
8008 	struct drm_i915_private *dev_priv = dev->dev_private;
8009 	uint32_t tmp;
8010 
8011 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8012 		return;
8013 
8014 	tmp = I915_READ(PFIT_CONTROL);
8015 	if (!(tmp & PFIT_ENABLE))
8016 		return;
8017 
8018 	/* Check whether the pfit is attached to our pipe. */
8019 	if (INTEL_INFO(dev)->gen < 4) {
8020 		if (crtc->pipe != PIPE_B)
8021 			return;
8022 	} else {
8023 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8024 			return;
8025 	}
8026 
8027 	pipe_config->gmch_pfit.control = tmp;
8028 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8029 }
8030 
8031 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8032 			       struct intel_crtc_state *pipe_config)
8033 {
8034 	struct drm_device *dev = crtc->base.dev;
8035 	struct drm_i915_private *dev_priv = dev->dev_private;
8036 	int pipe = pipe_config->cpu_transcoder;
8037 	intel_clock_t clock;
8038 	u32 mdiv;
8039 	int refclk = 100000;
8040 
8041 	/* In case of DSI, DPLL will not be used */
8042 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8043 		return;
8044 
8045 	mutex_lock(&dev_priv->sb_lock);
8046 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8047 	mutex_unlock(&dev_priv->sb_lock);
8048 
8049 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8050 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8051 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8052 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8053 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8054 
8055 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8056 }
8057 
8058 static void
8059 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8060 			      struct intel_initial_plane_config *plane_config)
8061 {
8062 	struct drm_device *dev = crtc->base.dev;
8063 	struct drm_i915_private *dev_priv = dev->dev_private;
8064 	u32 val, base, offset;
8065 	int pipe = crtc->pipe, plane = crtc->plane;
8066 	int fourcc, pixel_format;
8067 	unsigned int aligned_height;
8068 	struct drm_framebuffer *fb;
8069 	struct intel_framebuffer *intel_fb;
8070 
8071 	val = I915_READ(DSPCNTR(plane));
8072 	if (!(val & DISPLAY_PLANE_ENABLE))
8073 		return;
8074 
8075 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8076 	if (!intel_fb) {
8077 		DRM_DEBUG_KMS("failed to alloc fb\n");
8078 		return;
8079 	}
8080 
8081 	fb = &intel_fb->base;
8082 
8083 	if (INTEL_INFO(dev)->gen >= 4) {
8084 		if (val & DISPPLANE_TILED) {
8085 			plane_config->tiling = I915_TILING_X;
8086 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8087 		}
8088 	}
8089 
8090 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8091 	fourcc = i9xx_format_to_fourcc(pixel_format);
8092 	fb->pixel_format = fourcc;
8093 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8094 
8095 	if (INTEL_INFO(dev)->gen >= 4) {
8096 		if (plane_config->tiling)
8097 			offset = I915_READ(DSPTILEOFF(plane));
8098 		else
8099 			offset = I915_READ(DSPLINOFF(plane));
8100 		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8101 	} else {
8102 		base = I915_READ(DSPADDR(plane));
8103 	}
8104 	plane_config->base = base;
8105 
8106 	val = I915_READ(PIPESRC(pipe));
8107 	fb->width = ((val >> 16) & 0xfff) + 1;
8108 	fb->height = ((val >> 0) & 0xfff) + 1;
8109 
8110 	val = I915_READ(DSPSTRIDE(pipe));
8111 	fb->pitches[0] = val & 0xffffffc0;
8112 
8113 	aligned_height = intel_fb_align_height(dev, fb->height,
8114 					       fb->pixel_format,
8115 					       fb->modifier[0]);
8116 
8117 	plane_config->size = fb->pitches[0] * aligned_height;
8118 
8119 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8120 		      pipe_name(pipe), plane, fb->width, fb->height,
8121 		      fb->bits_per_pixel, base, fb->pitches[0],
8122 		      plane_config->size);
8123 
8124 	plane_config->fb = intel_fb;
8125 }
8126 
8127 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8128 			       struct intel_crtc_state *pipe_config)
8129 {
8130 	struct drm_device *dev = crtc->base.dev;
8131 	struct drm_i915_private *dev_priv = dev->dev_private;
8132 	int pipe = pipe_config->cpu_transcoder;
8133 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8134 	intel_clock_t clock;
8135 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8136 	int refclk = 100000;
8137 
8138 	/* In case of DSI, DPLL will not be used */
8139 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8140 		return;
8141 
8142 	mutex_lock(&dev_priv->sb_lock);
8143 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8144 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8145 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8146 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8147 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8148 	mutex_unlock(&dev_priv->sb_lock);
8149 
8150 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8151 	clock.m2 = (pll_dw0 & 0xff) << 22;
8152 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8153 		clock.m2 |= pll_dw2 & 0x3fffff;
8154 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8155 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8156 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8157 
8158 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8159 }
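/*
 * The m2 decode above mirrors the encode in chv_prepare_pll(): the
 * integer part of m2 lives in bits 29:22 of the logical value and the
 * 22-bit fraction, valid only when DPIO_CHV_FRAC_DIV_EN is set, in
 * bits 21:0.
 */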
8160 
8161 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8162 				 struct intel_crtc_state *pipe_config)
8163 {
8164 	struct drm_device *dev = crtc->base.dev;
8165 	struct drm_i915_private *dev_priv = dev->dev_private;
8166 	enum intel_display_power_domain power_domain;
8167 	uint32_t tmp;
8168 	bool ret;
8169 
8170 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8171 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8172 		return false;
8173 
8174 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8175 	pipe_config->shared_dpll = NULL;
8176 
8177 	ret = false;
8178 
8179 	tmp = I915_READ(PIPECONF(crtc->pipe));
8180 	if (!(tmp & PIPECONF_ENABLE))
8181 		goto out;
8182 
8183 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8184 		switch (tmp & PIPECONF_BPC_MASK) {
8185 		case PIPECONF_6BPC:
8186 			pipe_config->pipe_bpp = 18;
8187 			break;
8188 		case PIPECONF_8BPC:
8189 			pipe_config->pipe_bpp = 24;
8190 			break;
8191 		case PIPECONF_10BPC:
8192 			pipe_config->pipe_bpp = 30;
8193 			break;
8194 		default:
8195 			break;
8196 		}
8197 	}
8198 
8199 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8200 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8201 		pipe_config->limited_color_range = true;
8202 
8203 	if (INTEL_INFO(dev)->gen < 4)
8204 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8205 
8206 	intel_get_pipe_timings(crtc, pipe_config);
8207 	intel_get_pipe_src_size(crtc, pipe_config);
8208 
8209 	i9xx_get_pfit_config(crtc, pipe_config);
8210 
8211 	if (INTEL_INFO(dev)->gen >= 4) {
8212 		/* No way to read it out on pipes B and C */
8213 		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8214 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8215 		else
8216 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8217 		pipe_config->pixel_multiplier =
8218 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8219 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8220 		pipe_config->dpll_hw_state.dpll_md = tmp;
8221 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8222 		tmp = I915_READ(DPLL(crtc->pipe));
8223 		pipe_config->pixel_multiplier =
8224 			((tmp & SDVO_MULTIPLIER_MASK)
8225 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8226 	} else {
8227 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8228 		 * port and will be fixed up in the encoder->get_config
8229 		 * function. */
8230 		pipe_config->pixel_multiplier = 1;
8231 	}
8232 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8233 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8234 		/*
8235 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8236 		 * on 830. Filter it out here so that we don't
8237 		 * report errors due to that.
8238 		 */
8239 		if (IS_I830(dev))
8240 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8241 
8242 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8243 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8244 	} else {
8245 		/* Mask out read-only status bits. */
8246 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8247 						     DPLL_PORTC_READY_MASK |
8248 						     DPLL_PORTB_READY_MASK);
8249 	}
8250 
8251 	if (IS_CHERRYVIEW(dev))
8252 		chv_crtc_clock_get(crtc, pipe_config);
8253 	else if (IS_VALLEYVIEW(dev))
8254 		vlv_crtc_clock_get(crtc, pipe_config);
8255 	else
8256 		i9xx_crtc_clock_get(crtc, pipe_config);
8257 
8258 	/*
8259 	 * Normally the dotclock is filled in by the encoder .get_config()
8260 	 * but in case the pipe is enabled w/o any ports we need a sane
8261 	 * default.
8262 	 */
8263 	pipe_config->base.adjusted_mode.crtc_clock =
8264 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8265 
8266 	ret = true;
8267 
8268 out:
8269 	intel_display_power_put(dev_priv, power_domain);
8270 
8271 	return ret;
8272 }
8273 
8274 static void ironlake_init_pch_refclk(struct drm_device *dev)
8275 {
8276 	struct drm_i915_private *dev_priv = dev->dev_private;
8277 	struct intel_encoder *encoder;
8278 	int i;
8279 	u32 val, final;
8280 	bool has_lvds = false;
8281 	bool has_cpu_edp = false;
8282 	bool has_panel = false;
8283 	bool has_ck505 = false;
8284 	bool can_ssc = false;
8285 	bool using_ssc_source = false;
8286 
8287 	/* We need to take the global config into account */
8288 	for_each_intel_encoder(dev, encoder) {
8289 		switch (encoder->type) {
8290 		case INTEL_OUTPUT_LVDS:
8291 			has_panel = true;
8292 			has_lvds = true;
8293 			break;
8294 		case INTEL_OUTPUT_EDP:
8295 			has_panel = true;
8296 			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8297 				has_cpu_edp = true;
8298 			break;
8299 		default:
8300 			break;
8301 		}
8302 	}
8303 
8304 	if (HAS_PCH_IBX(dev)) {
8305 		has_ck505 = dev_priv->vbt.display_clock_mode;
8306 		can_ssc = has_ck505;
8307 	} else {
8308 		has_ck505 = false;
8309 		can_ssc = true;
8310 	}
8311 
8312 	/* Check if any DPLLs are using the SSC source */
8313 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8314 		u32 temp = I915_READ(PCH_DPLL(i));
8315 
8316 		if (!(temp & DPLL_VCO_ENABLE))
8317 			continue;
8318 
8319 		if ((temp & PLL_REF_INPUT_MASK) ==
8320 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8321 			using_ssc_source = true;
8322 			break;
8323 		}
8324 	}
8325 
8326 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8327 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8328 
8329 	/* Ironlake: try to set up the display reference clock before
8330 	 * enabling the DPLL. This is only under the driver's control
8331 	 * after the PCH B stepping; earlier steppings ignore this
8332 	 * setting.
8333 	 */
8334 	val = I915_READ(PCH_DREF_CONTROL);
8335 
8336 	/* As we must carefully and slowly disable/enable each source in turn,
8337 	 * compute the final state we want first and check if we need to
8338 	 * make any changes at all.
8339 	 */
8340 	final = val;
8341 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8342 	if (has_ck505)
8343 		final |= DREF_NONSPREAD_CK505_ENABLE;
8344 	else
8345 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8346 
8347 	final &= ~DREF_SSC_SOURCE_MASK;
8348 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8349 	final &= ~DREF_SSC1_ENABLE;
8350 
8351 	if (has_panel) {
8352 		final |= DREF_SSC_SOURCE_ENABLE;
8353 
8354 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8355 			final |= DREF_SSC1_ENABLE;
8356 
8357 		if (has_cpu_edp) {
8358 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8359 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8360 			else
8361 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8362 		} else
8363 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8364 	} else if (using_ssc_source) {
8365 		final |= DREF_SSC_SOURCE_ENABLE;
8366 		final |= DREF_SSC1_ENABLE;
8367 	}
8368 
8369 	if (final == val)
8370 		return;
8371 
8372 	/* Always enable nonspread source */
8373 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8374 
8375 	if (has_ck505)
8376 		val |= DREF_NONSPREAD_CK505_ENABLE;
8377 	else
8378 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8379 
8380 	if (has_panel) {
8381 		val &= ~DREF_SSC_SOURCE_MASK;
8382 		val |= DREF_SSC_SOURCE_ENABLE;
8383 
8384 		/* SSC must be turned on before enabling the CPU output */
8385 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8386 			DRM_DEBUG_KMS("Using SSC on panel\n");
8387 			val |= DREF_SSC1_ENABLE;
8388 		} else
8389 			val &= ~DREF_SSC1_ENABLE;
8390 
8391 		/* Get SSC going before enabling the outputs */
8392 		I915_WRITE(PCH_DREF_CONTROL, val);
8393 		POSTING_READ(PCH_DREF_CONTROL);
8394 		udelay(200);
8395 
8396 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8397 
8398 		/* Enable CPU source on CPU attached eDP */
8399 		if (has_cpu_edp) {
8400 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8401 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8402 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8403 			} else
8404 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8405 		} else
8406 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8407 
8408 		I915_WRITE(PCH_DREF_CONTROL, val);
8409 		POSTING_READ(PCH_DREF_CONTROL);
8410 		udelay(200);
8411 	} else {
8412 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8413 
8414 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8415 
8416 		/* Turn off CPU output */
8417 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8418 
8419 		I915_WRITE(PCH_DREF_CONTROL, val);
8420 		POSTING_READ(PCH_DREF_CONTROL);
8421 		udelay(200);
8422 
8423 		if (!using_ssc_source) {
8424 			DRM_DEBUG_KMS("Disabling SSC source\n");
8425 
8426 			/* Turn off the SSC source */
8427 			val &= ~DREF_SSC_SOURCE_MASK;
8428 			val |= DREF_SSC_SOURCE_DISABLE;
8429 
8430 			/* Turn off SSC1 */
8431 			val &= ~DREF_SSC1_ENABLE;
8432 
8433 			I915_WRITE(PCH_DREF_CONTROL, val);
8434 			POSTING_READ(PCH_DREF_CONTROL);
8435 			udelay(200);
8436 		}
8437 	}
8438 
8439 	BUG_ON(val != final);
8440 }
8441 
8442 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8443 {
8444 	uint32_t tmp;
8445 
8446 	tmp = I915_READ(SOUTH_CHICKEN2);
8447 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8448 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8449 
8450 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8451 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8452 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8453 
8454 	tmp = I915_READ(SOUTH_CHICKEN2);
8455 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8456 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8457 
8458 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8459 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8460 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8461 }
8462 
8463 /* WaMPhyProgramming:hsw */
8464 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8465 {
8466 	uint32_t tmp;
8467 
8468 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8469 	tmp &= ~(0xFF << 24);
8470 	tmp |= (0x12 << 24);
8471 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8472 
8473 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8474 	tmp |= (1 << 11);
8475 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8476 
8477 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8478 	tmp |= (1 << 11);
8479 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8480 
8481 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8482 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8483 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8484 
8485 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8486 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8487 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8488 
8489 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8490 	tmp &= ~(7 << 13);
8491 	tmp |= (5 << 13);
8492 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8493 
8494 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8495 	tmp &= ~(7 << 13);
8496 	tmp |= (5 << 13);
8497 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8498 
8499 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8500 	tmp &= ~0xFF;
8501 	tmp |= 0x1C;
8502 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8503 
8504 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8505 	tmp &= ~0xFF;
8506 	tmp |= 0x1C;
8507 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8508 
8509 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8510 	tmp &= ~(0xFF << 16);
8511 	tmp |= (0x1C << 16);
8512 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8513 
8514 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8515 	tmp &= ~(0xFF << 16);
8516 	tmp |= (0x1C << 16);
8517 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8518 
8519 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8520 	tmp |= (1 << 27);
8521 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8522 
8523 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8524 	tmp |= (1 << 27);
8525 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8526 
8527 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8528 	tmp &= ~(0xF << 28);
8529 	tmp |= (4 << 28);
8530 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8531 
8532 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8533 	tmp &= ~(0xF << 28);
8534 	tmp |= (4 << 28);
8535 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8536 }
8537 
8538 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8539  * Programming" based on the parameters passed:
8540  * - Sequence to enable CLKOUT_DP
8541  * - Sequence to enable CLKOUT_DP without spread
8542  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
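 *
 * Parameter mapping (as implied by the code below): with_spread selects the
 * spread vs. no-spread variants, and with_fdi additionally resets and
 * programs the PCH FDI mPHY; FDI use without spread is rejected by a WARN.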
8543  */
8544 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8545 				 bool with_fdi)
8546 {
8547 	struct drm_i915_private *dev_priv = dev->dev_private;
8548 	uint32_t reg, tmp;
8549 
8550 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8551 		with_spread = true;
8552 	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8553 		with_fdi = false;
8554 
8555 	mutex_lock(&dev_priv->sb_lock);
8556 
8557 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8558 	tmp &= ~SBI_SSCCTL_DISABLE;
8559 	tmp |= SBI_SSCCTL_PATHALT;
8560 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8561 
8562 	udelay(24);
8563 
8564 	if (with_spread) {
8565 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8566 		tmp &= ~SBI_SSCCTL_PATHALT;
8567 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8568 
8569 		if (with_fdi) {
8570 			lpt_reset_fdi_mphy(dev_priv);
8571 			lpt_program_fdi_mphy(dev_priv);
8572 		}
8573 	}
8574 
8575 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8576 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8577 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8578 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8579 
8580 	mutex_unlock(&dev_priv->sb_lock);
8581 }
8582 
8583 /* Sequence to disable CLKOUT_DP */
8584 static void lpt_disable_clkout_dp(struct drm_device *dev)
8585 {
8586 	struct drm_i915_private *dev_priv = dev->dev_private;
8587 	uint32_t reg, tmp;
8588 
8589 	mutex_lock(&dev_priv->sb_lock);
8590 
8591 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8592 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8593 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8594 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8595 
8596 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8597 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8598 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8599 			tmp |= SBI_SSCCTL_PATHALT;
8600 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8601 			udelay(32);
8602 		}
8603 		tmp |= SBI_SSCCTL_DISABLE;
8604 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8605 	}
8606 
8607 	mutex_unlock(&dev_priv->sb_lock);
8608 }
8609 
8610 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8611 
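/*
 * Worked example of the index mapping: BEND_IDX(-50) == 0, BEND_IDX(0) == 10
 * and BEND_IDX(50) == 20, so the 21 entries below cover -50..+50 in steps
 * of 5.
 */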
8612 static const uint16_t sscdivintphase[] = {
8613 	[BEND_IDX( 50)] = 0x3B23,
8614 	[BEND_IDX( 45)] = 0x3B23,
8615 	[BEND_IDX( 40)] = 0x3C23,
8616 	[BEND_IDX( 35)] = 0x3C23,
8617 	[BEND_IDX( 30)] = 0x3D23,
8618 	[BEND_IDX( 25)] = 0x3D23,
8619 	[BEND_IDX( 20)] = 0x3E23,
8620 	[BEND_IDX( 15)] = 0x3E23,
8621 	[BEND_IDX( 10)] = 0x3F23,
8622 	[BEND_IDX(  5)] = 0x3F23,
8623 	[BEND_IDX(  0)] = 0x0025,
8624 	[BEND_IDX( -5)] = 0x0025,
8625 	[BEND_IDX(-10)] = 0x0125,
8626 	[BEND_IDX(-15)] = 0x0125,
8627 	[BEND_IDX(-20)] = 0x0225,
8628 	[BEND_IDX(-25)] = 0x0225,
8629 	[BEND_IDX(-30)] = 0x0325,
8630 	[BEND_IDX(-35)] = 0x0325,
8631 	[BEND_IDX(-40)] = 0x0425,
8632 	[BEND_IDX(-45)] = 0x0425,
8633 	[BEND_IDX(-50)] = 0x0525,
8634 };
8635 
8636 /*
8637  * Bend CLKOUT_DP
8638  * steps -50 to 50 inclusive, in steps of 5
8639  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8640  * change in clock period = -(steps / 10) * 5.787 ps
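 * e.g. steps = +10 shortens the nominal 135MHz period (~7407.4 ps) by
 * about 5.787 ps, a bend of roughly +0.078%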
8641  */
8642 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8643 {
8644 	uint32_t tmp;
8645 	int idx = BEND_IDX(steps);
8646 
8647 	if (WARN_ON(steps % 5 != 0))
8648 		return;
8649 
8650 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8651 		return;
8652 
8653 	mutex_lock(&dev_priv->sb_lock);
8654 
8655 	if (steps % 10 != 0)
8656 		tmp = 0xAAAAAAAB;
8657 	else
8658 		tmp = 0x00000000;
8659 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8660 
8661 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8662 	tmp &= 0xffff0000;
8663 	tmp |= sscdivintphase[idx];
8664 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8665 
8666 	mutex_unlock(&dev_priv->sb_lock);
8667 }
8668 
8669 #undef BEND_IDX
8670 
8671 static void lpt_init_pch_refclk(struct drm_device *dev)
8672 {
8673 	struct intel_encoder *encoder;
8674 	bool has_vga = false;
8675 
8676 	for_each_intel_encoder(dev, encoder) {
8677 		switch (encoder->type) {
8678 		case INTEL_OUTPUT_ANALOG:
8679 			has_vga = true;
8680 			break;
8681 		default:
8682 			break;
8683 		}
8684 	}
8685 
8686 	if (has_vga) {
8687 		lpt_bend_clkout_dp(to_i915(dev), 0);
8688 		lpt_enable_clkout_dp(dev, true, true);
8689 	} else {
8690 		lpt_disable_clkout_dp(dev);
8691 	}
8692 }
8693 
8694 /*
8695  * Initialize reference clocks when the driver loads
8696  */
8697 void intel_init_pch_refclk(struct drm_device *dev)
8698 {
8699 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8700 		ironlake_init_pch_refclk(dev);
8701 	else if (HAS_PCH_LPT(dev))
8702 		lpt_init_pch_refclk(dev);
8703 }
8704 
8705 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8706 {
8707 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8708 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8709 	int pipe = intel_crtc->pipe;
8710 	uint32_t val;
8711 
8712 	val = 0;
8713 
8714 	switch (intel_crtc->config->pipe_bpp) {
8715 	case 18:
8716 		val |= PIPECONF_6BPC;
8717 		break;
8718 	case 24:
8719 		val |= PIPECONF_8BPC;
8720 		break;
8721 	case 30:
8722 		val |= PIPECONF_10BPC;
8723 		break;
8724 	case 36:
8725 		val |= PIPECONF_12BPC;
8726 		break;
8727 	default:
8728 		/* Case prevented by intel_choose_pipe_bpp_dither. */
8729 		BUG();
8730 	}
8731 
8732 	if (intel_crtc->config->dither)
8733 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8734 
8735 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8736 		val |= PIPECONF_INTERLACED_ILK;
8737 	else
8738 		val |= PIPECONF_PROGRESSIVE;
8739 
8740 	if (intel_crtc->config->limited_color_range)
8741 		val |= PIPECONF_COLOR_RANGE_SELECT;
8742 
8743 	I915_WRITE(PIPECONF(pipe), val);
8744 	POSTING_READ(PIPECONF(pipe));
8745 }
8746 
8747 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8748 {
8749 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8750 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8751 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8752 	u32 val = 0;
8753 
8754 	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8755 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8756 
8757 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8758 		val |= PIPECONF_INTERLACED_ILK;
8759 	else
8760 		val |= PIPECONF_PROGRESSIVE;
8761 
8762 	I915_WRITE(PIPECONF(cpu_transcoder), val);
8763 	POSTING_READ(PIPECONF(cpu_transcoder));
8764 }
8765 
8766 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8767 {
8768 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8769 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8770 
8771 	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8772 		u32 val = 0;
8773 
8774 		switch (intel_crtc->config->pipe_bpp) {
8775 		case 18:
8776 			val |= PIPEMISC_DITHER_6_BPC;
8777 			break;
8778 		case 24:
8779 			val |= PIPEMISC_DITHER_8_BPC;
8780 			break;
8781 		case 30:
8782 			val |= PIPEMISC_DITHER_10_BPC;
8783 			break;
8784 		case 36:
8785 			val |= PIPEMISC_DITHER_12_BPC;
8786 			break;
8787 		default:
8788 			/* Case prevented by pipe_config_set_bpp. */
8789 			BUG();
8790 		}
8791 
8792 		if (intel_crtc->config->dither)
8793 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8794 
8795 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8796 	}
8797 }
8798 
8799 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8800 {
8801 	/*
8802 	 * Account for spread spectrum to avoid
8803 	 * oversubscribing the link. Max center spread
8804 	 * is 2.5%; use 5% for safety's sake.
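	 *
	 * Worked example: target_clock = 148500 (kHz), bpp = 24 and
	 * link_bw = 270000 give bps = 3742200, so this returns
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.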
8805 	 */
8806 	u32 bps = target_clock * bpp * 21 / 20;
8807 	return DIV_ROUND_UP(bps, link_bw * 8);
8808 }
8809 
8810 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8811 {
8812 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8813 }
8814 
8815 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8816 				  struct intel_crtc_state *crtc_state,
8817 				  intel_clock_t *reduced_clock)
8818 {
8819 	struct drm_crtc *crtc = &intel_crtc->base;
8820 	struct drm_device *dev = crtc->dev;
8821 	struct drm_i915_private *dev_priv = dev->dev_private;
8822 	struct drm_atomic_state *state = crtc_state->base.state;
8823 	struct drm_connector *connector;
8824 	struct drm_connector_state *connector_state;
8825 	struct intel_encoder *encoder;
8826 	u32 dpll, fp, fp2;
8827 	int factor, i;
8828 	bool is_lvds = false, is_sdvo = false;
8829 
8830 	for_each_connector_in_state(state, connector, connector_state, i) {
8831 		if (connector_state->crtc != crtc_state->base.crtc)
8832 			continue;
8833 
8834 		encoder = to_intel_encoder(connector_state->best_encoder);
8835 
8836 		switch (encoder->type) {
8837 		case INTEL_OUTPUT_LVDS:
8838 			is_lvds = true;
8839 			break;
8840 		case INTEL_OUTPUT_SDVO:
8841 		case INTEL_OUTPUT_HDMI:
8842 			is_sdvo = true;
8843 			break;
8844 		default:
8845 			break;
8846 		}
8847 	}
8848 
8849 	/* Enable autotuning of the PLL clock (if permissible) */
8850 	factor = 21;
8851 	if (is_lvds) {
8852 		if ((intel_panel_use_ssc(dev_priv) &&
8853 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8854 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8855 			factor = 25;
8856 	} else if (crtc_state->sdvo_tv_clock)
8857 		factor = 20;
8858 
8859 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8860 
8861 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8862 		fp |= FP_CB_TUNE;
8863 
8864 	if (reduced_clock) {
8865 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
8866 
8867 		if (reduced_clock->m < factor * reduced_clock->n)
8868 			fp2 |= FP_CB_TUNE;
8869 	} else {
8870 		fp2 = fp;
8871 	}
8872 
8873 	dpll = 0;
8874 
8875 	if (is_lvds)
8876 		dpll |= DPLLB_MODE_LVDS;
8877 	else
8878 		dpll |= DPLLB_MODE_DAC_SERIAL;
8879 
8880 	dpll |= (crtc_state->pixel_multiplier - 1)
8881 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8882 
8883 	if (is_sdvo)
8884 		dpll |= DPLL_SDVO_HIGH_SPEED;
8885 	if (crtc_state->has_dp_encoder)
8886 		dpll |= DPLL_SDVO_HIGH_SPEED;
8887 
8888 	/* compute bitmask from p1 value */
8889 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8890 	/* also FPA1 */
8891 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
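	/* The P1 divider is encoded one-hot in both fields above:
	 * e.g. p1 == 2 sets only bit 1 of each field. */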
8892 
8893 	switch (crtc_state->dpll.p2) {
8894 	case 5:
8895 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8896 		break;
8897 	case 7:
8898 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8899 		break;
8900 	case 10:
8901 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8902 		break;
8903 	case 14:
8904 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8905 		break;
8906 	}
8907 
8908 	if (is_lvds && intel_panel_use_ssc(dev_priv))
8909 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8910 	else
8911 		dpll |= PLL_REF_INPUT_DREFCLK;
8912 
8913 	dpll |= DPLL_VCO_ENABLE;
8914 
8915 	crtc_state->dpll_hw_state.dpll = dpll;
8916 	crtc_state->dpll_hw_state.fp0 = fp;
8917 	crtc_state->dpll_hw_state.fp1 = fp2;
8918 }
8919 
8920 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8921 				       struct intel_crtc_state *crtc_state)
8922 {
8923 	struct drm_device *dev = crtc->base.dev;
8924 	struct drm_i915_private *dev_priv = dev->dev_private;
8925 	intel_clock_t reduced_clock;
8926 	bool has_reduced_clock = false;
8927 	struct intel_shared_dpll *pll;
8928 	const intel_limit_t *limit;
8929 	int refclk = 120000;
8930 
8931 	memset(&crtc_state->dpll_hw_state, 0,
8932 	       sizeof(crtc_state->dpll_hw_state));
8933 
8934 	crtc->lowfreq_avail = false;
8935 
8936 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8937 	if (!crtc_state->has_pch_encoder)
8938 		return 0;
8939 
8940 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8941 		if (intel_panel_use_ssc(dev_priv)) {
8942 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8943 				      dev_priv->vbt.lvds_ssc_freq);
8944 			refclk = dev_priv->vbt.lvds_ssc_freq;
8945 		}
8946 
8947 		if (intel_is_dual_link_lvds(dev)) {
8948 			if (refclk == 100000)
8949 				limit = &intel_limits_ironlake_dual_lvds_100m;
8950 			else
8951 				limit = &intel_limits_ironlake_dual_lvds;
8952 		} else {
8953 			if (refclk == 100000)
8954 				limit = &intel_limits_ironlake_single_lvds_100m;
8955 			else
8956 				limit = &intel_limits_ironlake_single_lvds;
8957 		}
8958 	} else {
8959 		limit = &intel_limits_ironlake_dac;
8960 	}
8961 
8962 	if (!crtc_state->clock_set &&
8963 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8964 				refclk, NULL, &crtc_state->dpll)) {
8965 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8966 		return -EINVAL;
8967 	}
8968 
8969 	ironlake_compute_dpll(crtc, crtc_state,
8970 			      has_reduced_clock ? &reduced_clock : NULL);
8971 
8972 	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
8973 	if (pll == NULL) {
8974 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8975 				 pipe_name(crtc->pipe));
8976 		return -EINVAL;
8977 	}
8978 
8979 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8980 	    has_reduced_clock)
8981 		crtc->lowfreq_avail = true;
8982 
8983 	return 0;
8984 }
8985 
8986 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8987 					 struct intel_link_m_n *m_n)
8988 {
8989 	struct drm_device *dev = crtc->base.dev;
8990 	struct drm_i915_private *dev_priv = dev->dev_private;
8991 	enum i915_pipe pipe = crtc->pipe;
8992 
8993 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8994 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8995 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8996 		& ~TU_SIZE_MASK;
8997 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8998 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8999 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9000 }
9001 
9002 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9003 					 enum transcoder transcoder,
9004 					 struct intel_link_m_n *m_n,
9005 					 struct intel_link_m_n *m2_n2)
9006 {
9007 	struct drm_device *dev = crtc->base.dev;
9008 	struct drm_i915_private *dev_priv = dev->dev_private;
9009 	enum i915_pipe pipe = crtc->pipe;
9010 
9011 	if (INTEL_INFO(dev)->gen >= 5) {
9012 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9013 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9014 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9015 			& ~TU_SIZE_MASK;
9016 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9017 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9018 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9019 		/* Read the M2_N2 registers only on gen < 8, where they exist,
9020 		 * and only if DRRS is supported, so that the registers are
9021 		 * not read unnecessarily.
9022 		 */
9023 		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9024 			crtc->config->has_drrs) {
9025 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9026 			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9027 			m2_n2->gmch_m =	I915_READ(PIPE_DATA_M2(transcoder))
9028 					& ~TU_SIZE_MASK;
9029 			m2_n2->gmch_n =	I915_READ(PIPE_DATA_N2(transcoder));
9030 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9031 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9032 		}
9033 	} else {
9034 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9035 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9036 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9037 			& ~TU_SIZE_MASK;
9038 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9039 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9040 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9041 	}
9042 }
9043 
9044 void intel_dp_get_m_n(struct intel_crtc *crtc,
9045 		      struct intel_crtc_state *pipe_config)
9046 {
9047 	if (pipe_config->has_pch_encoder)
9048 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9049 	else
9050 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9051 					     &pipe_config->dp_m_n,
9052 					     &pipe_config->dp_m2_n2);
9053 }
9054 
9055 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9056 					struct intel_crtc_state *pipe_config)
9057 {
9058 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9059 				     &pipe_config->fdi_m_n, NULL);
9060 }
9061 
9062 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9063 				    struct intel_crtc_state *pipe_config)
9064 {
9065 	struct drm_device *dev = crtc->base.dev;
9066 	struct drm_i915_private *dev_priv = dev->dev_private;
9067 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9068 	uint32_t ps_ctrl = 0;
9069 	int id = -1;
9070 	int i;
9071 
9072 	/* find scaler attached to this pipe */
9073 	for (i = 0; i < crtc->num_scalers; i++) {
9074 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9075 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9076 			id = i;
9077 			pipe_config->pch_pfit.enabled = true;
9078 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9079 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9080 			break;
9081 		}
9082 	}
9083 
9084 	scaler_state->scaler_id = id;
9085 	if (id >= 0) {
9086 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9087 	} else {
9088 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9089 	}
9090 }
9091 
9092 static void
9093 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9094 				 struct intel_initial_plane_config *plane_config)
9095 {
9096 	struct drm_device *dev = crtc->base.dev;
9097 	struct drm_i915_private *dev_priv = dev->dev_private;
9098 	u32 val, base, offset, stride_mult, tiling;
9099 	int pipe = crtc->pipe;
9100 	int fourcc, pixel_format;
9101 	unsigned int aligned_height;
9102 	struct drm_framebuffer *fb;
9103 	struct intel_framebuffer *intel_fb;
9104 
9105 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9106 	if (!intel_fb) {
9107 		DRM_DEBUG_KMS("failed to alloc fb\n");
9108 		return;
9109 	}
9110 
9111 	fb = &intel_fb->base;
9112 
9113 	val = I915_READ(PLANE_CTL(pipe, 0));
9114 	if (!(val & PLANE_CTL_ENABLE))
9115 		goto error;
9116 
9117 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9118 	fourcc = skl_format_to_fourcc(pixel_format,
9119 				      val & PLANE_CTL_ORDER_RGBX,
9120 				      val & PLANE_CTL_ALPHA_MASK);
9121 	fb->pixel_format = fourcc;
9122 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9123 
9124 	tiling = val & PLANE_CTL_TILED_MASK;
9125 	switch (tiling) {
9126 	case PLANE_CTL_TILED_LINEAR:
9127 		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9128 		break;
9129 	case PLANE_CTL_TILED_X:
9130 		plane_config->tiling = I915_TILING_X;
9131 		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9132 		break;
9133 	case PLANE_CTL_TILED_Y:
9134 		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9135 		break;
9136 	case PLANE_CTL_TILED_YF:
9137 		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9138 		break;
9139 	default:
9140 		MISSING_CASE(tiling);
9141 		goto error;
9142 	}
9143 
9144 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9145 	plane_config->base = base;
9146 
9147 	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9148 
9149 	val = I915_READ(PLANE_SIZE(pipe, 0));
9150 	fb->height = ((val >> 16) & 0xfff) + 1;
9151 	fb->width = ((val >> 0) & 0x1fff) + 1;
9152 
9153 	val = I915_READ(PLANE_STRIDE(pipe, 0));
9154 	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9155 						fb->pixel_format);
9156 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9157 
9158 	aligned_height = intel_fb_align_height(dev, fb->height,
9159 					       fb->pixel_format,
9160 					       fb->modifier[0]);
9161 
9162 	plane_config->size = fb->pitches[0] * aligned_height;
9163 
9164 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9165 		      pipe_name(pipe), fb->width, fb->height,
9166 		      fb->bits_per_pixel, base, fb->pitches[0],
9167 		      plane_config->size);
9168 
9169 	plane_config->fb = intel_fb;
9170 	return;
9171 
9172 error:
9173 	kfree(fb);
9174 }
9175 
9176 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9177 				     struct intel_crtc_state *pipe_config)
9178 {
9179 	struct drm_device *dev = crtc->base.dev;
9180 	struct drm_i915_private *dev_priv = dev->dev_private;
9181 	uint32_t tmp;
9182 
9183 	tmp = I915_READ(PF_CTL(crtc->pipe));
9184 
9185 	if (tmp & PF_ENABLE) {
9186 		pipe_config->pch_pfit.enabled = true;
9187 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9188 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9189 
9190 		/* We currently do not free assignments of panel fitters on
9191 		 * ivb/hsw (since we don't use the higher upscaling modes which
9192 		 * differentiate them), so just WARN about this case for now. */
9193 		if (IS_GEN7(dev)) {
9194 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9195 				PF_PIPE_SEL_IVB(crtc->pipe));
9196 		}
9197 	}
9198 }
9199 
9200 static void
9201 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9202 				  struct intel_initial_plane_config *plane_config)
9203 {
9204 	struct drm_device *dev = crtc->base.dev;
9205 	struct drm_i915_private *dev_priv = dev->dev_private;
9206 	u32 val, base, offset;
9207 	int pipe = crtc->pipe;
9208 	int fourcc, pixel_format;
9209 	unsigned int aligned_height;
9210 	struct drm_framebuffer *fb;
9211 	struct intel_framebuffer *intel_fb;
9212 
9213 	val = I915_READ(DSPCNTR(pipe));
9214 	if (!(val & DISPLAY_PLANE_ENABLE))
9215 		return;
9216 
9217 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9218 	if (!intel_fb) {
9219 		DRM_DEBUG_KMS("failed to alloc fb\n");
9220 		return;
9221 	}
9222 
9223 	fb = &intel_fb->base;
9224 
9225 	if (INTEL_INFO(dev)->gen >= 4) {
9226 		if (val & DISPPLANE_TILED) {
9227 			plane_config->tiling = I915_TILING_X;
9228 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9229 		}
9230 	}
9231 
9232 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9233 	fourcc = i9xx_format_to_fourcc(pixel_format);
9234 	fb->pixel_format = fourcc;
9235 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9236 
9237 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9238 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9239 		offset = I915_READ(DSPOFFSET(pipe));
9240 	} else {
9241 		if (plane_config->tiling)
9242 			offset = I915_READ(DSPTILEOFF(pipe));
9243 		else
9244 			offset = I915_READ(DSPLINOFF(pipe));
9245 	}
9246 	plane_config->base = base;
9247 
9248 	val = I915_READ(PIPESRC(pipe));
9249 	fb->width = ((val >> 16) & 0xfff) + 1;
9250 	fb->height = ((val >> 0) & 0xfff) + 1;
9251 
9252 	val = I915_READ(DSPSTRIDE(pipe));
9253 	fb->pitches[0] = val & 0xffffffc0;
9254 
9255 	aligned_height = intel_fb_align_height(dev, fb->height,
9256 					       fb->pixel_format,
9257 					       fb->modifier[0]);
9258 
9259 	plane_config->size = fb->pitches[0] * aligned_height;
9260 
9261 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9262 		      pipe_name(pipe), fb->width, fb->height,
9263 		      fb->bits_per_pixel, base, fb->pitches[0],
9264 		      plane_config->size);
9265 
9266 	plane_config->fb = intel_fb;
9267 }
9268 
9269 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9270 				     struct intel_crtc_state *pipe_config)
9271 {
9272 	struct drm_device *dev = crtc->base.dev;
9273 	struct drm_i915_private *dev_priv = dev->dev_private;
9274 	enum intel_display_power_domain power_domain;
9275 	uint32_t tmp;
9276 	bool ret;
9277 
9278 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9279 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9280 		return false;
9281 
9282 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9283 	pipe_config->shared_dpll = NULL;
9284 
9285 	ret = false;
9286 	tmp = I915_READ(PIPECONF(crtc->pipe));
9287 	if (!(tmp & PIPECONF_ENABLE))
9288 		goto out;
9289 
9290 	switch (tmp & PIPECONF_BPC_MASK) {
9291 	case PIPECONF_6BPC:
9292 		pipe_config->pipe_bpp = 18;
9293 		break;
9294 	case PIPECONF_8BPC:
9295 		pipe_config->pipe_bpp = 24;
9296 		break;
9297 	case PIPECONF_10BPC:
9298 		pipe_config->pipe_bpp = 30;
9299 		break;
9300 	case PIPECONF_12BPC:
9301 		pipe_config->pipe_bpp = 36;
9302 		break;
9303 	default:
9304 		break;
9305 	}
9306 
9307 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9308 		pipe_config->limited_color_range = true;
9309 
9310 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9311 		struct intel_shared_dpll *pll;
9312 		enum intel_dpll_id pll_id;
9313 
9314 		pipe_config->has_pch_encoder = true;
9315 
9316 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9317 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9318 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9319 
9320 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9321 
9322 		if (HAS_PCH_IBX(dev_priv)) {
9323 			pll_id = (enum intel_dpll_id) crtc->pipe;
9324 		} else {
9325 			tmp = I915_READ(PCH_DPLL_SEL);
9326 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9327 				pll_id = DPLL_ID_PCH_PLL_B;
9328 			else
9329 				pll_id = DPLL_ID_PCH_PLL_A;
9330 		}
9331 
9332 		pipe_config->shared_dpll =
9333 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
9334 		pll = pipe_config->shared_dpll;
9335 
9336 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9337 						 &pipe_config->dpll_hw_state));
9338 
9339 		tmp = pipe_config->dpll_hw_state.dpll;
9340 		pipe_config->pixel_multiplier =
9341 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9342 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9343 
9344 		ironlake_pch_clock_get(crtc, pipe_config);
9345 	} else {
9346 		pipe_config->pixel_multiplier = 1;
9347 	}
9348 
9349 	intel_get_pipe_timings(crtc, pipe_config);
9350 	intel_get_pipe_src_size(crtc, pipe_config);
9351 
9352 	ironlake_get_pfit_config(crtc, pipe_config);
9353 
9354 	ret = true;
9355 
9356 out:
9357 	intel_display_power_put(dev_priv, power_domain);
9358 
9359 	return ret;
9360 }
9361 
9362 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9363 {
9364 	struct drm_device *dev = dev_priv->dev;
9365 	struct intel_crtc *crtc;
9366 
9367 	for_each_intel_crtc(dev, crtc)
9368 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9369 		     pipe_name(crtc->pipe));
9370 
9371 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9372 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9373 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9374 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9375 	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9376 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9377 	     "CPU PWM1 enabled\n");
9378 	if (IS_HASWELL(dev))
9379 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9380 		     "CPU PWM2 enabled\n");
9381 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9382 	     "PCH PWM1 enabled\n");
9383 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9384 	     "Utility pin enabled\n");
9385 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9386 
9387 	/*
9388 	 * In theory we can still leave IRQs enabled, as long as only the HPD
9389 	 * interrupts remain enabled. We used to check for that, but since it's
9390 	 * gen-specific and since we only disable LCPLL after we fully disable
9391 	 * the interrupts, the check below should be enough.
9392 	 */
9393 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9394 }
9395 
9396 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9397 {
9398 	struct drm_device *dev = dev_priv->dev;
9399 
9400 	if (IS_HASWELL(dev))
9401 		return I915_READ(D_COMP_HSW);
9402 	else
9403 		return I915_READ(D_COMP_BDW);
9404 }
9405 
9406 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9407 {
9408 	struct drm_device *dev = dev_priv->dev;
9409 
9410 	if (IS_HASWELL(dev)) {
9411 		mutex_lock(&dev_priv->rps.hw_lock);
9412 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9413 					    val))
9414 			DRM_ERROR("Failed to write to D_COMP\n");
9415 		mutex_unlock(&dev_priv->rps.hw_lock);
9416 	} else {
9417 		I915_WRITE(D_COMP_BDW, val);
9418 		POSTING_READ(D_COMP_BDW);
9419 	}
9420 }
9421 
9422 /*
9423  * This function implements pieces of two sequences from BSpec:
9424  * - Sequence for display software to disable LCPLL
9425  * - Sequence for display software to allow package C8+
9426  * The steps implemented here are just the steps that actually touch the LCPLL
9427  * register. Callers should take care of disabling all the display engine
9428  * functions, doing the mode unset, fixing interrupts, etc.
9429  */
9430 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9431 			      bool switch_to_fclk, bool allow_power_down)
9432 {
9433 	uint32_t val;
9434 
9435 	assert_can_disable_lcpll(dev_priv);
9436 
9437 	val = I915_READ(LCPLL_CTL);
9438 
9439 	if (switch_to_fclk) {
9440 		val |= LCPLL_CD_SOURCE_FCLK;
9441 		I915_WRITE(LCPLL_CTL, val);
9442 
9443 		if (wait_for_us(I915_READ(LCPLL_CTL) &
9444 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
9445 			DRM_ERROR("Switching to FCLK failed\n");
9446 
9447 		val = I915_READ(LCPLL_CTL);
9448 	}
9449 
9450 	val |= LCPLL_PLL_DISABLE;
9451 	I915_WRITE(LCPLL_CTL, val);
9452 	POSTING_READ(LCPLL_CTL);
9453 
9454 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9455 		DRM_ERROR("LCPLL still locked\n");
9456 
9457 	val = hsw_read_dcomp(dev_priv);
9458 	val |= D_COMP_COMP_DISABLE;
9459 	hsw_write_dcomp(dev_priv, val);
9460 	ndelay(100);
9461 
9462 	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9463 		     1))
9464 		DRM_ERROR("D_COMP RCOMP still in progress\n");
9465 
9466 	if (allow_power_down) {
9467 		val = I915_READ(LCPLL_CTL);
9468 		val |= LCPLL_POWER_DOWN_ALLOW;
9469 		I915_WRITE(LCPLL_CTL, val);
9470 		POSTING_READ(LCPLL_CTL);
9471 	}
9472 }
9473 
9474 /*
9475  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9476  * source.
9477  */
9478 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9479 {
9480 	uint32_t val;
9481 
9482 	val = I915_READ(LCPLL_CTL);
9483 
9484 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9485 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9486 		return;
9487 
9488 	/*
9489 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
9490 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9491 	 */
9492 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9493 
9494 	if (val & LCPLL_POWER_DOWN_ALLOW) {
9495 		val &= ~LCPLL_POWER_DOWN_ALLOW;
9496 		I915_WRITE(LCPLL_CTL, val);
9497 		POSTING_READ(LCPLL_CTL);
9498 	}
9499 
9500 	val = hsw_read_dcomp(dev_priv);
9501 	val |= D_COMP_COMP_FORCE;
9502 	val &= ~D_COMP_COMP_DISABLE;
9503 	hsw_write_dcomp(dev_priv, val);
9504 
9505 	val = I915_READ(LCPLL_CTL);
9506 	val &= ~LCPLL_PLL_DISABLE;
9507 	I915_WRITE(LCPLL_CTL, val);
9508 
9509 	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9510 		DRM_ERROR("LCPLL not locked yet\n");
9511 
9512 	if (val & LCPLL_CD_SOURCE_FCLK) {
9513 		val = I915_READ(LCPLL_CTL);
9514 		val &= ~LCPLL_CD_SOURCE_FCLK;
9515 		I915_WRITE(LCPLL_CTL, val);
9516 
9517 		if (wait_for_us((I915_READ(LCPLL_CTL) &
9518 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9519 			DRM_ERROR("Switching back to LCPLL failed\n");
9520 	}
9521 
9522 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9523 	intel_update_cdclk(dev_priv->dev);
9524 }
9525 
9526 /*
9527  * Package states C8 and deeper are really deep PC states that can only be
9528  * reached when all the devices on the system allow it, so even if the graphics
9529  * device allows PC8+, it doesn't mean the system will actually get to these
9530  * states. Our driver only allows PC8+ when going into runtime PM.
9531  *
9532  * The requirements for PC8+ are that all the outputs are disabled, the power
9533  * well is disabled and most interrupts are disabled, and these are also
9534  * requirements for runtime PM. When these conditions are met, we manually
9535  * take care of the remaining steps: disable the interrupts and clocks, and
9536  * switch the LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug
9537  * interrupt, we can hard hang the machine.
9538  *
9539  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9540  * the state of some registers, so when we come back from PC8+ we need to
9541  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9542  * need to take care of the registers kept by RC6. Notice that this happens even
9543  * if we don't put the device in PCI D3 state (which is what currently happens
9544  * because of the runtime PM support).
9545  *
9546  * For more, read "Display Sequences for Package C8" on the hardware
9547  * documentation.
9548  */
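/*
 * A minimal usage sketch, assuming runtime PM is the only caller (as
 * described above):
 *
 *	hsw_enable_pc8(dev_priv);	// runtime suspend: PC8+ now allowed
 *	...
 *	hsw_disable_pc8(dev_priv);	// runtime resume: restore LCPLL etc.
 */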
9549 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9550 {
9551 	struct drm_device *dev = dev_priv->dev;
9552 	uint32_t val;
9553 
9554 	DRM_DEBUG_KMS("Enabling package C8+\n");
9555 
9556 	if (HAS_PCH_LPT_LP(dev)) {
9557 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9558 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9559 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9560 	}
9561 
9562 	lpt_disable_clkout_dp(dev);
9563 	hsw_disable_lcpll(dev_priv, true, true);
9564 }
9565 
9566 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9567 {
9568 	struct drm_device *dev = dev_priv->dev;
9569 	uint32_t val;
9570 
9571 	DRM_DEBUG_KMS("Disabling package C8+\n");
9572 
9573 	hsw_restore_lcpll(dev_priv);
9574 	lpt_init_pch_refclk(dev);
9575 
9576 	if (HAS_PCH_LPT_LP(dev)) {
9577 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9578 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9579 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9580 	}
9581 }
9582 
9583 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9584 {
9585 	struct drm_device *dev = old_state->dev;
9586 	struct intel_atomic_state *old_intel_state =
9587 		to_intel_atomic_state(old_state);
9588 	unsigned int req_cdclk = old_intel_state->dev_cdclk;
9589 
9590 	broxton_set_cdclk(to_i915(dev), req_cdclk);
9591 }
9592 
9593 /* compute the max rate for the new configuration */
9594 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9595 {
9596 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9597 	struct drm_i915_private *dev_priv = state->dev->dev_private;
9598 	struct drm_crtc *crtc;
9599 	struct drm_crtc_state *cstate;
9600 	struct intel_crtc_state *crtc_state;
9601 	unsigned max_pixel_rate = 0, i;
9602 	enum i915_pipe pipe;
9603 
9604 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9605 	       sizeof(intel_state->min_pixclk));
9606 
9607 	for_each_crtc_in_state(state, crtc, cstate, i) {
9608 		int pixel_rate;
9609 
9610 		crtc_state = to_intel_crtc_state(cstate);
9611 		if (!crtc_state->base.enable) {
9612 			intel_state->min_pixclk[i] = 0;
9613 			continue;
9614 		}
9615 
9616 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9617 
9618 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
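		/* (DIV_ROUND_UP(rate * 100, 95) inflates e.g. 450000 kHz to
		 * 473685, so the plain rate-vs-cdclk comparison done by the
		 * caller enforces the 95% limit) */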
9619 		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9620 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9621 
9622 		intel_state->min_pixclk[i] = pixel_rate;
9623 	}
9624 
9625 	for_each_pipe(dev_priv, pipe)
9626 		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9627 
9628 	return max_pixel_rate;
9629 }
9630 
9631 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9632 {
9633 	struct drm_i915_private *dev_priv = dev->dev_private;
9634 	uint32_t val, data;
9635 	int ret;
9636 
9637 	if (WARN((I915_READ(LCPLL_CTL) &
9638 		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9639 		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9640 		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9641 		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9642 		 "trying to change cdclk frequency with cdclk not enabled\n"))
9643 		return;
9644 
9645 	mutex_lock(&dev_priv->rps.hw_lock);
9646 	ret = sandybridge_pcode_write(dev_priv,
9647 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9648 	mutex_unlock(&dev_priv->rps.hw_lock);
9649 	if (ret) {
9650 		DRM_ERROR("failed to inform pcode about cdclk change\n");
9651 		return;
9652 	}
9653 
9654 	val = I915_READ(LCPLL_CTL);
9655 	val |= LCPLL_CD_SOURCE_FCLK;
9656 	I915_WRITE(LCPLL_CTL, val);
9657 
9658 	if (wait_for_us(I915_READ(LCPLL_CTL) &
9659 			LCPLL_CD_SOURCE_FCLK_DONE, 1))
9660 		DRM_ERROR("Switching to FCLK failed\n");
9661 
9662 	val = I915_READ(LCPLL_CTL);
9663 	val &= ~LCPLL_CLK_FREQ_MASK;
9664 
9665 	switch (cdclk) {
9666 	case 450000:
9667 		val |= LCPLL_CLK_FREQ_450;
9668 		data = 0;
9669 		break;
9670 	case 540000:
9671 		val |= LCPLL_CLK_FREQ_54O_BDW;
9672 		data = 1;
9673 		break;
9674 	case 337500:
9675 		val |= LCPLL_CLK_FREQ_337_5_BDW;
9676 		data = 2;
9677 		break;
9678 	case 675000:
9679 		val |= LCPLL_CLK_FREQ_675_BDW;
9680 		data = 3;
9681 		break;
9682 	default:
9683 		WARN(1, "invalid cdclk frequency\n");
9684 		return;
9685 	}
9686 
9687 	I915_WRITE(LCPLL_CTL, val);
9688 
9689 	val = I915_READ(LCPLL_CTL);
9690 	val &= ~LCPLL_CD_SOURCE_FCLK;
9691 	I915_WRITE(LCPLL_CTL, val);
9692 
9693 	if (wait_for_us((I915_READ(LCPLL_CTL) &
9694 			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9695 		DRM_ERROR("Switching back to LCPLL failed\n");
9696 
9697 	mutex_lock(&dev_priv->rps.hw_lock);
9698 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9699 	mutex_unlock(&dev_priv->rps.hw_lock);
9700 
9701 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9702 
9703 	intel_update_cdclk(dev);
9704 
9705 	WARN(cdclk != dev_priv->cdclk_freq,
9706 	     "cdclk requested %d kHz but got %d kHz\n",
9707 	     cdclk, dev_priv->cdclk_freq);
9708 }
9709 
9710 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9711 {
9712 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9713 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9714 	int max_pixclk = ilk_max_pixel_rate(state);
9715 	int cdclk;
9716 
9717 	/*
9718 	 * FIXME should also account for plane ratio
9719 	 * once 64bpp pixel formats are supported.
9720 	 */
9721 	if (max_pixclk > 540000)
9722 		cdclk = 675000;
9723 	else if (max_pixclk > 450000)
9724 		cdclk = 540000;
9725 	else if (max_pixclk > 337500)
9726 		cdclk = 450000;
9727 	else
9728 		cdclk = 337500;
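	/* e.g. max_pixclk == 500000 falls in the 450000 < rate <= 540000
	 * range above and selects cdclk = 540000 */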
9729 
9730 	if (cdclk > dev_priv->max_cdclk_freq) {
9731 		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9732 			      cdclk, dev_priv->max_cdclk_freq);
9733 		return -EINVAL;
9734 	}
9735 
9736 	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9737 	if (!intel_state->active_crtcs)
9738 		intel_state->dev_cdclk = 337500;
9739 
9740 	return 0;
9741 }
9742 
9743 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9744 {
9745 	struct drm_device *dev = old_state->dev;
9746 	struct intel_atomic_state *old_intel_state =
9747 		to_intel_atomic_state(old_state);
9748 	unsigned req_cdclk = old_intel_state->dev_cdclk;
9749 
9750 	broadwell_set_cdclk(dev, req_cdclk);
9751 }
9752 
9753 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9754 				      struct intel_crtc_state *crtc_state)
9755 {
9756 	struct intel_encoder *intel_encoder =
9757 		intel_ddi_get_crtc_new_encoder(crtc_state);
9758 
9759 	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9760 		if (!intel_ddi_pll_select(crtc, crtc_state))
9761 			return -EINVAL;
9762 	}
9763 
9764 	crtc->lowfreq_avail = false;
9765 
9766 	return 0;
9767 }
9768 
9769 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9770 				enum port port,
9771 				struct intel_crtc_state *pipe_config)
9772 {
9773 	enum intel_dpll_id id;
9774 
9775 	switch (port) {
9776 	case PORT_A:
9777 		pipe_config->ddi_pll_sel = SKL_DPLL0;
9778 		id = DPLL_ID_SKL_DPLL0;
9779 		break;
9780 	case PORT_B:
9781 		pipe_config->ddi_pll_sel = SKL_DPLL1;
9782 		id = DPLL_ID_SKL_DPLL1;
9783 		break;
9784 	case PORT_C:
9785 		pipe_config->ddi_pll_sel = SKL_DPLL2;
9786 		id = DPLL_ID_SKL_DPLL2;
9787 		break;
9788 	default:
9789 		DRM_ERROR("Incorrect port type\n");
9790 		return;
9791 	}
9792 
9793 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9794 }
9795 
9796 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9797 				enum port port,
9798 				struct intel_crtc_state *pipe_config)
9799 {
9800 	enum intel_dpll_id id;
9801 	u32 temp;
9802 
9803 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9804 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9805 
9806 	switch (pipe_config->ddi_pll_sel) {
9807 	case SKL_DPLL0:
9808 		id = DPLL_ID_SKL_DPLL0;
9809 		break;
9810 	case SKL_DPLL1:
9811 		id = DPLL_ID_SKL_DPLL1;
9812 		break;
9813 	case SKL_DPLL2:
9814 		id = DPLL_ID_SKL_DPLL2;
9815 		break;
9816 	case SKL_DPLL3:
9817 		id = DPLL_ID_SKL_DPLL3;
9818 		break;
9819 	default:
9820 		MISSING_CASE(pipe_config->ddi_pll_sel);
9821 		return;
9822 	}
9823 
9824 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9825 }
9826 
9827 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9828 				enum port port,
9829 				struct intel_crtc_state *pipe_config)
9830 {
9831 	enum intel_dpll_id id;
9832 
9833 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9834 
9835 	switch (pipe_config->ddi_pll_sel) {
9836 	case PORT_CLK_SEL_WRPLL1:
9837 		id = DPLL_ID_WRPLL1;
9838 		break;
9839 	case PORT_CLK_SEL_WRPLL2:
9840 		id = DPLL_ID_WRPLL2;
9841 		break;
9842 	case PORT_CLK_SEL_SPLL:
9843 		id = DPLL_ID_SPLL;
9844 		break;
9845 	case PORT_CLK_SEL_LCPLL_810:
9846 		id = DPLL_ID_LCPLL_810;
9847 		break;
9848 	case PORT_CLK_SEL_LCPLL_1350:
9849 		id = DPLL_ID_LCPLL_1350;
9850 		break;
9851 	case PORT_CLK_SEL_LCPLL_2700:
9852 		id = DPLL_ID_LCPLL_2700;
9853 		break;
9854 	default:
9855 		MISSING_CASE(pipe_config->ddi_pll_sel);
9856 		/* fall through */
9857 	case PORT_CLK_SEL_NONE:
9858 		return;
9859 	}
9860 
9861 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9862 }
9863 
9864 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9865 				     struct intel_crtc_state *pipe_config,
9866 				     unsigned long *power_domain_mask)
9867 {
9868 	struct drm_device *dev = crtc->base.dev;
9869 	struct drm_i915_private *dev_priv = dev->dev_private;
9870 	enum intel_display_power_domain power_domain;
9871 	u32 tmp;
9872 
9873 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9874 
9875 	/*
9876 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9877 	 * consistency and less surprising code; it's in an always-on power well).
9878 	 */
9879 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9880 	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9881 		enum i915_pipe trans_edp_pipe;
9882 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9883 		default:
9884 			WARN(1, "unknown pipe linked to edp transcoder\n");
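			/* fall through */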
9885 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9886 		case TRANS_DDI_EDP_INPUT_A_ON:
9887 			trans_edp_pipe = PIPE_A;
9888 			break;
9889 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9890 			trans_edp_pipe = PIPE_B;
9891 			break;
9892 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9893 			trans_edp_pipe = PIPE_C;
9894 			break;
9895 		}
9896 
9897 		if (trans_edp_pipe == crtc->pipe)
9898 			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9899 	}
9900 
9901 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9902 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9903 		return false;
9904 	*power_domain_mask |= BIT(power_domain);
9905 
9906 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9907 
9908 	return tmp & PIPECONF_ENABLE;
9909 }
9910 
9911 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9912 					 struct intel_crtc_state *pipe_config,
9913 					 unsigned long *power_domain_mask)
9914 {
9915 	struct drm_device *dev = crtc->base.dev;
9916 	struct drm_i915_private *dev_priv = dev->dev_private;
9917 	enum intel_display_power_domain power_domain;
9918 	enum port port;
9919 	enum transcoder cpu_transcoder;
9920 	u32 tmp;
9921 
9922 	pipe_config->has_dsi_encoder = false;
9923 
9924 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9925 		if (port == PORT_A)
9926 			cpu_transcoder = TRANSCODER_DSI_A;
9927 		else
9928 			cpu_transcoder = TRANSCODER_DSI_C;
9929 
9930 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9931 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9932 			continue;
9933 		*power_domain_mask |= BIT(power_domain);
9934 
9935 		/*
9936 		 * The PLL needs to be enabled with a valid divider
9937 		 * configuration, otherwise accessing DSI registers will hang
9938 		 * the machine. See BSpec North Display Engine
9939 		 * registers/MIPI[BXT]. We can break out here early, since we
9940 		 * need the same DSI PLL to be enabled for both DSI ports.
9941 		 */
9942 		if (!intel_dsi_pll_is_enabled(dev_priv))
9943 			break;
9944 
9945 		/* XXX: this works for video mode only */
9946 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9947 		if (!(tmp & DPI_ENABLE))
9948 			continue;
9949 
9950 		tmp = I915_READ(MIPI_CTRL(port));
9951 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9952 			continue;
9953 
9954 		pipe_config->cpu_transcoder = cpu_transcoder;
9955 		pipe_config->has_dsi_encoder = true;
9956 		break;
9957 	}
9958 
9959 	return pipe_config->has_dsi_encoder;
9960 }
9961 
9962 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9963 				       struct intel_crtc_state *pipe_config)
9964 {
9965 	struct drm_device *dev = crtc->base.dev;
9966 	struct drm_i915_private *dev_priv = dev->dev_private;
9967 	struct intel_shared_dpll *pll;
9968 	enum port port;
9969 	uint32_t tmp;
9970 
9971 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9972 
9973 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9974 
9975 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9976 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9977 	else if (IS_BROXTON(dev))
9978 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9979 	else
9980 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9981 
9982 	pll = pipe_config->shared_dpll;
9983 	if (pll) {
9984 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9985 						 &pipe_config->dpll_hw_state));
9986 	}
9987 
9988 	/*
9989 	 * Haswell has only FDI/PCH transcoder A, which is always connected to
9990 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9991 	 * the PCH transcoder is on.
9992 	 */
9993 	if (INTEL_INFO(dev)->gen < 9 &&
9994 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9995 		pipe_config->has_pch_encoder = true;
9996 
9997 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9998 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9999 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10000 
10001 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10002 	}
10003 }
10004 
10005 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10006 				    struct intel_crtc_state *pipe_config)
10007 {
10008 	struct drm_device *dev = crtc->base.dev;
10009 	struct drm_i915_private *dev_priv = dev->dev_private;
10010 	enum intel_display_power_domain power_domain;
10011 	unsigned long power_domain_mask;
10012 	bool active;
10013 
10014 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10015 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10016 		return false;
10017 	power_domain_mask = BIT(power_domain);
10018 
10019 	pipe_config->shared_dpll = NULL;
10020 
10021 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10022 
10023 	if (IS_BROXTON(dev_priv)) {
10024 		bxt_get_dsi_transcoder_state(crtc, pipe_config,
10025 					     &power_domain_mask);
10026 		WARN_ON(active && pipe_config->has_dsi_encoder);
10027 		if (pipe_config->has_dsi_encoder)
10028 			active = true;
10029 	}
10030 
10031 	if (!active)
10032 		goto out;
10033 
10034 	if (!pipe_config->has_dsi_encoder) {
10035 		haswell_get_ddi_port_state(crtc, pipe_config);
10036 		intel_get_pipe_timings(crtc, pipe_config);
10037 	}
10038 
10039 	intel_get_pipe_src_size(crtc, pipe_config);
10040 
10041 	pipe_config->gamma_mode =
10042 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10043 
10044 	if (INTEL_INFO(dev)->gen >= 9) {
10045 		skl_init_scalers(dev, crtc, pipe_config);
10046 
10047 		pipe_config->scaler_state.scaler_id = -1;
10048 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10049 	}
10052 
10053 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10054 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10055 		power_domain_mask |= BIT(power_domain);
10056 		if (INTEL_INFO(dev)->gen >= 9)
10057 			skylake_get_pfit_config(crtc, pipe_config);
10058 		else
10059 			ironlake_get_pfit_config(crtc, pipe_config);
10060 	}
10061 
10062 	if (IS_HASWELL(dev))
10063 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10064 			(I915_READ(IPS_CTL) & IPS_ENABLE);
10065 
10066 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10067 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10068 		pipe_config->pixel_multiplier =
10069 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10070 	} else {
10071 		pipe_config->pixel_multiplier = 1;
10072 	}
10073 
10074 out:
10075 	for_each_power_domain(power_domain, power_domain_mask)
10076 		intel_display_power_put(dev_priv, power_domain);
10077 
10078 	return active;
10079 }
10080 
10081 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10082 			       const struct intel_plane_state *plane_state)
10083 {
10084 	struct drm_device *dev = crtc->dev;
10085 	struct drm_i915_private *dev_priv = dev->dev_private;
10086 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10087 	uint32_t cntl = 0, size = 0;
10088 
10089 	if (plane_state && plane_state->visible) {
10090 		unsigned int width = plane_state->base.crtc_w;
10091 		unsigned int height = plane_state->base.crtc_h;
10092 		unsigned int stride = roundup_pow_of_two(width) * 4;
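		/* e.g. a 100 pixel wide ARGB cursor rounds up to a 128 pixel
		 * (512 byte) stride */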
10093 
10094 		switch (stride) {
10095 		default:
10096 			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10097 				  width, stride);
10098 			stride = 256;
10099 			/* fallthrough */
10100 		case 256:
10101 		case 512:
10102 		case 1024:
10103 		case 2048:
10104 			break;
10105 		}
10106 
10107 		cntl |= CURSOR_ENABLE |
10108 			CURSOR_GAMMA_ENABLE |
10109 			CURSOR_FORMAT_ARGB |
10110 			CURSOR_STRIDE(stride);
10111 
10112 		size = (height << 12) | width;
10113 	}
10114 
10115 	if (intel_crtc->cursor_cntl != 0 &&
10116 	    (intel_crtc->cursor_base != base ||
10117 	     intel_crtc->cursor_size != size ||
10118 	     intel_crtc->cursor_cntl != cntl)) {
10119 		/* On these chipsets we can only modify the base/size/stride
10120 		 * whilst the cursor is disabled.
10121 		 */
10122 		I915_WRITE(CURCNTR(PIPE_A), 0);
10123 		POSTING_READ(CURCNTR(PIPE_A));
10124 		intel_crtc->cursor_cntl = 0;
10125 	}
10126 
10127 	if (intel_crtc->cursor_base != base) {
10128 		I915_WRITE(CURBASE(PIPE_A), base);
10129 		intel_crtc->cursor_base = base;
10130 	}
10131 
10132 	if (intel_crtc->cursor_size != size) {
10133 		I915_WRITE(CURSIZE, size);
10134 		intel_crtc->cursor_size = size;
10135 	}
10136 
10137 	if (intel_crtc->cursor_cntl != cntl) {
10138 		I915_WRITE(CURCNTR(PIPE_A), cntl);
10139 		POSTING_READ(CURCNTR(PIPE_A));
10140 		intel_crtc->cursor_cntl = cntl;
10141 	}
10142 }
10143 
10144 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10145 			       const struct intel_plane_state *plane_state)
10146 {
10147 	struct drm_device *dev = crtc->dev;
10148 	struct drm_i915_private *dev_priv = dev->dev_private;
10149 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10150 	int pipe = intel_crtc->pipe;
10151 	uint32_t cntl = 0;
10152 
10153 	if (plane_state && plane_state->visible) {
10154 		cntl = MCURSOR_GAMMA_ENABLE;
10155 		switch (plane_state->base.crtc_w) {
10156 			case 64:
10157 				cntl |= CURSOR_MODE_64_ARGB_AX;
10158 				break;
10159 			case 128:
10160 				cntl |= CURSOR_MODE_128_ARGB_AX;
10161 				break;
10162 			case 256:
10163 				cntl |= CURSOR_MODE_256_ARGB_AX;
10164 				break;
10165 			default:
10166 				MISSING_CASE(plane_state->base.crtc_w);
10167 				return;
10168 		}
10169 		cntl |= pipe << 28; /* Connect to correct pipe */
10170 
10171 		if (HAS_DDI(dev))
10172 			cntl |= CURSOR_PIPE_CSC_ENABLE;
10173 
10174 		if (plane_state->base.rotation == DRM_ROTATE_180)
10175 			cntl |= CURSOR_ROTATE_180;
10176 	}
10177 
10178 	if (intel_crtc->cursor_cntl != cntl) {
10179 		I915_WRITE(CURCNTR(pipe), cntl);
10180 		POSTING_READ(CURCNTR(pipe));
10181 		intel_crtc->cursor_cntl = cntl;
10182 	}
10183 
10184 	/* and commit changes on next vblank */
10185 	I915_WRITE(CURBASE(pipe), base);
10186 	POSTING_READ(CURBASE(pipe));
10187 
10188 	intel_crtc->cursor_base = base;
10189 }
10190 
10191 /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
10192 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10193 				     const struct intel_plane_state *plane_state)
10194 {
10195 	struct drm_device *dev = crtc->dev;
10196 	struct drm_i915_private *dev_priv = dev->dev_private;
10197 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10198 	int pipe = intel_crtc->pipe;
10199 	u32 base = intel_crtc->cursor_addr;
10200 	u32 pos = 0;
10201 
10202 	if (plane_state) {
10203 		int x = plane_state->base.crtc_x;
10204 		int y = plane_state->base.crtc_y;
10205 
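		/* CURPOS wants sign-magnitude coordinates: a sign bit per
		 * axis plus the absolute value, rather than two's complement */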
10206 		if (x < 0) {
10207 			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10208 			x = -x;
10209 		}
10210 		pos |= x << CURSOR_X_SHIFT;
10211 
10212 		if (y < 0) {
10213 			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10214 			y = -y;
10215 		}
10216 		pos |= y << CURSOR_Y_SHIFT;
10217 
10218 		/* ILK+ do this automagically */
10219 		if (HAS_GMCH_DISPLAY(dev) &&
10220 		    plane_state->base.rotation == DRM_ROTATE_180) {
10221 			base += (plane_state->base.crtc_h *
10222 				 plane_state->base.crtc_w - 1) * 4;
10223 		}
10224 	}
10225 
10226 	I915_WRITE(CURPOS(pipe), pos);
10227 
10228 	if (IS_845G(dev) || IS_I865G(dev))
10229 		i845_update_cursor(crtc, base, plane_state);
10230 	else
10231 		i9xx_update_cursor(crtc, base, plane_state);
10232 }
10233 
10234 static bool cursor_size_ok(struct drm_device *dev,
10235 			   uint32_t width, uint32_t height)
10236 {
10237 	if (width == 0 || height == 0)
10238 		return false;
10239 
10240 	/*
10241 	 * 845g/865g are special in that they are only limited by
10242 	 * the width of their cursors; the height is arbitrary up to
10243 	 * the precision of the register. Everything else requires
10244 	 * square cursors, limited to a few power-of-two sizes.
10245 	 */
10246 	if (IS_845G(dev) || IS_I865G(dev)) {
10247 		if ((width & 63) != 0)
10248 			return false;
10249 
10250 		if (width > (IS_845G(dev) ? 64 : 512))
10251 			return false;
10252 
10253 		if (height > 1023)
10254 			return false;
10255 	} else {
10256 		switch (width | height) {
10257 		case 256:
10258 		case 128:
10259 			if (IS_GEN2(dev))
10260 				return false;
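			/* fall through */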
10261 		case 64:
10262 			break;
10263 		default:
10264 			return false;
10265 		}
10266 	}
10267 
10268 	return true;
10269 }
10270 
10271 /* VESA 640x480x72Hz mode to set on the pipe */
10272 static struct drm_display_mode load_detect_mode = {
10273 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10274 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10275 };
10276 
10277 struct drm_framebuffer *
10278 __intel_framebuffer_create(struct drm_device *dev,
10279 			   struct drm_mode_fb_cmd2 *mode_cmd,
10280 			   struct drm_i915_gem_object *obj)
10281 {
10282 	struct intel_framebuffer *intel_fb;
10283 	int ret;
10284 
10285 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10286 	if (!intel_fb)
10287 		return ERR_PTR(-ENOMEM);
10288 
10289 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10290 	if (ret)
10291 		goto err;
10292 
10293 	return &intel_fb->base;
10294 
10295 err:
10296 	kfree(intel_fb);
10297 	return ERR_PTR(ret);
10298 }
10299 
10300 static struct drm_framebuffer *
10301 intel_framebuffer_create(struct drm_device *dev,
10302 			 struct drm_mode_fb_cmd2 *mode_cmd,
10303 			 struct drm_i915_gem_object *obj)
10304 {
10305 	struct drm_framebuffer *fb;
10306 	int ret;
10307 
10308 	ret = i915_mutex_lock_interruptible(dev);
10309 	if (ret)
10310 		return ERR_PTR(ret);
10311 	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10312 	mutex_unlock(&dev->struct_mutex);
10313 
10314 	return fb;
10315 }
10316 
10317 static u32
10318 intel_framebuffer_pitch_for_width(int width, int bpp)
10319 {
10320 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10321 	return ALIGN(pitch, 64);
10322 }
10323 
10324 static u32
10325 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10326 {
10327 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10328 	return PAGE_ALIGN(pitch * mode->vdisplay);
10329 }
10330 
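/*
 * Worked example (editor's note): for a 640x480 mode at 32bpp,
 * intel_framebuffer_pitch_for_width() yields
 * DIV_ROUND_UP(640 * 32, 8) = 2560 bytes, which is already 64-byte
 * aligned, and intel_framebuffer_size_for_mode() then returns
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes, exactly 300 4KiB pages.
 */
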
10331 static struct drm_framebuffer *
10332 intel_framebuffer_create_for_mode(struct drm_device *dev,
10333 				  struct drm_display_mode *mode,
10334 				  int depth, int bpp)
10335 {
10336 	struct drm_framebuffer *fb;
10337 	struct drm_i915_gem_object *obj;
10338 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10339 
10340 	obj = i915_gem_alloc_object(dev,
10341 				    intel_framebuffer_size_for_mode(mode, bpp));
10342 	if (obj == NULL)
10343 		return ERR_PTR(-ENOMEM);
10344 
10345 	mode_cmd.width = mode->hdisplay;
10346 	mode_cmd.height = mode->vdisplay;
10347 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10348 								bpp);
10349 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10350 
10351 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10352 	if (IS_ERR(fb))
10353 		drm_gem_object_unreference_unlocked(&obj->base);
10354 
10355 	return fb;
10356 }
10357 
10358 static struct drm_framebuffer *
10359 mode_fits_in_fbdev(struct drm_device *dev,
10360 		   struct drm_display_mode *mode)
10361 {
10362 #ifdef CONFIG_DRM_FBDEV_EMULATION
10363 	struct drm_i915_private *dev_priv = dev->dev_private;
10364 	struct drm_i915_gem_object *obj;
10365 	struct drm_framebuffer *fb;
10366 
10367 	if (!dev_priv->fbdev)
10368 		return NULL;
10369 
10370 	if (!dev_priv->fbdev->fb)
10371 		return NULL;
10372 
10373 	obj = dev_priv->fbdev->fb->obj;
10374 	BUG_ON(!obj);
10375 
10376 	fb = &dev_priv->fbdev->fb->base;
10377 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10378 							       fb->bits_per_pixel))
10379 		return NULL;
10380 
10381 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10382 		return NULL;
10383 
10384 	drm_framebuffer_reference(fb);
10385 	return fb;
10386 #else
10387 	return NULL;
10388 #endif
10389 }
10390 
10391 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10392 					   struct drm_crtc *crtc,
10393 					   struct drm_display_mode *mode,
10394 					   struct drm_framebuffer *fb,
10395 					   int x, int y)
10396 {
10397 	struct drm_plane_state *plane_state;
10398 	int hdisplay, vdisplay;
10399 	int ret;
10400 
10401 	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10402 	if (IS_ERR(plane_state))
10403 		return PTR_ERR(plane_state);
10404 
10405 	if (mode)
10406 		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10407 	else
10408 		hdisplay = vdisplay = 0;
10409 
10410 	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10411 	if (ret)
10412 		return ret;
10413 	drm_atomic_set_fb_for_plane(plane_state, fb);
10414 	plane_state->crtc_x = 0;
10415 	plane_state->crtc_y = 0;
10416 	plane_state->crtc_w = hdisplay;
10417 	plane_state->crtc_h = vdisplay;
10418 	plane_state->src_x = x << 16;
10419 	plane_state->src_y = y << 16;
10420 	plane_state->src_w = hdisplay << 16;
10421 	plane_state->src_h = vdisplay << 16;
10422 
10423 	return 0;
10424 }
10425 
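/*
 * Editor's note: the plane src_* fields are in 16.16 fixed point, which
 * is why the x/y offsets and the hdisplay/vdisplay sizes are shifted
 * left by 16 before being stored in intel_modeset_setup_plane_state()
 * above.
 */
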
10426 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10427 				struct drm_display_mode *mode,
10428 				struct intel_load_detect_pipe *old,
10429 				struct drm_modeset_acquire_ctx *ctx)
10430 {
10431 	struct intel_crtc *intel_crtc;
10432 	struct intel_encoder *intel_encoder =
10433 		intel_attached_encoder(connector);
10434 	struct drm_crtc *possible_crtc;
10435 	struct drm_encoder *encoder = &intel_encoder->base;
10436 	struct drm_crtc *crtc = NULL;
10437 	struct drm_device *dev = encoder->dev;
10438 	struct drm_framebuffer *fb;
10439 	struct drm_mode_config *config = &dev->mode_config;
10440 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10441 	struct drm_connector_state *connector_state;
10442 	struct intel_crtc_state *crtc_state;
10443 	int ret, i = -1;
10444 
10445 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10446 		      connector->base.id, connector->name,
10447 		      encoder->base.id, encoder->name);
10448 
10449 	old->restore_state = NULL;
10450 
10451 retry:
10452 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10453 	if (ret)
10454 		goto fail;
10455 
10456 	/*
10457 	 * Algorithm gets a little messy:
10458 	 *
10459 	 *   - if the connector already has an assigned crtc, use it (but make
10460 	 *     sure it's on first)
10461 	 *
10462 	 *   - try to find the first unused crtc that can drive this connector,
10463 	 *     and use that if we find one
10464 	 */
10465 
10466 	/* See if we already have a CRTC for this connector */
10467 	if (connector->state->crtc) {
10468 		crtc = connector->state->crtc;
10469 
10470 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10471 		if (ret)
10472 			goto fail;
10473 
10474 		/* Make sure the crtc and connector are running */
10475 		goto found;
10476 	}
10477 
10478 	/* Find an unused one (if possible) */
10479 	for_each_crtc(dev, possible_crtc) {
10480 		i++;
10481 		if (!(encoder->possible_crtcs & (1 << i)))
10482 			continue;
10483 
10484 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10485 		if (ret)
10486 			goto fail;
10487 
10488 		if (possible_crtc->state->enable) {
10489 			drm_modeset_unlock(&possible_crtc->mutex);
10490 			continue;
10491 		}
10492 
10493 		crtc = possible_crtc;
10494 		break;
10495 	}
10496 
10497 	/*
10498 	 * If we didn't find an unused CRTC, don't use any.
10499 	 */
10500 	if (!crtc) {
10501 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10502 		goto fail;
10503 	}
10504 
10505 found:
10506 	intel_crtc = to_intel_crtc(crtc);
10507 
10508 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10509 	if (ret)
10510 		goto fail;
10511 
10512 	state = drm_atomic_state_alloc(dev);
10513 	restore_state = drm_atomic_state_alloc(dev);
10514 	if (!state || !restore_state) {
10515 		ret = -ENOMEM;
10516 		goto fail;
10517 	}
10518 
10519 	state->acquire_ctx = ctx;
10520 	restore_state->acquire_ctx = ctx;
10521 
10522 	connector_state = drm_atomic_get_connector_state(state, connector);
10523 	if (IS_ERR(connector_state)) {
10524 		ret = PTR_ERR(connector_state);
10525 		goto fail;
10526 	}
10527 
10528 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10529 	if (ret)
10530 		goto fail;
10531 
10532 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10533 	if (IS_ERR(crtc_state)) {
10534 		ret = PTR_ERR(crtc_state);
10535 		goto fail;
10536 	}
10537 
10538 	crtc_state->base.active = crtc_state->base.enable = true;
10539 
10540 	if (!mode)
10541 		mode = &load_detect_mode;
10542 
10543 	/* We need a framebuffer large enough to accommodate all accesses
10544 	 * that the plane may generate whilst we perform load detection.
10545 	 * We cannot rely on the fbcon being present (we may be called
10546 	 * during its initialisation to detect all boot displays, or it may
10547 	 * not even exist), nor on it being large enough to satisfy the
10548 	 * requested mode.
10549 	 */
10550 	fb = mode_fits_in_fbdev(dev, mode);
10551 	if (fb == NULL) {
10552 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10553 		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10554 	} else
10555 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10556 	if (IS_ERR(fb)) {
10557 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10558 		goto fail;
10559 	}
10560 
10561 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10562 	if (ret)
10563 		goto fail;
10564 
10565 	drm_framebuffer_unreference(fb);
10566 
10567 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10568 	if (ret)
10569 		goto fail;
10570 
10571 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10572 	if (!ret)
10573 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10574 	if (!ret)
10575 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10576 	if (ret) {
10577 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10578 		goto fail;
10579 	}
10580 
10581 	ret = drm_atomic_commit(state);
10582 	if (ret) {
10583 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10584 		goto fail;
10585 	}
10586 
10587 	old->restore_state = restore_state;
10588 
10589 	/* let the connector get through one full cycle before testing */
10590 	intel_wait_for_vblank(dev, intel_crtc->pipe);
10591 	return true;
10592 
10593 fail:
10594 	drm_atomic_state_free(state);
10595 	drm_atomic_state_free(restore_state);
10596 	restore_state = state = NULL;
10597 
10598 	if (ret == -EDEADLK) {
10599 		drm_modeset_backoff(ctx);
10600 		goto retry;
10601 	}
10602 
10603 	return false;
10604 }
10605 
10606 void intel_release_load_detect_pipe(struct drm_connector *connector,
10607 				    struct intel_load_detect_pipe *old,
10608 				    struct drm_modeset_acquire_ctx *ctx)
10609 {
10610 	struct intel_encoder *intel_encoder =
10611 		intel_attached_encoder(connector);
10612 	struct drm_encoder *encoder = &intel_encoder->base;
10613 	struct drm_atomic_state *state = old->restore_state;
10614 	int ret;
10615 
10616 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10617 		      connector->base.id, connector->name,
10618 		      encoder->base.id, encoder->name);
10619 
10620 	if (!state)
10621 		return;
10622 
10623 	ret = drm_atomic_commit(state);
10624 	if (ret) {
10625 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10626 		drm_atomic_state_free(state);
10627 	}
10628 }
10629 
10630 static int i9xx_pll_refclk(struct drm_device *dev,
10631 			   const struct intel_crtc_state *pipe_config)
10632 {
10633 	struct drm_i915_private *dev_priv = dev->dev_private;
10634 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10635 
10636 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10637 		return dev_priv->vbt.lvds_ssc_freq;
10638 	else if (HAS_PCH_SPLIT(dev))
10639 		return 120000;
10640 	else if (!IS_GEN2(dev))
10641 		return 96000;
10642 	else
10643 		return 48000;
10644 }
10645 
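/*
 * Reference clock summary for i9xx_pll_refclk() (editor's note):
 *   LVDS fed from the spread-spectrum input -> VBT-provided SSC freq
 *   PCH-split platforms                     -> 120000 kHz
 *   gen3+ GMCH                              -> 96000 kHz
 *   gen2                                    -> 48000 kHz
 */
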
10646 /* Returns the clock of the currently programmed mode of the given pipe. */
10647 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10648 				struct intel_crtc_state *pipe_config)
10649 {
10650 	struct drm_device *dev = crtc->base.dev;
10651 	struct drm_i915_private *dev_priv = dev->dev_private;
10652 	int pipe = pipe_config->cpu_transcoder;
10653 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10654 	u32 fp;
10655 	intel_clock_t clock;
10656 	int port_clock;
10657 	int refclk = i9xx_pll_refclk(dev, pipe_config);
10658 
10659 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10660 		fp = pipe_config->dpll_hw_state.fp0;
10661 	else
10662 		fp = pipe_config->dpll_hw_state.fp1;
10663 
10664 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10665 	if (IS_PINEVIEW(dev)) {
10666 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10667 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10668 	} else {
10669 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10670 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10671 	}
10672 
10673 	if (!IS_GEN2(dev)) {
10674 		if (IS_PINEVIEW(dev))
10675 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10676 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10677 		else
10678 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10679 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10680 
10681 		switch (dpll & DPLL_MODE_MASK) {
10682 		case DPLLB_MODE_DAC_SERIAL:
10683 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10684 				5 : 10;
10685 			break;
10686 		case DPLLB_MODE_LVDS:
10687 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10688 				7 : 14;
10689 			break;
10690 		default:
10691 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10692 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10693 			return;
10694 		}
10695 
10696 		if (IS_PINEVIEW(dev))
10697 			port_clock = pnv_calc_dpll_params(refclk, &clock);
10698 		else
10699 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10700 	} else {
10701 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10702 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10703 
10704 		if (is_lvds) {
10705 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10706 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10707 
10708 			if (lvds & LVDS_CLKB_POWER_UP)
10709 				clock.p2 = 7;
10710 			else
10711 				clock.p2 = 14;
10712 		} else {
10713 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10714 				clock.p1 = 2;
10715 			else {
10716 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10717 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10718 			}
10719 			if (dpll & PLL_P2_DIVIDE_BY_4)
10720 				clock.p2 = 4;
10721 			else
10722 				clock.p2 = 2;
10723 		}
10724 
10725 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10726 	}
10727 
10728 	/*
10729 	 * This value includes pixel_multiplier. We will use
10730 	 * port_clock to compute adjusted_mode.crtc_clock in the
10731 	 * encoder's get_config() function.
10732 	 */
10733 	pipe_config->port_clock = port_clock;
10734 }
10735 
10736 int intel_dotclock_calculate(int link_freq,
10737 			     const struct intel_link_m_n *m_n)
10738 {
10739 	/*
10740 	 * The calculation for the data clock is:
10741 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10742 	 * But we want to avoid losing precision if possible, so:
10743 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10744 	 *
10745 	 * and the dot clock recovered from the link M/N is simpler:
10746 	 * dot_clock = (link_m * link_clock) / link_n
10747 	 */
10748 
10749 	if (!m_n->link_n)
10750 		return 0;
10751 
10752 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10753 }
10754 
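/*
 * Worked example (editor's note): with a link M/N ratio of 1024/2048
 * and a 270000 kHz link clock, intel_dotclock_calculate() returns
 * (1024 * 270000) / 2048 = 135000 kHz, i.e. half the link rate.
 */
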
10755 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10756 				   struct intel_crtc_state *pipe_config)
10757 {
10758 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10759 
10760 	/* read out port_clock from the DPLL */
10761 	i9xx_crtc_clock_get(crtc, pipe_config);
10762 
10763 	/*
10764 	 * In case there is an active pipe without active ports,
10765 	 * we may need some idea for the dotclock anyway.
10766 	 * Calculate one based on the FDI configuration.
10767 	 */
10768 	pipe_config->base.adjusted_mode.crtc_clock =
10769 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10770 					 &pipe_config->fdi_m_n);
10771 }
10772 
10773 /** Returns the currently programmed mode of the given pipe. */
10774 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10775 					     struct drm_crtc *crtc)
10776 {
10777 	struct drm_i915_private *dev_priv = dev->dev_private;
10778 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10779 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10780 	struct drm_display_mode *mode;
10781 	struct intel_crtc_state *pipe_config;
10782 	int htot = I915_READ(HTOTAL(cpu_transcoder));
10783 	int hsync = I915_READ(HSYNC(cpu_transcoder));
10784 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10785 	int vsync = I915_READ(VSYNC(cpu_transcoder));
10786 	enum i915_pipe pipe = intel_crtc->pipe;
10787 
10788 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10789 	if (!mode)
10790 		return NULL;
10791 
10792 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10793 	if (!pipe_config) {
10794 		kfree(mode);
10795 		return NULL;
10796 	}
10797 
10798 	/*
10799 	 * Construct a pipe_config sufficient for getting the clock info
10800 	 * back out of crtc_clock_get.
10801 	 *
10802 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10803 	 * to use a real value here instead.
10804 	 */
10805 	pipe_config->cpu_transcoder = (enum transcoder) pipe;
10806 	pipe_config->pixel_multiplier = 1;
10807 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10808 	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10809 	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10810 	i9xx_crtc_clock_get(intel_crtc, pipe_config);
10811 
10812 	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10813 	mode->hdisplay = (htot & 0xffff) + 1;
10814 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10815 	mode->hsync_start = (hsync & 0xffff) + 1;
10816 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10817 	mode->vdisplay = (vtot & 0xffff) + 1;
10818 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10819 	mode->vsync_start = (vsync & 0xffff) + 1;
10820 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10821 
10822 	drm_mode_set_name(mode);
10823 
10824 	kfree(pipe_config);
10825 
10826 	return mode;
10827 }
10828 
10829 void intel_mark_busy(struct drm_device *dev)
10830 {
10831 	struct drm_i915_private *dev_priv = dev->dev_private;
10832 
10833 	if (dev_priv->mm.busy)
10834 		return;
10835 
10836 	intel_runtime_pm_get(dev_priv);
10837 	i915_update_gfx_val(dev_priv);
10838 	if (INTEL_INFO(dev)->gen >= 6)
10839 		gen6_rps_busy(dev_priv);
10840 	dev_priv->mm.busy = true;
10841 }
10842 
10843 void intel_mark_idle(struct drm_device *dev)
10844 {
10845 	struct drm_i915_private *dev_priv = dev->dev_private;
10846 
10847 	if (!dev_priv->mm.busy)
10848 		return;
10849 
10850 	dev_priv->mm.busy = false;
10851 
10852 	if (INTEL_INFO(dev)->gen >= 6)
10853 		gen6_rps_idle(dev->dev_private);
10854 
10855 	intel_runtime_pm_put(dev_priv);
10856 }
10857 
10858 static void intel_crtc_destroy(struct drm_crtc *crtc)
10859 {
10860 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10861 	struct drm_device *dev = crtc->dev;
10862 	struct intel_unpin_work *work;
10863 
10864 	spin_lock_irq(&dev->event_lock);
10865 	work = intel_crtc->unpin_work;
10866 	intel_crtc->unpin_work = NULL;
10867 	spin_unlock_irq(&dev->event_lock);
10868 
10869 	if (work) {
10870 		cancel_work_sync(&work->work);
10871 		kfree(work);
10872 	}
10873 
10874 	drm_crtc_cleanup(crtc);
10875 
10876 	kfree(intel_crtc);
10877 }
10878 
10879 static void intel_unpin_work_fn(struct work_struct *__work)
10880 {
10881 	struct intel_unpin_work *work =
10882 		container_of(__work, struct intel_unpin_work, work);
10883 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10884 	struct drm_device *dev = crtc->base.dev;
10885 	struct drm_plane *primary = crtc->base.primary;
10886 
10887 	mutex_lock(&dev->struct_mutex);
10888 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10889 	drm_gem_object_unreference(&work->pending_flip_obj->base);
10890 
10891 	if (work->flip_queued_req)
10892 		i915_gem_request_assign(&work->flip_queued_req, NULL);
10893 	mutex_unlock(&dev->struct_mutex);
10894 
10895 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10896 	intel_fbc_post_update(crtc);
10897 	drm_framebuffer_unreference(work->old_fb);
10898 
10899 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10900 	atomic_dec(&crtc->unpin_work_count);
10901 
10902 	kfree(work);
10903 }
10904 
10905 static void do_intel_finish_page_flip(struct drm_device *dev,
10906 				      struct drm_crtc *crtc)
10907 {
10908 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10909 	struct intel_unpin_work *work;
10910 	unsigned long flags;
10911 
10912 	/* Ignore early vblank irqs */
10913 	if (intel_crtc == NULL)
10914 		return;
10915 
10916 	/*
10917 	 * This is called both by irq handlers and the reset code (to complete
10918 	 * lost pageflips) so needs the full irqsave spinlocks.
10919 	 */
10920 	spin_lock_irqsave(&dev->event_lock, flags);
10921 	work = intel_crtc->unpin_work;
10922 
10923 	/* Ensure we don't miss a work->pending update ... */
10924 	smp_rmb();
10925 
10926 	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10927 		spin_unlock_irqrestore(&dev->event_lock, flags);
10928 		return;
10929 	}
10930 
10931 	page_flip_completed(intel_crtc);
10932 
10933 	spin_unlock_irqrestore(&dev->event_lock, flags);
10934 }
10935 
10936 void intel_finish_page_flip(struct drm_device *dev, int pipe)
10937 {
10938 	struct drm_i915_private *dev_priv = dev->dev_private;
10939 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10940 
10941 	do_intel_finish_page_flip(dev, crtc);
10942 }
10943 
10944 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10945 {
10946 	struct drm_i915_private *dev_priv = dev->dev_private;
10947 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10948 
10949 	do_intel_finish_page_flip(dev, crtc);
10950 }
10951 
10952 /* Is 'a' after or equal to 'b'? */
10953 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10954 {
10955 	return !((a - b) & 0x80000000);
10956 }
10957 
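/*
 * Wraparound example for g4x_flip_count_after_eq() (editor's note):
 * with a = 1 and b = 0xffffffff, a - b wraps to 2, so the sign bit of
 * the difference is clear and 'a' is correctly treated as after 'b'
 * even though the raw counter overflowed.
 */
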
10958 static bool page_flip_finished(struct intel_crtc *crtc)
10959 {
10960 	struct drm_device *dev = crtc->base.dev;
10961 	struct drm_i915_private *dev_priv = dev->dev_private;
10962 	unsigned reset_counter;
10963 
10964 	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
10965 	if (crtc->reset_counter != reset_counter)
10966 		return true;
10967 
10968 	/*
10969 	 * The relevant registers don't exist on pre-ctg.
10970 	 * As the flip done interrupt doesn't trigger for mmio
10971 	 * flips on gmch platforms, a flip count check isn't
10972 	 * really needed there. But since ctg has the registers,
10973 	 * include it in the check anyway.
10974 	 */
10975 	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10976 		return true;
10977 
10978 	/*
10979 	 * BDW signals flip done immediately if the plane
10980 	 * is disabled, even if the plane enable is already
10981 	 * armed to occur at the next vblank :(
10982 	 */
10983 
10984 	/*
10985 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
10986 	 * used the same base address. In that case the mmio flip might
10987 	 * have completed, but the CS hasn't even executed the flip yet.
10988 	 *
10989 	 * A flip count check isn't enough as the CS might have updated
10990 	 * the base address just after start of vblank, but before we
10991 	 * managed to process the interrupt. This means we'd complete the
10992 	 * CS flip too soon.
10993 	 *
10994 	 * Combining both checks should get us a good enough result. It may
10995 	 * still happen that the CS flip has been executed, but has not
10996 	 * yet actually completed. But in case the base address is the same
10997 	 * anyway, we don't really care.
10998 	 */
10999 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11000 		crtc->unpin_work->gtt_offset &&
11001 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11002 				    crtc->unpin_work->flip_count);
11003 }
11004 
11005 void intel_prepare_page_flip(struct drm_device *dev, int plane)
11006 {
11007 	struct drm_i915_private *dev_priv = dev->dev_private;
11008 	struct intel_crtc *intel_crtc =
11009 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
11010 	unsigned long flags;
11011 
11012 
11013 	/*
11014 	 * This is called both by irq handlers and the reset code (to complete
11015 	 * lost pageflips) so needs the full irqsave spinlocks.
11016 	 *
11017 	 * NB: An MMIO update of the plane base pointer will also
11018 	 * generate a page-flip completion irq, i.e. every modeset
11019 	 * is also accompanied by a spurious intel_prepare_page_flip().
11020 	 */
11021 	spin_lock_irqsave(&dev->event_lock, flags);
11022 	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11023 		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11024 	spin_unlock_irqrestore(&dev->event_lock, flags);
11025 }
11026 
11027 static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11028 {
11029 	/* Ensure that the work item is consistent when activating it ... */
11030 	smp_wmb();
11031 	atomic_set(&work->pending, INTEL_FLIP_PENDING);
11032 	/* and that it is marked active as soon as the irq could fire. */
11033 	smp_wmb();
11034 }
11035 
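/*
 * Editor's note: the smp_wmb() barriers above are intended to pair with
 * the smp_rmb() in do_intel_finish_page_flip(), so an interrupt handler
 * that observes pending >= INTEL_FLIP_PENDING also observes a fully
 * initialised work item.
 */
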
11036 static int intel_gen2_queue_flip(struct drm_device *dev,
11037 				 struct drm_crtc *crtc,
11038 				 struct drm_framebuffer *fb,
11039 				 struct drm_i915_gem_object *obj,
11040 				 struct drm_i915_gem_request *req,
11041 				 uint32_t flags)
11042 {
11043 	struct intel_engine_cs *engine = req->engine;
11044 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11045 	u32 flip_mask;
11046 	int ret;
11047 
11048 	ret = intel_ring_begin(req, 6);
11049 	if (ret)
11050 		return ret;
11051 
11052 	/* Can't queue multiple flips, so wait for the previous
11053 	 * one to finish before executing the next.
11054 	 */
11055 	if (intel_crtc->plane)
11056 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11057 	else
11058 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11059 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11060 	intel_ring_emit(engine, MI_NOOP);
11061 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11062 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11063 	intel_ring_emit(engine, fb->pitches[0]);
11064 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11065 	intel_ring_emit(engine, 0); /* aux display base address, unused */
11066 
11067 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11068 	return 0;
11069 }
11070 
11071 static int intel_gen3_queue_flip(struct drm_device *dev,
11072 				 struct drm_crtc *crtc,
11073 				 struct drm_framebuffer *fb,
11074 				 struct drm_i915_gem_object *obj,
11075 				 struct drm_i915_gem_request *req,
11076 				 uint32_t flags)
11077 {
11078 	struct intel_engine_cs *engine = req->engine;
11079 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11080 	u32 flip_mask;
11081 	int ret;
11082 
11083 	ret = intel_ring_begin(req, 6);
11084 	if (ret)
11085 		return ret;
11086 
11087 	if (intel_crtc->plane)
11088 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11089 	else
11090 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11091 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11092 	intel_ring_emit(engine, MI_NOOP);
11093 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11094 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11095 	intel_ring_emit(engine, fb->pitches[0]);
11096 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11097 	intel_ring_emit(engine, MI_NOOP);
11098 
11099 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11100 	return 0;
11101 }
11102 
11103 static int intel_gen4_queue_flip(struct drm_device *dev,
11104 				 struct drm_crtc *crtc,
11105 				 struct drm_framebuffer *fb,
11106 				 struct drm_i915_gem_object *obj,
11107 				 struct drm_i915_gem_request *req,
11108 				 uint32_t flags)
11109 {
11110 	struct intel_engine_cs *engine = req->engine;
11111 	struct drm_i915_private *dev_priv = dev->dev_private;
11112 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11113 	uint32_t pf, pipesrc;
11114 	int ret;
11115 
11116 	ret = intel_ring_begin(req, 4);
11117 	if (ret)
11118 		return ret;
11119 
11120 	/* i965+ uses the linear or tiled offsets from the
11121 	 * Display Registers (which do not change across a page-flip)
11122 	 * so we need only reprogram the base address.
11123 	 */
11124 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11125 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11126 	intel_ring_emit(engine, fb->pitches[0]);
11127 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
11128 			obj->tiling_mode);
11129 
11130 	/* XXX Enabling the panel-fitter across page-flip is so far
11131 	 * untested on non-native modes, so ignore it for now.
11132 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11133 	 */
11134 	pf = 0;
11135 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11136 	intel_ring_emit(engine, pf | pipesrc);
11137 
11138 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11139 	return 0;
11140 }
11141 
11142 static int intel_gen6_queue_flip(struct drm_device *dev,
11143 				 struct drm_crtc *crtc,
11144 				 struct drm_framebuffer *fb,
11145 				 struct drm_i915_gem_object *obj,
11146 				 struct drm_i915_gem_request *req,
11147 				 uint32_t flags)
11148 {
11149 	struct intel_engine_cs *engine = req->engine;
11150 	struct drm_i915_private *dev_priv = dev->dev_private;
11151 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11152 	uint32_t pf, pipesrc;
11153 	int ret;
11154 
11155 	ret = intel_ring_begin(req, 4);
11156 	if (ret)
11157 		return ret;
11158 
11159 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11160 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11161 	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11162 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11163 
11164 	/* Contrary to the suggestions in the documentation,
11165 	 * "Enable Panel Fitter" does not seem to be required when page
11166 	 * flipping with a non-native mode, and worse causes a normal
11167 	 * modeset to fail.
11168 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11169 	 */
11170 	pf = 0;
11171 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11172 	intel_ring_emit(engine, pf | pipesrc);
11173 
11174 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11175 	return 0;
11176 }
11177 
11178 static int intel_gen7_queue_flip(struct drm_device *dev,
11179 				 struct drm_crtc *crtc,
11180 				 struct drm_framebuffer *fb,
11181 				 struct drm_i915_gem_object *obj,
11182 				 struct drm_i915_gem_request *req,
11183 				 uint32_t flags)
11184 {
11185 	struct intel_engine_cs *engine = req->engine;
11186 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11187 	uint32_t plane_bit = 0;
11188 	int len, ret;
11189 
11190 	switch (intel_crtc->plane) {
11191 	case PLANE_A:
11192 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11193 		break;
11194 	case PLANE_B:
11195 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11196 		break;
11197 	case PLANE_C:
11198 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11199 		break;
11200 	default:
11201 		WARN_ONCE(1, "unknown plane in flip command\n");
11202 		return -ENODEV;
11203 	}
11204 
11205 	len = 4;
11206 	if (engine->id == RCS) {
11207 		len += 6;
11208 		/*
11209 		 * On Gen 8, SRM is now taking an extra dword to accommodate
11210 		 * On Gen 8, SRM now takes an extra dword to accommodate
11211 		 * 48-bit addresses, and we need a NOOP for the batch size to
11212 		 */
11213 		if (IS_GEN8(dev))
11214 			len += 2;
11215 	}
11216 
11217 	/*
11218 	 * BSpec MI_DISPLAY_FLIP for IVB:
11219 	 * "The full packet must be contained within the same cache line."
11220 	 *
11221 	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11222 	 * cacheline, if we ever start emitting more commands before
11223 	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11224 	 * then do the cacheline alignment, and finally emit the
11225 	 * MI_DISPLAY_FLIP.
11226 	 */
11227 	ret = intel_ring_cacheline_align(req);
11228 	if (ret)
11229 		return ret;
11230 
11231 	ret = intel_ring_begin(req, len);
11232 	if (ret)
11233 		return ret;
11234 
11235 	/* Unmask the flip-done completion message. Note that the bspec says that
11236 	 * we should do this for both the BCS and RCS, and that we must not unmask
11237 	 * more than one flip event at any time (or ensure that one flip message
11238 	 * can be sent by waiting for flip-done prior to queueing new flips).
11239 	 * Experimentation says that BCS works despite DERRMR masking all
11240 	 * flip-done completion events and that unmasking all planes at once
11241 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11242 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11243 	 */
11244 	if (engine->id == RCS) {
11245 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11246 		intel_ring_emit_reg(engine, DERRMR);
11247 		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11248 					  DERRMR_PIPEB_PRI_FLIP_DONE |
11249 					  DERRMR_PIPEC_PRI_FLIP_DONE));
11250 		if (IS_GEN8(dev))
11251 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11252 					      MI_SRM_LRM_GLOBAL_GTT);
11253 		else
11254 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11255 					      MI_SRM_LRM_GLOBAL_GTT);
11256 		intel_ring_emit_reg(engine, DERRMR);
11257 		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11258 		if (IS_GEN8(dev)) {
11259 			intel_ring_emit(engine, 0);
11260 			intel_ring_emit(engine, MI_NOOP);
11261 		}
11262 	}
11263 
11264 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11265 	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11266 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11267 	intel_ring_emit(engine, (MI_NOOP));
11268 
11269 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11270 	return 0;
11271 }
11272 
11273 static bool use_mmio_flip(struct intel_engine_cs *engine,
11274 			  struct drm_i915_gem_object *obj)
11275 {
11276 	/*
11277 	 * This is not being used for older platforms, because
11278 	 * the non-availability of the flip done interrupt forces us to use
11279 	 * CS flips. Older platforms derive flip done using some clever
11280 	 * tricks involving the flip_pending status bits and vblank irqs.
11281 	 * So using MMIO flips there would disrupt this mechanism.
11282 	 */
11283 
11284 	if (engine == NULL)
11285 		return true;
11286 
11287 	if (INTEL_INFO(engine->dev)->gen < 5)
11288 		return false;
11289 
11290 	if (i915.use_mmio_flip < 0)
11291 		return false;
11292 	else if (i915.use_mmio_flip > 0)
11293 		return true;
11294 	else if (i915.enable_execlists)
11295 		return true;
11296 #if 0
11297 	else if (obj->base.dma_buf &&
11298 		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11299 						       false))
11300 		return true;
11301 #endif
11302 	else
11303 		return engine != i915_gem_request_get_engine(obj->last_write_req);
11304 }
11305 
11306 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
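/*
 * Decision summary for use_mmio_flip() (editor's note):
 *   no engine tracked for the object -> MMIO flip
 *   gen < 5                          -> CS flip (no flip done irq)
 *   i915.use_mmio_flip modparam      -> forces CS (< 0) or MMIO (> 0)
 *   execlists enabled                -> MMIO flip
 *   otherwise                        -> MMIO flip only if the last write
 *                                       came from a different engine
 */
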
11307 			     unsigned int rotation,
11308 			     struct intel_unpin_work *work)
11309 {
11310 	struct drm_device *dev = intel_crtc->base.dev;
11311 	struct drm_i915_private *dev_priv = dev->dev_private;
11312 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11313 	const enum i915_pipe pipe = intel_crtc->pipe;
11314 	u32 ctl, stride, tile_height;
11315 
11316 	ctl = I915_READ(PLANE_CTL(pipe, 0));
11317 	ctl &= ~PLANE_CTL_TILED_MASK;
11318 	switch (fb->modifier[0]) {
11319 	case DRM_FORMAT_MOD_NONE:
11320 		break;
11321 	case I915_FORMAT_MOD_X_TILED:
11322 		ctl |= PLANE_CTL_TILED_X;
11323 		break;
11324 	case I915_FORMAT_MOD_Y_TILED:
11325 		ctl |= PLANE_CTL_TILED_Y;
11326 		break;
11327 	case I915_FORMAT_MOD_Yf_TILED:
11328 		ctl |= PLANE_CTL_TILED_YF;
11329 		break;
11330 	default:
11331 		MISSING_CASE(fb->modifier[0]);
11332 	}
11333 
11334 	/*
11335 	 * The stride is expressed either in chunks of 64 bytes for
11336 	 * linear buffers or in number of tiles for tiled buffers.
11337 	 */
11338 	if (intel_rotation_90_or_270(rotation)) {
11339 		/* stride = Surface height in tiles */
11340 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11341 		stride = DIV_ROUND_UP(fb->height, tile_height);
11342 	} else {
11343 		stride = fb->pitches[0] /
11344 			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11345 						  fb->pixel_format);
11346 	}
11347 
11348 	/*
11349 	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11350 	 * PLANE_SURF updates; the update is then guaranteed to be atomic.
11351 	 */
11352 	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11353 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11354 
11355 	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11356 	POSTING_READ(PLANE_SURF(pipe, 0));
11357 }
11358 
11359 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11360 			     struct intel_unpin_work *work)
11361 {
11362 	struct drm_device *dev = intel_crtc->base.dev;
11363 	struct drm_i915_private *dev_priv = dev->dev_private;
11364 	struct intel_framebuffer *intel_fb =
11365 		to_intel_framebuffer(intel_crtc->base.primary->fb);
11366 	struct drm_i915_gem_object *obj = intel_fb->obj;
11367 	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11368 	u32 dspcntr;
11369 
11370 	dspcntr = I915_READ(reg);
11371 
11372 	if (obj->tiling_mode != I915_TILING_NONE)
11373 		dspcntr |= DISPPLANE_TILED;
11374 	else
11375 		dspcntr &= ~DISPPLANE_TILED;
11376 
11377 	I915_WRITE(reg, dspcntr);
11378 
11379 	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11380 	POSTING_READ(DSPSURF(intel_crtc->plane));
11381 }
11382 
11383 /*
11384  * XXX: This is the temporary way to update the plane registers until we get
11385  * around to using the usual plane update functions for MMIO flips
11386  */
11387 static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11388 {
11389 	struct intel_crtc *crtc = mmio_flip->crtc;
11390 	struct intel_unpin_work *work;
11391 
11392 	spin_lock_irq(&crtc->base.dev->event_lock);
11393 	work = crtc->unpin_work;
11394 	spin_unlock_irq(&crtc->base.dev->event_lock);
11395 	if (work == NULL)
11396 		return;
11397 
11398 	intel_mark_page_flip_active(work);
11399 
11400 	intel_pipe_update_start(crtc);
11401 
11402 	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11403 		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11404 	else
11405 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11406 		ilk_do_mmio_flip(crtc, work);
11407 
11408 	intel_pipe_update_end(crtc);
11409 }
11410 
11411 static void intel_mmio_flip_work_func(struct work_struct *work)
11412 {
11413 	struct intel_mmio_flip *mmio_flip =
11414 		container_of(work, struct intel_mmio_flip, work);
11415 #if 0
11416 	struct intel_framebuffer *intel_fb =
11417 		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11418 	struct drm_i915_gem_object *obj = intel_fb->obj;
11419 #endif
11420 
11421 	if (mmio_flip->req) {
11422 		WARN_ON(__i915_wait_request(mmio_flip->req,
11423 					    false, NULL,
11424 					    &mmio_flip->i915->rps.mmioflips));
11425 		i915_gem_request_unreference__unlocked(mmio_flip->req);
11426 	}
11427 
11428 	/* For framebuffer backed by dmabuf, wait for fence */
11429 #if 0
11430 	if (obj->base.dma_buf)
11431 		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11432 							    false, false,
11433 							    MAX_SCHEDULE_TIMEOUT) < 0);
11434 #endif
11435 
11436 	intel_do_mmio_flip(mmio_flip);
11437 	kfree(mmio_flip);
11438 }
11439 
11440 static int intel_queue_mmio_flip(struct drm_device *dev,
11441 				 struct drm_crtc *crtc,
11442 				 struct drm_i915_gem_object *obj)
11443 {
11444 	struct intel_mmio_flip *mmio_flip;
11445 
11446 	mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK);
11447 	if (mmio_flip == NULL)
11448 		return -ENOMEM;
11449 
11450 	mmio_flip->i915 = to_i915(dev);
11451 	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11452 	mmio_flip->crtc = to_intel_crtc(crtc);
11453 	mmio_flip->rotation = crtc->primary->state->rotation;
11454 
11455 	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11456 	schedule_work(&mmio_flip->work);
11457 
11458 	return 0;
11459 }
11460 
11461 static int intel_default_queue_flip(struct drm_device *dev,
11462 				    struct drm_crtc *crtc,
11463 				    struct drm_framebuffer *fb,
11464 				    struct drm_i915_gem_object *obj,
11465 				    struct drm_i915_gem_request *req,
11466 				    uint32_t flags)
11467 {
11468 	return -ENODEV;
11469 }
11470 
11471 static bool __intel_pageflip_stall_check(struct drm_device *dev,
11472 					 struct drm_crtc *crtc)
11473 {
11474 	struct drm_i915_private *dev_priv = dev->dev_private;
11475 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11476 	struct intel_unpin_work *work = intel_crtc->unpin_work;
11477 	u32 addr;
11478 
11479 	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11480 		return true;
11481 
11482 	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11483 		return false;
11484 
11485 	if (!work->enable_stall_check)
11486 		return false;
11487 
11488 	if (work->flip_ready_vblank == 0) {
11489 		if (work->flip_queued_req &&
11490 		    !i915_gem_request_completed(work->flip_queued_req, true))
11491 			return false;
11492 
11493 		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11494 	}
11495 
11496 	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11497 		return false;
11498 
11499 	/* Potential stall - if we see that the flip has happened,
11500 	 * assume a missed interrupt. */
11501 	if (INTEL_INFO(dev)->gen >= 4)
11502 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11503 	else
11504 		addr = I915_READ(DSPADDR(intel_crtc->plane));
11505 
11506 	/* There is a potential issue here with a false positive after a flip
11507 	 * to the same address. We could address this by checking for a
11508 	 * non-incrementing frame counter.
11509 	 */
11510 	return addr == work->gtt_offset;
11511 }
11512 
11513 void intel_check_page_flip(struct drm_device *dev, int pipe)
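/*
 * Editor's note: the stall heuristic above only fires once at least
 * three vblanks have passed since the flip became ready and the live
 * scanout base already matches the new framebuffer, i.e. the hardware
 * flipped but the flip done interrupt was presumably lost.
 */
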
11514 {
11515 	struct drm_i915_private *dev_priv = dev->dev_private;
11516 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11517 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11518 	struct intel_unpin_work *work;
11519 
11520 //	WARN_ON(!in_interrupt());
11521 
11522 	if (crtc == NULL)
11523 		return;
11524 
11525 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
11526 	work = intel_crtc->unpin_work;
11527 	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11528 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11529 			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11530 		page_flip_completed(intel_crtc);
11531 		work = NULL;
11532 	}
11533 	if (work != NULL &&
11534 	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11535 		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11536 	lockmgr(&dev->event_lock, LK_RELEASE);
11537 }
11538 
11539 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11540 				struct drm_framebuffer *fb,
11541 				struct drm_pending_vblank_event *event,
11542 				uint32_t page_flip_flags)
11543 {
11544 	struct drm_device *dev = crtc->dev;
11545 	struct drm_i915_private *dev_priv = dev->dev_private;
11546 	struct drm_framebuffer *old_fb = crtc->primary->fb;
11547 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11548 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11549 	struct drm_plane *primary = crtc->primary;
11550 	enum i915_pipe pipe = intel_crtc->pipe;
11551 	struct intel_unpin_work *work;
11552 	struct intel_engine_cs *engine;
11553 	bool mmio_flip;
11554 	struct drm_i915_gem_request *request = NULL;
11555 	int ret;
11556 
11557 	/*
11558 	 * drm_mode_page_flip_ioctl() should already catch this, but double
11559 	 * check to be safe.  In the future we may enable pageflipping from
11560 	 * a disabled primary plane.
11561 	 */
11562 	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11563 		return -EBUSY;
11564 
11565 	/* Can't change pixel format via MI display flips. */
11566 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11567 		return -EINVAL;
11568 
11569 	/*
11570 	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11571 	 * Note that pitch changes could also affect these register.
11572 	 * Note that pitch changes could also affect these registers.
11573 	if (INTEL_INFO(dev)->gen > 3 &&
11574 	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11575 	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11576 		return -EINVAL;
11577 
11578 	if (i915_terminally_wedged(&dev_priv->gpu_error))
11579 		goto out_hang;
11580 
11581 	work = kzalloc(sizeof(*work), GFP_KERNEL);
11582 	if (work == NULL)
11583 		return -ENOMEM;
11584 
11585 	work->event = event;
11586 	work->crtc = crtc;
11587 	work->old_fb = old_fb;
11588 	INIT_WORK(&work->work, intel_unpin_work_fn);
11589 
11590 	ret = drm_crtc_vblank_get(crtc);
11591 	if (ret)
11592 		goto free_work;
11593 
11594 	/* We borrow the event spin lock for protecting unpin_work */
11595 	spin_lock_irq(&dev->event_lock);
11596 	if (intel_crtc->unpin_work) {
11597 		/* Before declaring the flip queue wedged, check if
11598 		 * the hardware completed the operation behind our backs.
11599 		 */
11600 		if (__intel_pageflip_stall_check(dev, crtc)) {
11601 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11602 			page_flip_completed(intel_crtc);
11603 		} else {
11604 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11605 			spin_unlock_irq(&dev->event_lock);
11606 
11607 			drm_crtc_vblank_put(crtc);
11608 			kfree(work);
11609 			return -EBUSY;
11610 		}
11611 	}
11612 	intel_crtc->unpin_work = work;
11613 	spin_unlock_irq(&dev->event_lock);
11614 
11615 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11616 		flush_workqueue(dev_priv->wq);
11617 
11618 	/* Reference the objects for the scheduled work. */
11619 	drm_framebuffer_reference(work->old_fb);
11620 	drm_gem_object_reference(&obj->base);
11621 
11622 	crtc->primary->fb = fb;
11623 	update_state_fb(crtc->primary);
11624 	intel_fbc_pre_update(intel_crtc);
11625 
11626 	work->pending_flip_obj = obj;
11627 
11628 	ret = i915_mutex_lock_interruptible(dev);
11629 	if (ret)
11630 		goto cleanup;
11631 
11632 	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11633 	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11634 		ret = -EIO;
11635 		goto cleanup;
11636 	}
11637 
11638 	atomic_inc(&intel_crtc->unpin_work_count);
11639 
11640 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11641 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11642 
11643 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11644 		engine = &dev_priv->engine[BCS];
11645 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11646 			/* vlv: DISPLAY_FLIP fails to change tiling */
11647 			engine = NULL;
11648 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11649 		engine = &dev_priv->engine[BCS];
11650 	} else if (INTEL_INFO(dev)->gen >= 7) {
11651 		engine = i915_gem_request_get_engine(obj->last_write_req);
11652 		if (engine == NULL || engine->id != RCS)
11653 			engine = &dev_priv->engine[BCS];
11654 	} else {
11655 		engine = &dev_priv->engine[RCS];
11656 	}
11657 
11658 	mmio_flip = use_mmio_flip(engine, obj);
11659 
11660 	/* When using CS flips, we want to emit semaphores between rings.
11661 	 * However, when using mmio flips we will create a task to do the
11662 	 * synchronisation, so all we want here is to pin the framebuffer
11663 	 * into the display plane and skip any waits.
11664 	 */
11665 	if (!mmio_flip) {
11666 		ret = i915_gem_object_sync(obj, engine, &request);
11667 		if (ret)
11668 			goto cleanup_pending;
11669 	}
11670 
11671 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11672 	if (ret)
11673 		goto cleanup_pending;
11674 
11675 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11676 						  obj, 0);
11677 	work->gtt_offset += intel_crtc->dspaddr_offset;
11678 
11679 	if (mmio_flip) {
11680 		ret = intel_queue_mmio_flip(dev, crtc, obj);
11681 		if (ret)
11682 			goto cleanup_unpin;
11683 
11684 		i915_gem_request_assign(&work->flip_queued_req,
11685 					obj->last_write_req);
11686 	} else {
11687 		if (!request) {
11688 			request = i915_gem_request_alloc(engine, NULL);
11689 			if (IS_ERR(request)) {
11690 				ret = PTR_ERR(request);
11691 				goto cleanup_unpin;
11692 			}
11693 		}
11694 
11695 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11696 						   page_flip_flags);
11697 		if (ret)
11698 			goto cleanup_unpin;
11699 
11700 		i915_gem_request_assign(&work->flip_queued_req, request);
11701 	}
11702 
11703 	if (request)
11704 		i915_add_request_no_flush(request);
11705 
11706 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11707 	work->enable_stall_check = true;
11708 
11709 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11710 			  to_intel_plane(primary)->frontbuffer_bit);
11711 	mutex_unlock(&dev->struct_mutex);
11712 
11713 	intel_frontbuffer_flip_prepare(dev,
11714 				       to_intel_plane(primary)->frontbuffer_bit);
11715 
11716 	trace_i915_flip_request(intel_crtc->plane, obj);
11717 
11718 	return 0;
11719 
11720 cleanup_unpin:
11721 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11722 cleanup_pending:
11723 	if (!IS_ERR_OR_NULL(request))
11724 		i915_add_request_no_flush(request);
11725 	atomic_dec(&intel_crtc->unpin_work_count);
11726 	mutex_unlock(&dev->struct_mutex);
11727 cleanup:
11728 	crtc->primary->fb = old_fb;
11729 	update_state_fb(crtc->primary);
11730 
11731 	drm_gem_object_unreference_unlocked(&obj->base);
11732 	drm_framebuffer_unreference(work->old_fb);
11733 
11734 	spin_lock_irq(&dev->event_lock);
11735 	intel_crtc->unpin_work = NULL;
11736 	spin_unlock_irq(&dev->event_lock);
11737 
11738 	drm_crtc_vblank_put(crtc);
11739 free_work:
11740 	kfree(work);
11741 
11742 	if (ret == -EIO) {
11743 		struct drm_atomic_state *state;
11744 		struct drm_plane_state *plane_state;
11745 
11746 out_hang:
11747 		state = drm_atomic_state_alloc(dev);
11748 		if (!state)
11749 			return -ENOMEM;
11750 		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11751 
11752 retry:
11753 		plane_state = drm_atomic_get_plane_state(state, primary);
11754 		ret = PTR_ERR_OR_ZERO(plane_state);
11755 		if (!ret) {
11756 			drm_atomic_set_fb_for_plane(plane_state, fb);
11757 
11758 			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11759 			if (!ret)
11760 				ret = drm_atomic_commit(state);
11761 		}
11762 
11763 		if (ret == -EDEADLK) {
11764 			drm_modeset_backoff(state->acquire_ctx);
11765 			drm_atomic_state_clear(state);
11766 			goto retry;
11767 		}
11768 
11769 		if (ret)
11770 			drm_atomic_state_free(state);
11771 
11772 		if (ret == 0 && event) {
11773 			spin_lock_irq(&dev->event_lock);
11774 			drm_crtc_send_vblank_event(crtc, event);
11775 			spin_unlock_irq(&dev->event_lock);
11776 		}
11777 	}
11778 	return ret;
11779 }
11780 
11781 
11782 /**
11783  * intel_wm_need_update - Check whether watermarks need updating
11784  * @plane: drm plane
11785  * @state: new plane state
11786  *
11787  * Check current plane state versus the new one to determine whether
11788  * watermarks need to be recalculated.
11789  *
11790  * Returns true or false.
11791  */
11792 static bool intel_wm_need_update(struct drm_plane *plane,
11793 				 struct drm_plane_state *state)
11794 {
11795 	struct intel_plane_state *new = to_intel_plane_state(state);
11796 	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11797 
11798 	/* Update watermarks on tiling or size changes. */
11799 	/* Update watermarks on visibility, tiling, rotation or size changes. */
11800 		return true;
11801 
11802 	if (!cur->base.fb || !new->base.fb)
11803 		return false;
11804 
11805 	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11806 	    cur->base.rotation != new->base.rotation ||
11807 	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11808 	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11809 	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11810 	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11811 		return true;
11812 
11813 	return false;
11814 }
11815 
11816 static bool needs_scaling(struct intel_plane_state *state)
11817 {
11818 	int src_w = drm_rect_width(&state->src) >> 16;
11819 	int src_h = drm_rect_height(&state->src) >> 16;
11820 	int dst_w = drm_rect_width(&state->dst);
11821 	int dst_h = drm_rect_height(&state->dst);
11822 
11823 	return (src_w != dst_w || src_h != dst_h);
11824 }
11825 
11826 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
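/*
 * Editor's note: the src rectangle is in 16.16 fixed point while dst is
 * in integer pixels, hence the >> 16 above. E.g. a 1920x1080 source
 * shown on a 1280x720 destination needs scaling, while an exact 1:1
 * mapping does not.
 */
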
11827 				    struct drm_plane_state *plane_state)
11828 {
11829 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11830 	struct drm_crtc *crtc = crtc_state->crtc;
11831 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11832 	struct drm_plane *plane = plane_state->plane;
11833 	struct drm_device *dev = crtc->dev;
11834 	struct drm_i915_private *dev_priv = to_i915(dev);
11835 	struct intel_plane_state *old_plane_state =
11836 		to_intel_plane_state(plane->state);
11837 	int idx = intel_crtc->base.base.id, ret;
11838 	bool mode_changed = needs_modeset(crtc_state);
11839 	bool was_crtc_enabled = crtc->state->active;
11840 	bool is_crtc_enabled = crtc_state->active;
11841 	bool turn_off, turn_on, visible, was_visible;
11842 	struct drm_framebuffer *fb = plane_state->fb;
11843 
11844 	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11845 	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11846 		ret = skl_update_scaler_plane(
11847 			to_intel_crtc_state(crtc_state),
11848 			to_intel_plane_state(plane_state));
11849 		if (ret)
11850 			return ret;
11851 	}
11852 
11853 	was_visible = old_plane_state->visible;
11854 	visible = to_intel_plane_state(plane_state)->visible;
11855 
11856 	if (!was_crtc_enabled && WARN_ON(was_visible))
11857 		was_visible = false;
11858 
11859 	/*
11860 	 * Visibility is calculated as if the crtc was on, but
11861 	 * Visibility is calculated as if the crtc were on, but
11862 	 * when the crtc isn't active.
11863 	 */
11864 	if (!is_crtc_enabled)
11865 		to_intel_plane_state(plane_state)->visible = visible = false;
11866 
11867 	if (!was_visible && !visible)
11868 		return 0;
11869 
11870 	if (fb != old_plane_state->base.fb)
11871 		pipe_config->fb_changed = true;
11872 
11873 	turn_off = was_visible && (!visible || mode_changed);
11874 	turn_on = visible && (!was_visible || mode_changed);
11875 
11876 	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11877 			 plane->base.id, fb ? fb->base.id : -1);
11878 
11879 	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11880 			 plane->base.id, was_visible, visible,
11881 			 turn_off, turn_on, mode_changed);
11882 
11883 	if (turn_on) {
11884 		pipe_config->update_wm_pre = true;
11885 
11886 		/* must disable cxsr around plane enable/disable */
11887 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11888 			pipe_config->disable_cxsr = true;
11889 	} else if (turn_off) {
11890 		pipe_config->update_wm_post = true;
11891 
11892 		/* must disable cxsr around plane enable/disable */
11893 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11894 			pipe_config->disable_cxsr = true;
11895 	} else if (intel_wm_need_update(plane, plane_state)) {
11896 		/* FIXME bollocks */
11897 		pipe_config->update_wm_pre = true;
11898 		pipe_config->update_wm_post = true;
11899 	}
11900 
11901 	/* Pre-gen9 platforms need two-step watermark updates */
11902 	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
11903 	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
11904 		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
11905 
11906 	if (visible || was_visible)
11907 		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
11908 
11909 	/*
11910 	 * WaCxSRDisabledForSpriteScaling:ivb
11911 	 *
11912 	 * cstate->update_wm was already set above, so this flag will
11913 	 * take effect when we commit and program watermarks.
11914 	 */
11915 	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
11916 	    needs_scaling(to_intel_plane_state(plane_state)) &&
11917 	    !needs_scaling(old_plane_state))
11918 		pipe_config->disable_lp_wm = true;
11919 
11920 	return 0;
11921 }
11922 
11923 static bool encoders_cloneable(const struct intel_encoder *a,
11924 			       const struct intel_encoder *b)
11925 {
11926 	/* masks could be asymmetric, so check both ways */
11927 	return a == b || (a->cloneable & (1 << b->type) &&
11928 			  b->cloneable & (1 << a->type));
11929 }
11930 
11931 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
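/*
 * Editor's note: because the cloneable masks may be asymmetric, both
 * directions are checked above; if encoder A lists B's type but B does
 * not list A's, the pair is still (correctly) rejected.
 */
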
11932 					 struct intel_crtc *crtc,
11933 					 struct intel_encoder *encoder)
11934 {
11935 	struct intel_encoder *source_encoder;
11936 	struct drm_connector *connector;
11937 	struct drm_connector_state *connector_state;
11938 	int i;
11939 
11940 	for_each_connector_in_state(state, connector, connector_state, i) {
11941 		if (connector_state->crtc != &crtc->base)
11942 			continue;
11943 
11944 		source_encoder =
11945 			to_intel_encoder(connector_state->best_encoder);
11946 		if (!encoders_cloneable(encoder, source_encoder))
11947 			return false;
11948 	}
11949 
11950 	return true;
11951 }
11952 
11953 static bool check_encoder_cloning(struct drm_atomic_state *state,
11954 				  struct intel_crtc *crtc)
11955 {
11956 	struct intel_encoder *encoder;
11957 	struct drm_connector *connector;
11958 	struct drm_connector_state *connector_state;
11959 	int i;
11960 
11961 	for_each_connector_in_state(state, connector, connector_state, i) {
11962 		if (connector_state->crtc != &crtc->base)
11963 			continue;
11964 
11965 		encoder = to_intel_encoder(connector_state->best_encoder);
11966 		if (!check_single_encoder_cloning(state, crtc, encoder))
11967 			return false;
11968 	}
11969 
11970 	return true;
11971 }
11972 
11973 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11974 				   struct drm_crtc_state *crtc_state)
11975 {
11976 	struct drm_device *dev = crtc->dev;
11977 	struct drm_i915_private *dev_priv = dev->dev_private;
11978 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11979 	struct intel_crtc_state *pipe_config =
11980 		to_intel_crtc_state(crtc_state);
11981 	struct drm_atomic_state *state = crtc_state->state;
11982 	int ret;
11983 	bool mode_changed = needs_modeset(crtc_state);
11984 
11985 	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11986 		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11987 		return -EINVAL;
11988 	}
11989 
11990 	if (mode_changed && !crtc_state->active)
11991 		pipe_config->update_wm_post = true;
11992 
11993 	if (mode_changed && crtc_state->enable &&
11994 	    dev_priv->display.crtc_compute_clock &&
11995 	    !WARN_ON(pipe_config->shared_dpll)) {
11996 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11997 							   pipe_config);
11998 		if (ret)
11999 			return ret;
12000 	}
12001 
12002 	if (crtc_state->color_mgmt_changed) {
12003 		ret = intel_color_check(crtc, crtc_state);
12004 		if (ret)
12005 			return ret;
12006 
12007 		/*
12008 		 * Changing color management on Intel hardware is
12009 		 * handled as part of planes update.
12010 		 */
12011 		crtc_state->planes_changed = true;
12012 	}
12013 
12014 	ret = 0;
12015 	if (dev_priv->display.compute_pipe_wm) {
12016 		ret = dev_priv->display.compute_pipe_wm(pipe_config);
12017 		if (ret) {
12018 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12019 			return ret;
12020 		}
12021 	}
12022 
12023 	if (dev_priv->display.compute_intermediate_wm &&
12024 	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
12025 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12026 			return 0;
12027 
12028 		/*
12029 		 * Calculate 'intermediate' watermarks that satisfy both the
12030 		 * old state and the new state.  We can program these
12031 		 * immediately.
12032 		 */
12033 		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
12034 								intel_crtc,
12035 								pipe_config);
12036 		if (ret) {
12037 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12038 			return ret;
12039 		}
12040 	} else if (dev_priv->display.compute_intermediate_wm) {
12041 		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12042 			pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
12043 	}
12044 
12045 	if (INTEL_INFO(dev)->gen >= 9) {
12046 		if (mode_changed)
12047 			ret = skl_update_scaler_crtc(pipe_config);
12048 
12049 		if (!ret)
12050 			ret = intel_atomic_setup_scalers(dev, intel_crtc,
12051 							 pipe_config);
12052 	}
12053 
12054 	return ret;
12055 }
12056 
12057 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12058 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12059 	.atomic_begin = intel_begin_crtc_commit,
12060 	.atomic_flush = intel_finish_crtc_commit,
12061 	.atomic_check = intel_crtc_atomic_check,
12062 };
12063 
12064 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12065 {
12066 	struct intel_connector *connector;
12067 
12068 	for_each_intel_connector(dev, connector) {
12069 		if (connector->base.state->crtc)
12070 			drm_connector_unreference(&connector->base);
12071 
12072 		if (connector->base.encoder) {
12073 			connector->base.state->best_encoder =
12074 				connector->base.encoder;
12075 			connector->base.state->crtc =
12076 				connector->base.encoder->crtc;
12077 
12078 			drm_connector_reference(&connector->base);
12079 		} else {
12080 			connector->base.state->best_encoder = NULL;
12081 			connector->base.state->crtc = NULL;
12082 		}
12083 	}
12084 }
12085 
12086 static void
12087 connected_sink_compute_bpp(struct intel_connector *connector,
12088 			   struct intel_crtc_state *pipe_config)
12089 {
12090 	int bpp = pipe_config->pipe_bpp;
12091 
12092 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
12093 		connector->base.base.id,
12094 		connector->base.name);
12095 
12096 	/* Don't use an invalid EDID bpc value */
12097 	if (connector->base.display_info.bpc &&
12098 	    connector->base.display_info.bpc * 3 < bpp) {
12099 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12100 			      bpp, connector->base.display_info.bpc * 3);
12101 		pipe_config->pipe_bpp = connector->base.display_info.bpc * 3;
12102 	}
12103 
12104 	/* Clamp bpp to 8 on screens without EDID 1.4 */
12105 	if (connector->base.display_info.bpc == 0 && bpp > 24) {
12106 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
12107 			      bpp);
12108 		pipe_config->pipe_bpp = 24;
12109 	}
12110 }
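/*
 * Worked example: a panel whose EDID reports bpc == 6 clamps a
 * requested pipe_bpp of 24 down to 18 (6 bits per channel times 3
 * channels), while a sink reporting no bpc at all is capped at the
 * 8 bpc default, i.e. pipe_bpp == 24.
 */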
12111 
12112 static int
12113 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12114 			  struct intel_crtc_state *pipe_config)
12115 {
12116 	struct drm_device *dev = crtc->base.dev;
12117 	struct drm_atomic_state *state;
12118 	struct drm_connector *connector;
12119 	struct drm_connector_state *connector_state;
12120 	int bpp, i;
12121 
12122 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12123 		bpp = 10*3;
12124 	else if (INTEL_INFO(dev)->gen >= 5)
12125 		bpp = 12*3;
12126 	else
12127 		bpp = 8*3;
12128 
12129 
12130 	pipe_config->pipe_bpp = bpp;
12131 
12132 	state = pipe_config->base.state;
12133 
12134 	/* Clamp display bpp to EDID value */
12135 	for_each_connector_in_state(state, connector, connector_state, i) {
12136 		if (connector_state->crtc != &crtc->base)
12137 			continue;
12138 
12139 		connected_sink_compute_bpp(to_intel_connector(connector),
12140 					   pipe_config);
12141 	}
12142 
12143 	return bpp;
12144 }
12145 
12146 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12147 {
12148 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12149 			"type: 0x%x flags: 0x%x\n",
12150 		mode->crtc_clock,
12151 		mode->crtc_hdisplay, mode->crtc_hsync_start,
12152 		mode->crtc_hsync_end, mode->crtc_htotal,
12153 		mode->crtc_vdisplay, mode->crtc_vsync_start,
12154 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12155 }
12156 
12157 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12158 				   struct intel_crtc_state *pipe_config,
12159 				   const char *context)
12160 {
12161 	struct drm_device *dev = crtc->base.dev;
12162 	struct drm_plane *plane;
12163 	struct intel_plane *intel_plane;
12164 	struct intel_plane_state *state;
12165 	struct drm_framebuffer *fb;
12166 
12167 	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12168 		      context, pipe_config, pipe_name(crtc->pipe));
12169 
12170 	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
12171 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12172 		      pipe_config->pipe_bpp, pipe_config->dither);
12173 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12174 		      pipe_config->has_pch_encoder,
12175 		      pipe_config->fdi_lanes,
12176 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12177 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12178 		      pipe_config->fdi_m_n.tu);
12179 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12180 		      pipe_config->has_dp_encoder,
12181 		      pipe_config->lane_count,
12182 		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12183 		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12184 		      pipe_config->dp_m_n.tu);
12185 
12186 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12187 		      pipe_config->has_dp_encoder,
12188 		      pipe_config->lane_count,
12189 		      pipe_config->dp_m2_n2.gmch_m,
12190 		      pipe_config->dp_m2_n2.gmch_n,
12191 		      pipe_config->dp_m2_n2.link_m,
12192 		      pipe_config->dp_m2_n2.link_n,
12193 		      pipe_config->dp_m2_n2.tu);
12194 
12195 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12196 		      pipe_config->has_audio,
12197 		      pipe_config->has_infoframe);
12198 
12199 	DRM_DEBUG_KMS("requested mode:\n");
12200 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12201 	DRM_DEBUG_KMS("adjusted mode:\n");
12202 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12203 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12204 	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12205 	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12206 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12207 	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12208 		      crtc->num_scalers,
12209 		      pipe_config->scaler_state.scaler_users,
12210 		      pipe_config->scaler_state.scaler_id);
12211 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12212 		      pipe_config->gmch_pfit.control,
12213 		      pipe_config->gmch_pfit.pgm_ratios,
12214 		      pipe_config->gmch_pfit.lvds_border_bits);
12215 	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12216 		      pipe_config->pch_pfit.pos,
12217 		      pipe_config->pch_pfit.size,
12218 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12219 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12220 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12221 
12222 	if (IS_BROXTON(dev)) {
12223 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
12224 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12225 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12226 			      pipe_config->ddi_pll_sel,
12227 			      pipe_config->dpll_hw_state.ebb0,
12228 			      pipe_config->dpll_hw_state.ebb4,
12229 			      pipe_config->dpll_hw_state.pll0,
12230 			      pipe_config->dpll_hw_state.pll1,
12231 			      pipe_config->dpll_hw_state.pll2,
12232 			      pipe_config->dpll_hw_state.pll3,
12233 			      pipe_config->dpll_hw_state.pll6,
12234 			      pipe_config->dpll_hw_state.pll8,
12235 			      pipe_config->dpll_hw_state.pll9,
12236 			      pipe_config->dpll_hw_state.pll10,
12237 			      pipe_config->dpll_hw_state.pcsdw12);
12238 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12239 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12240 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12241 			      pipe_config->ddi_pll_sel,
12242 			      pipe_config->dpll_hw_state.ctrl1,
12243 			      pipe_config->dpll_hw_state.cfgcr1,
12244 			      pipe_config->dpll_hw_state.cfgcr2);
12245 	} else if (HAS_DDI(dev)) {
12246 		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12247 			      pipe_config->ddi_pll_sel,
12248 			      pipe_config->dpll_hw_state.wrpll,
12249 			      pipe_config->dpll_hw_state.spll);
12250 	} else {
12251 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12252 			      "fp0: 0x%x, fp1: 0x%x\n",
12253 			      pipe_config->dpll_hw_state.dpll,
12254 			      pipe_config->dpll_hw_state.dpll_md,
12255 			      pipe_config->dpll_hw_state.fp0,
12256 			      pipe_config->dpll_hw_state.fp1);
12257 	}
12258 
12259 	DRM_DEBUG_KMS("planes on this crtc\n");
12260 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12261 		intel_plane = to_intel_plane(plane);
12262 		if (intel_plane->pipe != crtc->pipe)
12263 			continue;
12264 
12265 		state = to_intel_plane_state(plane->state);
12266 		fb = state->base.fb;
12267 		if (!fb) {
12268 			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12269 				"disabled, scaler_id = %d\n",
12270 				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12271 				plane->base.id, intel_plane->pipe,
12272 				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12273 				drm_plane_index(plane), state->scaler_id);
12274 			continue;
12275 		}
12276 
12277 		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled\n",
12278 			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12279 			plane->base.id, intel_plane->pipe,
12280 			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12281 			drm_plane_index(plane));
12282 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x\n",
12283 			fb->base.id, fb->width, fb->height, fb->pixel_format);
12284 		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12285 			state->scaler_id,
12286 			state->src.x1 >> 16, state->src.y1 >> 16,
12287 			drm_rect_width(&state->src) >> 16,
12288 			drm_rect_height(&state->src) >> 16,
12289 			state->dst.x1, state->dst.y1,
12290 			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12291 	}
12292 }
12293 
12294 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12295 {
12296 	struct drm_device *dev = state->dev;
12297 	struct drm_connector *connector;
12298 	unsigned int used_ports = 0;
12299 
12300 	/*
12301 	 * Walk the connector list instead of the encoder
12302 	 * list to detect the problem on ddi platforms
12303 	 * where there's just one encoder per digital port.
12304 	 */
12305 	drm_for_each_connector(connector, dev) {
12306 		struct drm_connector_state *connector_state;
12307 		struct intel_encoder *encoder;
12308 
12309 		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12310 		if (!connector_state)
12311 			connector_state = connector->state;
12312 
12313 		if (!connector_state->best_encoder)
12314 			continue;
12315 
12316 		encoder = to_intel_encoder(connector_state->best_encoder);
12317 
12318 		WARN_ON(!connector_state->crtc);
12319 
12320 		switch (encoder->type) {
12321 			unsigned int port_mask;
12322 		case INTEL_OUTPUT_UNKNOWN:
12323 			if (WARN_ON(!HAS_DDI(dev)))
12324 				break;
12325 		case INTEL_OUTPUT_DISPLAYPORT:
12326 		case INTEL_OUTPUT_HDMI:
12327 		case INTEL_OUTPUT_EDP:
12328 			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12329 
12330 			/* the same port mustn't appear more than once */
12331 			if (used_ports & port_mask)
12332 				return false;
12333 
12334 			used_ports |= port_mask;
12335 		default:
12336 			break;
12337 		}
12338 	}
12339 
12340 	return true;
12341 }
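/*
 * Sketch of the bookkeeping above: a DP connector on port B and an
 * HDMI connector on port C accumulate (1 << PORT_B) | (1 << PORT_C)
 * in used_ports and the state is accepted; a second connector that
 * also resolves to port B trips the used_ports & port_mask test and
 * the whole configuration is rejected.
 */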
12342 
12343 static void
12344 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12345 {
12346 	struct drm_crtc_state tmp_state;
12347 	struct intel_crtc_scaler_state scaler_state;
12348 	struct intel_dpll_hw_state dpll_hw_state;
12349 	struct intel_shared_dpll *shared_dpll;
12350 	uint32_t ddi_pll_sel;
12351 	bool force_thru;
12352 
12353 	/* FIXME: before the switch to atomic started, a new pipe_config was
12354 	 * kzalloc'd. Code that depends on any field being zero should be
12355 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12356 	 * only fields that are known not to cause problems are preserved. */
12357 
12358 	tmp_state = crtc_state->base;
12359 	scaler_state = crtc_state->scaler_state;
12360 	shared_dpll = crtc_state->shared_dpll;
12361 	dpll_hw_state = crtc_state->dpll_hw_state;
12362 	ddi_pll_sel = crtc_state->ddi_pll_sel;
12363 	force_thru = crtc_state->pch_pfit.force_thru;
12364 
12365 	memset(crtc_state, 0, sizeof(*crtc_state));
12366 
12367 	crtc_state->base = tmp_state;
12368 	crtc_state->scaler_state = scaler_state;
12369 	crtc_state->shared_dpll = shared_dpll;
12370 	crtc_state->dpll_hw_state = dpll_hw_state;
12371 	crtc_state->ddi_pll_sel = ddi_pll_sel;
12372 	crtc_state->pch_pfit.force_thru = force_thru;
12373 }
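/*
 * The save/clear/restore dance above is, schematically:
 *
 *	<copy the fields that must survive into locals>
 *	memset(crtc_state, 0, sizeof(*crtc_state));
 *	<copy the locals back>
 *
 * Any new field that has to survive a config recompute must be added
 * to both halves, or it will silently read back as zero afterwards.
 */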
12374 
12375 static int
12376 intel_modeset_pipe_config(struct drm_crtc *crtc,
12377 			  struct intel_crtc_state *pipe_config)
12378 {
12379 	struct drm_atomic_state *state = pipe_config->base.state;
12380 	struct intel_encoder *encoder;
12381 	struct drm_connector *connector;
12382 	struct drm_connector_state *connector_state;
12383 	int base_bpp, ret = -EINVAL;
12384 	int i;
12385 	bool retry = true;
12386 
12387 	clear_intel_crtc_state(pipe_config);
12388 
12389 	pipe_config->cpu_transcoder =
12390 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12391 
12392 	/*
12393 	 * Sanitize sync polarity flags based on requested ones. If neither
12394 	 * positive nor negative polarity is requested, treat this as meaning
12395 	 * negative polarity.
12396 	 */
12397 	if (!(pipe_config->base.adjusted_mode.flags &
12398 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12399 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12400 
12401 	if (!(pipe_config->base.adjusted_mode.flags &
12402 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12403 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12404 
12405 	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12406 					     pipe_config);
12407 	if (base_bpp < 0)
12408 		goto fail;
12409 
12410 	/*
12411 	 * Determine the real pipe dimensions. Note that stereo modes can
12412 	 * increase the actual pipe size due to the frame doubling and
12413 	 * insertion of additional space for blanks between the frames. This
12414 	 * is stored in the crtc timings. We use the requested mode to do this
12415 	 * computation to clearly distinguish it from the adjusted mode, which
12416 	 * can be changed by the connectors in the below retry loop.
12417 	 */
12418 	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12419 			       &pipe_config->pipe_src_w,
12420 			       &pipe_config->pipe_src_h);
12421 
12422 encoder_retry:
12423 	/* Ensure the port clock defaults are reset when retrying. */
12424 	pipe_config->port_clock = 0;
12425 	pipe_config->pixel_multiplier = 1;
12426 
12427 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12428 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12429 			      CRTC_STEREO_DOUBLE);
12430 
12431 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12432 	 * adjust it according to limitations or connector properties, and also
12433 	 * a chance to reject the mode entirely.
12434 	 */
12435 	for_each_connector_in_state(state, connector, connector_state, i) {
12436 		if (connector_state->crtc != crtc)
12437 			continue;
12438 
12439 		encoder = to_intel_encoder(connector_state->best_encoder);
12440 
12441 		if (!encoder->compute_config(encoder, pipe_config)) {
12442 			DRM_DEBUG_KMS("Encoder config failure\n");
12443 			goto fail;
12444 		}
12445 	}
12446 
12447 	/* Set default port clock if not overwritten by the encoder. Needs to be
12448 	 * done afterwards in case the encoder adjusts the mode. */
12449 	if (!pipe_config->port_clock)
12450 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12451 			* pipe_config->pixel_multiplier;
12452 
12453 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12454 	if (ret < 0) {
12455 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12456 		goto fail;
12457 	}
12458 
12459 	if (ret == RETRY) {
12460 		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12461 			ret = -EINVAL;
12462 			goto fail;
12463 		}
12464 
12465 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12466 		retry = false;
12467 		goto encoder_retry;
12468 	}
12469 
12470 	/* Dithering seems not to pass bits through correctly when it should, so
12471 	 * only enable it on 6 bpc panels. */
12472 	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12473 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12474 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12475 
12476 fail:
12477 	return ret;
12478 }
12479 
12480 static void
12481 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12482 {
12483 	struct drm_crtc *crtc;
12484 	struct drm_crtc_state *crtc_state;
12485 	int i;
12486 
12487 	/* Double check state. */
12488 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12489 		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12490 
12491 		/* Update hwmode for vblank functions */
12492 		if (crtc->state->active)
12493 			crtc->hwmode = crtc->state->adjusted_mode;
12494 		else
12495 			crtc->hwmode.crtc_clock = 0;
12496 
12497 		/*
12498 		 * Update legacy state to satisfy fbc code. This can
12499 		 * be removed when fbc uses the atomic state.
12500 		 */
12501 		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12502 			struct drm_plane_state *plane_state = crtc->primary->state;
12503 
12504 			crtc->primary->fb = plane_state->fb;
12505 			crtc->x = plane_state->src_x >> 16;
12506 			crtc->y = plane_state->src_y >> 16;
12507 		}
12508 	}
12509 }
12510 
12511 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12512 {
12513 	int diff;
12514 
12515 	if (clock1 == clock2)
12516 		return true;
12517 
12518 	if (!clock1 || !clock2)
12519 		return false;
12520 
12521 	diff = abs(clock1 - clock2);
12522 
12523 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12524 		return true;
12525 
12526 	return false;
12527 }
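/*
 * The tolerance above works out as follows: with s = clock1 + clock2
 * and d = abs(clock1 - clock2), (d + s) * 100 / s < 105 is equivalent
 * to 100 * d < 5 * s, i.e. the clocks may differ by up to 5% of their
 * sum (roughly 10% of their average). E.g. 100000 vs 104000 matches
 * ((4000 + 204000) * 100 / 204000 == 101), while 100000 vs 112000
 * does not ((12000 + 212000) * 100 / 212000 == 105, not < 105).
 */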
12528 
12529 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12530 	list_for_each_entry((intel_crtc), \
12531 			    &(dev)->mode_config.crtc_list, \
12532 			    base.head) \
12533 		for_each_if (mask & (1 << (intel_crtc)->pipe))
12534 
12535 static bool
12536 intel_compare_m_n(unsigned int m, unsigned int n,
12537 		  unsigned int m2, unsigned int n2,
12538 		  bool exact)
12539 {
12540 	if (m == m2 && n == n2)
12541 		return true;
12542 
12543 	if (exact || !m || !n || !m2 || !n2)
12544 		return false;
12545 
12546 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12547 
12548 	if (n > n2) {
12549 		while (n > n2) {
12550 			m2 <<= 1;
12551 			n2 <<= 1;
12552 		}
12553 	} else if (n < n2) {
12554 		while (n < n2) {
12555 			m <<= 1;
12556 			n <<= 1;
12557 		}
12558 	}
12559 
12560 	if (n != n2)
12561 		return false;
12562 
12563 	return intel_fuzzy_clock_check(m, m2);
12564 }
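/*
 * Example of the inexact path above: comparing m/n = 63/32 against
 * m2/n2 = 126/64, n < n2 so m and n are doubled once to 126/64; the
 * denominators now match exactly and intel_fuzzy_clock_check(126, 126)
 * accepts the pair. Denominators not related by a power of two fail
 * the n != n2 test and the link settings are treated as different.
 */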
12565 
12566 static bool
12567 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12568 		       struct intel_link_m_n *m2_n2,
12569 		       bool adjust)
12570 {
12571 	if (m_n->tu == m2_n2->tu &&
12572 	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12573 			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12574 	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12575 			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12576 		if (adjust)
12577 			*m2_n2 = *m_n;
12578 
12579 		return true;
12580 	}
12581 
12582 	return false;
12583 }
12584 
12585 static bool
12586 intel_pipe_config_compare(struct drm_device *dev,
12587 			  struct intel_crtc_state *current_config,
12588 			  struct intel_crtc_state *pipe_config,
12589 			  bool adjust)
12590 {
12591 	bool ret = true;
12592 
12593 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12594 	do { \
12595 		if (!adjust) \
12596 			DRM_ERROR(fmt, ##__VA_ARGS__); \
12597 		else \
12598 			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12599 	} while (0)
12600 
12601 #define PIPE_CONF_CHECK_X(name)	\
12602 	if (current_config->name != pipe_config->name) { \
12603 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12604 			  "(expected 0x%08x, found 0x%08x)\n", \
12605 			  current_config->name, \
12606 			  pipe_config->name); \
12607 		ret = false; \
12608 	}
12609 
12610 #define PIPE_CONF_CHECK_I(name)	\
12611 	if (current_config->name != pipe_config->name) { \
12612 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12613 			  "(expected %i, found %i)\n", \
12614 			  current_config->name, \
12615 			  pipe_config->name); \
12616 		ret = false; \
12617 	}
12618 
12619 #define PIPE_CONF_CHECK_P(name)	\
12620 	if (current_config->name != pipe_config->name) { \
12621 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12622 			  "(expected %p, found %p)\n", \
12623 			  current_config->name, \
12624 			  pipe_config->name); \
12625 		ret = false; \
12626 	}
12627 
12628 #define PIPE_CONF_CHECK_M_N(name) \
12629 	if (!intel_compare_link_m_n(&current_config->name, \
12630 				    &pipe_config->name, \
12631 				    adjust)) { \
12632 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12633 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12634 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12635 			  current_config->name.tu, \
12636 			  current_config->name.gmch_m, \
12637 			  current_config->name.gmch_n, \
12638 			  current_config->name.link_m, \
12639 			  current_config->name.link_n, \
12640 			  pipe_config->name.tu, \
12641 			  pipe_config->name.gmch_m, \
12642 			  pipe_config->name.gmch_n, \
12643 			  pipe_config->name.link_m, \
12644 			  pipe_config->name.link_n); \
12645 		ret = false; \
12646 	}
12647 
12648 /* This is required for BDW+ where there is only one set of registers for
12649  * switching between high and low RR.
12650  * This macro can be used whenever a comparison has to be made between one
12651  * hw state and multiple sw state variables.
12652  */
12653 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12654 	if (!intel_compare_link_m_n(&current_config->name, \
12655 				    &pipe_config->name, adjust) && \
12656 	    !intel_compare_link_m_n(&current_config->alt_name, \
12657 				    &pipe_config->name, adjust)) { \
12658 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12659 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12660 			  "or tu %i gmch %i/%i link %i/%i, " \
12661 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12662 			  current_config->name.tu, \
12663 			  current_config->name.gmch_m, \
12664 			  current_config->name.gmch_n, \
12665 			  current_config->name.link_m, \
12666 			  current_config->name.link_n, \
12667 			  current_config->alt_name.tu, \
12668 			  current_config->alt_name.gmch_m, \
12669 			  current_config->alt_name.gmch_n, \
12670 			  current_config->alt_name.link_m, \
12671 			  current_config->alt_name.link_n, \
12672 			  pipe_config->name.tu, \
12673 			  pipe_config->name.gmch_m, \
12674 			  pipe_config->name.gmch_n, \
12675 			  pipe_config->name.link_m, \
12676 			  pipe_config->name.link_n); \
12677 		ret = false; \
12678 	}
12679 
12680 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12681 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12682 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12683 			  "(expected %i, found %i)\n", \
12684 			  current_config->name & (mask), \
12685 			  pipe_config->name & (mask)); \
12686 		ret = false; \
12687 	}
12688 
12689 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12690 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12691 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12692 			  "(expected %i, found %i)\n", \
12693 			  current_config->name, \
12694 			  pipe_config->name); \
12695 		ret = false; \
12696 	}
12697 
12698 #define PIPE_CONF_QUIRK(quirk)	\
12699 	((current_config->quirks | pipe_config->quirks) & (quirk))
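/*
 * Usage sketch: PIPE_CONF_CHECK_I(lane_count) expands to roughly
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		INTEL_ERR_OR_DBG_KMS("mismatch in lane_count ...");
 *		ret = false;
 *	}
 *
 * so a mismatch is a full DRM_ERROR during the post-commit hw state
 * cross check (adjust == false), but only a debug message while
 * deciding whether a fastset is possible (adjust == true).
 */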
12700 
12701 	PIPE_CONF_CHECK_I(cpu_transcoder);
12702 
12703 	PIPE_CONF_CHECK_I(has_pch_encoder);
12704 	PIPE_CONF_CHECK_I(fdi_lanes);
12705 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12706 
12707 	PIPE_CONF_CHECK_I(has_dp_encoder);
12708 	PIPE_CONF_CHECK_I(lane_count);
12709 
12710 	if (INTEL_INFO(dev)->gen < 8) {
12711 		PIPE_CONF_CHECK_M_N(dp_m_n);
12712 
12713 		if (current_config->has_drrs)
12714 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12715 	} else
12716 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12717 
12718 	PIPE_CONF_CHECK_I(has_dsi_encoder);
12719 
12720 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12721 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12722 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12723 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12724 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12725 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12726 
12727 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12728 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12729 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12730 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12731 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12732 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12733 
12734 	PIPE_CONF_CHECK_I(pixel_multiplier);
12735 	PIPE_CONF_CHECK_I(has_hdmi_sink);
12736 	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12737 	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12738 		PIPE_CONF_CHECK_I(limited_color_range);
12739 	PIPE_CONF_CHECK_I(has_infoframe);
12740 
12741 	PIPE_CONF_CHECK_I(has_audio);
12742 
12743 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12744 			      DRM_MODE_FLAG_INTERLACE);
12745 
12746 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12747 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12748 				      DRM_MODE_FLAG_PHSYNC);
12749 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12750 				      DRM_MODE_FLAG_NHSYNC);
12751 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12752 				      DRM_MODE_FLAG_PVSYNC);
12753 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12754 				      DRM_MODE_FLAG_NVSYNC);
12755 	}
12756 
12757 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12758 	/* pfit ratios are autocomputed by the hw on gen4+ */
12759 	if (INTEL_INFO(dev)->gen < 4)
12760 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12761 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12762 
12763 	if (!adjust) {
12764 		PIPE_CONF_CHECK_I(pipe_src_w);
12765 		PIPE_CONF_CHECK_I(pipe_src_h);
12766 
12767 		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12768 		if (current_config->pch_pfit.enabled) {
12769 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12770 			PIPE_CONF_CHECK_X(pch_pfit.size);
12771 		}
12772 
12773 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12774 	}
12775 
12776 	/* BDW+ doesn't expose a synchronous way to read the state */
12777 	if (IS_HASWELL(dev))
12778 		PIPE_CONF_CHECK_I(ips_enabled);
12779 
12780 	PIPE_CONF_CHECK_I(double_wide);
12781 
12782 	PIPE_CONF_CHECK_X(ddi_pll_sel);
12783 
12784 	PIPE_CONF_CHECK_P(shared_dpll);
12785 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12786 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12787 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12788 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12789 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12790 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12791 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12792 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12793 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12794 
12795 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12796 	PIPE_CONF_CHECK_X(dsi_pll.div);
12797 
12798 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12799 		PIPE_CONF_CHECK_I(pipe_bpp);
12800 
12801 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12802 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12803 
12804 #undef PIPE_CONF_CHECK_X
12805 #undef PIPE_CONF_CHECK_I
12806 #undef PIPE_CONF_CHECK_P
12807 #undef PIPE_CONF_CHECK_FLAGS
12808 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12809 #undef PIPE_CONF_QUIRK
12810 #undef INTEL_ERR_OR_DBG_KMS
12811 
12812 	return ret;
12813 }
12814 
12815 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12816 					   const struct intel_crtc_state *pipe_config)
12817 {
12818 	if (pipe_config->has_pch_encoder) {
12819 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12820 							    &pipe_config->fdi_m_n);
12821 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12822 
12823 		/*
12824 		 * FDI already provided one idea for the dotclock.
12825 		 * Yell if the encoder disagrees.
12826 		 */
12827 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12828 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12829 		     fdi_dotclock, dotclock);
12830 	}
12831 }
12832 
12833 static void verify_wm_state(struct drm_crtc *crtc,
12834 			    struct drm_crtc_state *new_state)
12835 {
12836 	struct drm_device *dev = crtc->dev;
12837 	struct drm_i915_private *dev_priv = dev->dev_private;
12838 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12839 	struct skl_ddb_entry *hw_entry, *sw_entry;
12840 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12841 	const enum i915_pipe pipe = intel_crtc->pipe;
12842 	int plane;
12843 
12844 	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
12845 		return;
12846 
12847 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12848 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12849 
12850 	/* planes */
12851 	for_each_plane(dev_priv, pipe, plane) {
12852 		hw_entry = &hw_ddb.plane[pipe][plane];
12853 		sw_entry = &sw_ddb->plane[pipe][plane];
12854 
12855 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12856 			continue;
12857 
12858 		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12859 			  "(expected (%u,%u), found (%u,%u))\n",
12860 			  pipe_name(pipe), plane + 1,
12861 			  sw_entry->start, sw_entry->end,
12862 			  hw_entry->start, hw_entry->end);
12863 	}
12864 
12865 	/* cursor */
12866 	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12867 	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12868 
12869 	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
12870 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12871 			  "(expected (%u,%u), found (%u,%u))\n",
12872 			  pipe_name(pipe),
12873 			  sw_entry->start, sw_entry->end,
12874 			  hw_entry->start, hw_entry->end);
12875 	}
12876 }
12877 
12878 static void
12879 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12880 {
12881 	struct drm_connector *connector;
12882 
12883 	drm_for_each_connector(connector, dev) {
12884 		struct drm_encoder *encoder = connector->encoder;
12885 		struct drm_connector_state *state = connector->state;
12886 
12887 		if (state->crtc != crtc)
12888 			continue;
12889 
12890 		intel_connector_verify_state(to_intel_connector(connector));
12891 
12892 		I915_STATE_WARN(state->best_encoder != encoder,
12893 		     "connector's atomic encoder doesn't match legacy encoder\n");
12894 	}
12895 }
12896 
12897 static void
12898 verify_encoder_state(struct drm_device *dev)
12899 {
12900 	struct intel_encoder *encoder;
12901 	struct intel_connector *connector;
12902 
12903 	for_each_intel_encoder(dev, encoder) {
12904 		bool enabled = false;
12905 		enum i915_pipe pipe;
12906 
12907 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12908 			      encoder->base.base.id,
12909 			      encoder->base.name);
12910 
12911 		for_each_intel_connector(dev, connector) {
12912 			if (connector->base.state->best_encoder != &encoder->base)
12913 				continue;
12914 			enabled = true;
12915 
12916 			I915_STATE_WARN(connector->base.state->crtc !=
12917 					encoder->base.crtc,
12918 			     "connector's crtc doesn't match encoder crtc\n");
12919 		}
12920 
12921 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12922 		     "encoder's enabled state mismatch "
12923 		     "(expected %i, found %i)\n",
12924 		     !!encoder->base.crtc, enabled);
12925 
12926 		if (!encoder->base.crtc) {
12927 			bool active;
12928 
12929 			active = encoder->get_hw_state(encoder, &pipe);
12930 			I915_STATE_WARN(active,
12931 			     "encoder detached but still enabled on pipe %c.\n",
12932 			     pipe_name(pipe));
12933 		}
12934 	}
12935 }
12936 
12937 static void
12938 verify_crtc_state(struct drm_crtc *crtc,
12939 		  struct drm_crtc_state *old_crtc_state,
12940 		  struct drm_crtc_state *new_crtc_state)
12941 {
12942 	struct drm_device *dev = crtc->dev;
12943 	struct drm_i915_private *dev_priv = dev->dev_private;
12944 	struct intel_encoder *encoder;
12945 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12946 	struct intel_crtc_state *pipe_config, *sw_config;
12947 	struct drm_atomic_state *old_state;
12948 	bool active;
12949 
12950 	old_state = old_crtc_state->state;
12951 	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12952 	pipe_config = to_intel_crtc_state(old_crtc_state);
12953 	memset(pipe_config, 0, sizeof(*pipe_config));
12954 	pipe_config->base.crtc = crtc;
12955 	pipe_config->base.state = old_state;
12956 
12957 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
12958 
12959 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12960 
12961 	/* hw state is inconsistent with the pipe quirk */
12962 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12963 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12964 		active = new_crtc_state->active;
12965 
12966 	I915_STATE_WARN(new_crtc_state->active != active,
12967 	     "crtc active state doesn't match with hw state "
12968 	     "(expected %i, found %i)\n", new_crtc_state->active, active);
12969 
12970 	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12971 	     "transitional active state does not match atomic hw state "
12972 	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12973 
12974 	for_each_encoder_on_crtc(dev, crtc, encoder) {
12975 		enum i915_pipe pipe;
12976 
12977 		active = encoder->get_hw_state(encoder, &pipe);
12978 		I915_STATE_WARN(active != new_crtc_state->active,
12979 			"[ENCODER:%i] active %i with crtc active %i\n",
12980 			encoder->base.base.id, active, new_crtc_state->active);
12981 
12982 		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12983 				"Encoder connected to wrong pipe %c\n",
12984 				pipe_name(pipe));
12985 
12986 		if (active)
12987 			encoder->get_config(encoder, pipe_config);
12988 	}
12989 
12990 	if (!new_crtc_state->active)
12991 		return;
12992 
12993 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
12994 
12995 	sw_config = to_intel_crtc_state(crtc->state);
12996 	if (!intel_pipe_config_compare(dev, sw_config,
12997 				       pipe_config, false)) {
12998 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
12999 		intel_dump_pipe_config(intel_crtc, pipe_config,
13000 				       "[hw state]");
13001 		intel_dump_pipe_config(intel_crtc, sw_config,
13002 				       "[sw state]");
13003 	}
13004 }
13005 
13006 static void
13007 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13008 			 struct intel_shared_dpll *pll,
13009 			 struct drm_crtc *crtc,
13010 			 struct drm_crtc_state *new_state)
13011 {
13012 	struct intel_dpll_hw_state dpll_hw_state;
13013 	unsigned crtc_mask;
13014 	bool active;
13015 
13016 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13017 
13018 	DRM_DEBUG_KMS("%s\n", pll->name);
13019 
13020 	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
13021 
13022 	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
13023 		I915_STATE_WARN(!pll->on && pll->active_mask,
13024 		     "pll in active use but not on in sw tracking\n");
13025 		I915_STATE_WARN(pll->on && !pll->active_mask,
13026 		     "pll is on but not used by any active crtc\n");
13027 		I915_STATE_WARN(pll->on != active,
13028 		     "pll on state mismatch (expected %i, found %i)\n",
13029 		     pll->on, active);
13030 	}
13031 
13032 	if (!crtc) {
13033 		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
13034 				"more active pll users than references: %x vs %x\n",
13035 				pll->active_mask, pll->config.crtc_mask);
13036 
13037 		return;
13038 	}
13039 
13040 	crtc_mask = 1 << drm_crtc_index(crtc);
13041 
13042 	if (new_state->active)
13043 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13044 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13045 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13046 	else
13047 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13048 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13049 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13050 
13051 	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
13052 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13053 			crtc_mask, pll->config.crtc_mask);
13054 
13055 	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
13056 					  &dpll_hw_state,
13057 					  sizeof(dpll_hw_state)),
13058 			"pll hw state mismatch\n");
13059 }
13060 
13061 static void
13062 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13063 			 struct drm_crtc_state *old_crtc_state,
13064 			 struct drm_crtc_state *new_crtc_state)
13065 {
13066 	struct drm_i915_private *dev_priv = dev->dev_private;
13067 	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13068 	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13069 
13070 	if (new_state->shared_dpll)
13071 		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13072 
13073 	if (old_state->shared_dpll &&
13074 	    old_state->shared_dpll != new_state->shared_dpll) {
13075 		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13076 		struct intel_shared_dpll *pll = old_state->shared_dpll;
13077 
13078 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13079 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13080 				pipe_name(drm_crtc_index(crtc)));
13081 		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13082 				"pll enabled crtcs mismatch (found %c in enabled mask)\n",
13083 				pipe_name(drm_crtc_index(crtc)));
13084 	}
13085 }
13086 
13087 static void
13088 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13089 			 struct drm_crtc_state *old_state,
13090 			 struct drm_crtc_state *new_state)
13091 {
13092 	if (!needs_modeset(new_state) &&
13093 	    !to_intel_crtc_state(new_state)->update_pipe)
13094 		return;
13095 
13096 	verify_wm_state(crtc, new_state);
13097 	verify_connector_state(crtc->dev, crtc);
13098 	verify_crtc_state(crtc, old_state, new_state);
13099 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13100 }
13101 
13102 static void
13103 verify_disabled_dpll_state(struct drm_device *dev)
13104 {
13105 	struct drm_i915_private *dev_priv = dev->dev_private;
13106 	int i;
13107 
13108 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13109 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13110 }
13111 
13112 static void
13113 intel_modeset_verify_disabled(struct drm_device *dev)
13114 {
13115 	verify_encoder_state(dev);
13116 	verify_connector_state(dev, NULL);
13117 	verify_disabled_dpll_state(dev);
13118 }
13119 
13120 static void update_scanline_offset(struct intel_crtc *crtc)
13121 {
13122 	struct drm_device *dev = crtc->base.dev;
13123 
13124 	/*
13125 	 * The scanline counter increments at the leading edge of hsync.
13126 	 *
13127 	 * On most platforms it starts counting from vtotal-1 on the
13128 	 * first active line. That means the scanline counter value is
13129 	 * always one less than what we would expect. I.e. just after
13130 	 * start of vblank, which also occurs at start of hsync (on the
13131 	 * last active line), the scanline counter will read vblank_start-1.
13132 	 *
13133 	 * On gen2 the scanline counter starts counting from 1 instead
13134 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13135 	 * to keep the value positive), instead of adding one.
13136 	 *
13137 	 * On HSW+ the behaviour of the scanline counter depends on the output
13138 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13139 	 * there's an extra 1 line difference. So we need to add two instead of
13140 	 * one to the value.
13141 	 */
13142 	if (IS_GEN2(dev)) {
13143 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13144 		int vtotal;
13145 
13146 		vtotal = adjusted_mode->crtc_vtotal;
13147 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13148 			vtotal /= 2;
13149 
13150 		crtc->scanline_offset = vtotal - 1;
13151 	} else if (HAS_DDI(dev) &&
13152 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13153 		crtc->scanline_offset = 2;
13154 	} else
13155 		crtc->scanline_offset = 1;
13156 }
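/*
 * Worked example: a gen2 pipe running a progressive mode with
 * crtc_vtotal == 525 gets scanline_offset = 524 (i.e. vtotal - 1), a
 * HSW+ HDMI output gets 2, and everything else gets 1; the scanline
 * readout is then expected to add the offset modulo vtotal to map the
 * raw hardware count back onto the convention described above.
 */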
13157 
13158 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13159 {
13160 	struct drm_device *dev = state->dev;
13161 	struct drm_i915_private *dev_priv = to_i915(dev);
13162 	struct intel_shared_dpll_config *shared_dpll = NULL;
13163 	struct drm_crtc *crtc;
13164 	struct drm_crtc_state *crtc_state;
13165 	int i;
13166 
13167 	if (!dev_priv->display.crtc_compute_clock)
13168 		return;
13169 
13170 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13171 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13172 		struct intel_shared_dpll *old_dpll =
13173 			to_intel_crtc_state(crtc->state)->shared_dpll;
13174 
13175 		if (!needs_modeset(crtc_state))
13176 			continue;
13177 
13178 		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
13179 
13180 		if (!old_dpll)
13181 			continue;
13182 
13183 		if (!shared_dpll)
13184 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13185 
13186 		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
13187 	}
13188 }
13189 
13190 /*
13191  * This implements the workaround described in the "notes" section of the mode
13192  * set sequence documentation. When going from no pipes or single pipe to
13193  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13194  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13195  */
13196 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13197 {
13198 	struct drm_crtc_state *crtc_state;
13199 	struct intel_crtc *intel_crtc;
13200 	struct drm_crtc *crtc;
13201 	struct intel_crtc_state *first_crtc_state = NULL;
13202 	struct intel_crtc_state *other_crtc_state = NULL;
13203 	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13204 	int i;
13205 
13206 	/* look at all crtcs that are going to be enabled during the modeset */
13207 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13208 		intel_crtc = to_intel_crtc(crtc);
13209 
13210 		if (!crtc_state->active || !needs_modeset(crtc_state))
13211 			continue;
13212 
13213 		if (first_crtc_state) {
13214 			other_crtc_state = to_intel_crtc_state(crtc_state);
13215 			break;
13216 		} else {
13217 			first_crtc_state = to_intel_crtc_state(crtc_state);
13218 			first_pipe = intel_crtc->pipe;
13219 		}
13220 	}
13221 
13222 	/* No workaround needed? */
13223 	if (!first_crtc_state)
13224 		return 0;
13225 
13226 	/* w/a possibly needed, check how many crtcs are already enabled. */
13227 	for_each_intel_crtc(state->dev, intel_crtc) {
13228 		struct intel_crtc_state *pipe_config;
13229 
13230 		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13231 		if (IS_ERR(pipe_config))
13232 			return PTR_ERR(pipe_config);
13233 
13234 		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13235 
13236 		if (!pipe_config->base.active ||
13237 		    needs_modeset(&pipe_config->base))
13238 			continue;
13239 
13240 		/* 2 or more enabled crtcs means no need for w/a */
13241 		if (enabled_pipe != INVALID_PIPE)
13242 			return 0;
13243 
13244 		enabled_pipe = intel_crtc->pipe;
13245 	}
13246 
13247 	if (enabled_pipe != INVALID_PIPE)
13248 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13249 	else if (other_crtc_state)
13250 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13251 
13252 	return 0;
13253 }
13254 
13255 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13256 {
13257 	struct drm_crtc *crtc;
13258 	struct drm_crtc_state *crtc_state;
13259 	int ret = 0;
13260 
13261 	/* add all active pipes to the state */
13262 	for_each_crtc(state->dev, crtc) {
13263 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13264 		if (IS_ERR(crtc_state))
13265 			return PTR_ERR(crtc_state);
13266 
13267 		if (!crtc_state->active || needs_modeset(crtc_state))
13268 			continue;
13269 
13270 		crtc_state->mode_changed = true;
13271 
13272 		ret = drm_atomic_add_affected_connectors(state, crtc);
13273 		if (ret)
13274 			break;
13275 
13276 		ret = drm_atomic_add_affected_planes(state, crtc);
13277 		if (ret)
13278 			break;
13279 	}
13280 
13281 	return ret;
13282 }
13283 
13284 static int intel_modeset_checks(struct drm_atomic_state *state)
13285 {
13286 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13287 	struct drm_i915_private *dev_priv = state->dev->dev_private;
13288 	struct drm_crtc *crtc;
13289 	struct drm_crtc_state *crtc_state;
13290 	int ret = 0, i;
13291 
13292 	if (!check_digital_port_conflicts(state)) {
13293 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13294 		return -EINVAL;
13295 	}
13296 
13297 	intel_state->modeset = true;
13298 	intel_state->active_crtcs = dev_priv->active_crtcs;
13299 
13300 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13301 		if (crtc_state->active)
13302 			intel_state->active_crtcs |= 1 << i;
13303 		else
13304 			intel_state->active_crtcs &= ~(1 << i);
13305 	}
13306 
13307 	/*
13308 	 * See if the config requires any additional preparation, e.g.
13309 	 * to adjust global state with pipes off.  We need to do this
13310 	 * here so we can get the updated config for the new
13311 	 * mode set on this crtc.  For other crtcs we need to use the
13312 	 * adjusted_mode bits in the crtc directly.
13313 	 */
13314 	if (dev_priv->display.modeset_calc_cdclk) {
13315 		ret = dev_priv->display.modeset_calc_cdclk(state);
13316 
13317 		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
13318 			ret = intel_modeset_all_pipes(state);
13319 
13320 		if (ret < 0)
13321 			return ret;
13322 
13323 		DRM_DEBUG_KMS("New cdclk calculated: atomic %u, actual %u\n",
13324 			      intel_state->cdclk, intel_state->dev_cdclk);
13325 	} else
13326 		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
13327 
13328 	intel_modeset_clear_plls(state);
13329 
13330 	if (IS_HASWELL(dev_priv))
13331 		return haswell_mode_set_planes_workaround(state);
13332 
13333 	return 0;
13334 }
13335 
13336 /*
13337  * Handle calculation of various watermark data at the end of the atomic check
13338  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13339  * handlers to ensure that all derived state has been updated.
13340  */
13341 static void calc_watermark_data(struct drm_atomic_state *state)
13342 {
13343 	struct drm_device *dev = state->dev;
13344 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13345 	struct drm_crtc *crtc;
13346 	struct drm_crtc_state *cstate;
13347 	struct drm_plane *plane;
13348 	struct drm_plane_state *pstate;
13349 
13350 	/*
13351 	 * Calculate watermark configuration details now that derived
13352 	 * plane/crtc state is all properly updated.
13353 	 */
13354 	drm_for_each_crtc(crtc, dev) {
13355 		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13356 			crtc->state;
13357 
13358 		if (cstate->active)
13359 			intel_state->wm_config.num_pipes_active++;
13360 	}
13361 	drm_for_each_legacy_plane(plane, dev) {
13362 		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13363 			plane->state;
13364 
13365 		if (!to_intel_plane_state(pstate)->visible)
13366 			continue;
13367 
13368 		intel_state->wm_config.sprites_enabled = true;
13369 		if (pstate->crtc_w != pstate->src_w >> 16 ||
13370 		    pstate->crtc_h != pstate->src_h >> 16)
13371 			intel_state->wm_config.sprites_scaled = true;
13372 	}
13373 }
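/*
 * Note on the scaling test above: plane source coordinates are in
 * 16.16 fixed point, so src_w >> 16 recovers the integer source width.
 * E.g. src_w == 1920 << 16 with crtc_w == 1920 is unscaled, while
 * crtc_w == 960 against the same source marks the config as
 * sprites_scaled.
 */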
13374 
13375 /**
13376  * intel_atomic_check - validate state object
13377  * @dev: drm device
13378  * @state: state to validate
13379  */
13380 static int intel_atomic_check(struct drm_device *dev,
13381 			      struct drm_atomic_state *state)
13382 {
13383 	struct drm_i915_private *dev_priv = to_i915(dev);
13384 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13385 	struct drm_crtc *crtc;
13386 	struct drm_crtc_state *crtc_state;
13387 	int ret, i;
13388 	bool any_ms = false;
13389 
13390 	ret = drm_atomic_helper_check_modeset(dev, state);
13391 	if (ret)
13392 		return ret;
13393 
13394 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13395 		struct intel_crtc_state *pipe_config =
13396 			to_intel_crtc_state(crtc_state);
13397 
13398 		/* Catch I915_MODE_FLAG_INHERITED */
13399 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13400 			crtc_state->mode_changed = true;
13401 
13402 		if (!crtc_state->enable) {
13403 			if (needs_modeset(crtc_state))
13404 				any_ms = true;
13405 			continue;
13406 		}
13407 
13408 		if (!needs_modeset(crtc_state))
13409 			continue;
13410 
13411 		/* FIXME: For only active_changed we shouldn't need to do any
13412 		 * state recomputation at all. */
13413 
13414 		ret = drm_atomic_add_affected_connectors(state, crtc);
13415 		if (ret)
13416 			return ret;
13417 
13418 		ret = intel_modeset_pipe_config(crtc, pipe_config);
13419 		if (ret)
13420 			return ret;
13421 
13422 		if (i915.fastboot &&
13423 		    intel_pipe_config_compare(dev,
13424 					to_intel_crtc_state(crtc->state),
13425 					pipe_config, true)) {
13426 			crtc_state->mode_changed = false;
13427 			to_intel_crtc_state(crtc_state)->update_pipe = true;
13428 		}
13429 
13430 		if (needs_modeset(crtc_state)) {
13431 			any_ms = true;
13432 
13433 			ret = drm_atomic_add_affected_planes(state, crtc);
13434 			if (ret)
13435 				return ret;
13436 		}
13437 
13438 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13439 				       needs_modeset(crtc_state) ?
13440 				       "[modeset]" : "[fastset]");
13441 	}
13442 
13443 	if (any_ms) {
13444 		ret = intel_modeset_checks(state);
13445 
13446 		if (ret)
13447 			return ret;
13448 	} else
13449 		intel_state->cdclk = dev_priv->cdclk_freq;
13450 
13451 	ret = drm_atomic_helper_check_planes(dev, state);
13452 	if (ret)
13453 		return ret;
13454 
13455 	intel_fbc_choose_crtc(dev_priv, state);
13456 	calc_watermark_data(state);
13457 
13458 	return 0;
13459 }
13460 
13461 static int intel_atomic_prepare_commit(struct drm_device *dev,
13462 				       struct drm_atomic_state *state,
13463 				       bool nonblock)
13464 {
13465 	struct drm_i915_private *dev_priv = dev->dev_private;
13466 	struct drm_plane_state *plane_state;
13467 	struct drm_crtc_state *crtc_state;
13468 	struct drm_plane *plane;
13469 	struct drm_crtc *crtc;
13470 	int i, ret;
13471 
13472 	if (nonblock) {
13473 		DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
13474 		return -EINVAL;
13475 	}
13476 
13477 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13478 		if (state->legacy_cursor_update)
13479 			continue;
13480 
13481 		ret = intel_crtc_wait_for_pending_flips(crtc);
13482 		if (ret)
13483 			return ret;
13484 
13485 		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13486 			flush_workqueue(dev_priv->wq);
13487 	}
13488 
13489 	ret = mutex_lock_interruptible(&dev->struct_mutex);
13490 	if (ret)
13491 		return ret;
13492 
13493 	ret = drm_atomic_helper_prepare_planes(dev, state);
13494 	mutex_unlock(&dev->struct_mutex);
13495 
13496 	if (!ret && !nonblock) {
13497 		for_each_plane_in_state(state, plane, plane_state, i) {
13498 			struct intel_plane_state *intel_plane_state =
13499 				to_intel_plane_state(plane_state);
13500 
13501 			if (!intel_plane_state->wait_req)
13502 				continue;
13503 
13504 			ret = __i915_wait_request(intel_plane_state->wait_req,
13505 						  true, NULL, NULL);
13506 			if (ret) {
13507 				/* Any hang should be swallowed by the wait */
13508 				WARN_ON(ret == -EIO);
13509 				mutex_lock(&dev->struct_mutex);
13510 				drm_atomic_helper_cleanup_planes(dev, state);
13511 				mutex_unlock(&dev->struct_mutex);
13512 				break;
13513 			}
13514 		}
13515 	}
13516 
13517 	return ret;
13518 }
13519 
13520 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13521 					  struct drm_i915_private *dev_priv,
13522 					  unsigned crtc_mask)
13523 {
13524 	unsigned last_vblank_count[I915_MAX_PIPES];
13525 	enum i915_pipe pipe;
13526 	int ret;
13527 
13528 	if (!crtc_mask)
13529 		return;
13530 
13531 	for_each_pipe(dev_priv, pipe) {
13532 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13533 
13534 		if (!((1 << pipe) & crtc_mask))
13535 			continue;
13536 
13537 		ret = drm_crtc_vblank_get(crtc);
13538 		if (WARN_ON(ret != 0)) {
13539 			crtc_mask &= ~(1 << pipe);
13540 			continue;
13541 		}
13542 
13543 		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13544 	}
13545 
13546 	for_each_pipe(dev_priv, pipe) {
13547 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13548 		long lret;
13549 
13550 		if (!((1 << pipe) & crtc_mask))
13551 			continue;
13552 
13553 		lret = wait_event_timeout(dev->vblank[pipe].queue,
13554 				last_vblank_count[pipe] !=
13555 					drm_crtc_vblank_count(crtc),
13556 				msecs_to_jiffies(50));
13557 
13558 		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
13559 
13560 		drm_crtc_vblank_put(crtc);
13561 	}
13562 }
13563 
13564 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13565 {
13566 	/* fb updated, need to unpin old fb */
13567 	if (crtc_state->fb_changed)
13568 		return true;
13569 
13570 	/* wm changes, need vblank before final wm's */
13571 	if (crtc_state->update_wm_post)
13572 		return true;
13573 
13574 	/*
13575 	 * cxsr is re-enabled after vblank.
13576 	 * This is already handled by crtc_state->update_wm_post,
13577 	 * but added for clarity.
13578 	 */
13579 	if (crtc_state->disable_cxsr)
13580 		return true;
13581 
13582 	return false;
13583 }
13584 
13585 /**
13586  * intel_atomic_commit - commit validated state object
13587  * @dev: DRM device
13588  * @state: the top-level driver state object
13589  * @nonblock: nonblocking commit
13590  *
13591  * This function commits a top-level state object that has been validated
13592  * with drm_atomic_helper_check().
13593  *
13594  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13595  * we can only handle plane-related operations and do not yet support
13596  * nonblocking commit.
13597  *
13598  * RETURNS
13599  * Zero for success or -errno.
13600  */
13601 static int intel_atomic_commit(struct drm_device *dev,
13602 			       struct drm_atomic_state *state,
13603 			       bool nonblock)
13604 {
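	/*
	 * Rough flow: prepare and pin the new framebuffers, swap in the new
	 * state, disable pipes that need a full modeset, reprogram cdclk and
	 * the legacy modeset state, enable pipes and commit planes, wait for
	 * vblanks, write the optimal watermarks, then clean up and free the
	 * state.
	 */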
13605 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13606 	struct drm_i915_private *dev_priv = dev->dev_private;
13607 	struct drm_crtc_state *old_crtc_state;
13608 	struct drm_crtc *crtc;
13609 	struct intel_crtc_state *intel_cstate;
13610 	int ret = 0, i;
13611 	bool hw_check = intel_state->modeset;
13612 	unsigned long put_domains[I915_MAX_PIPES] = {};
13613 	unsigned crtc_vblank_mask = 0;
13614 
13615 	ret = intel_atomic_prepare_commit(dev, state, nonblock);
13616 	if (ret) {
13617 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13618 		return ret;
13619 	}
13620 
13621 	drm_atomic_helper_swap_state(state, true);
13622 	dev_priv->wm.config = intel_state->wm_config;
13623 	intel_shared_dpll_commit(state);
13624 
13625 	if (intel_state->modeset) {
13626 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13627 		       sizeof(intel_state->min_pixclk));
13628 		dev_priv->active_crtcs = intel_state->active_crtcs;
13629 		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13630 
13631 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13632 	}
13633 
13634 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13635 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13636 
13637 		if (needs_modeset(crtc->state) ||
13638 		    to_intel_crtc_state(crtc->state)->update_pipe) {
13639 			hw_check = true;
13640 
13641 			put_domains[to_intel_crtc(crtc)->pipe] =
13642 				modeset_get_crtc_power_domains(crtc,
13643 					to_intel_crtc_state(crtc->state));
13644 		}
13645 
13646 		if (!needs_modeset(crtc->state))
13647 			continue;
13648 
13649 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13650 
13651 		if (old_crtc_state->active) {
13652 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13653 			dev_priv->display.crtc_disable(crtc);
13654 			intel_crtc->active = false;
13655 			intel_fbc_disable(intel_crtc);
13656 			intel_disable_shared_dpll(intel_crtc);
13657 
13658 			/*
13659 			 * Underruns don't always raise
13660 			 * interrupts, so check manually.
13661 			 */
13662 			intel_check_cpu_fifo_underruns(dev_priv);
13663 			intel_check_pch_fifo_underruns(dev_priv);
13664 
13665 			if (!crtc->state->active)
13666 				intel_update_watermarks(crtc);
13667 		}
13668 	}
13669 
13670 	/* Only after disabling all output pipelines that will be changed can we
13671 	 * update the output configuration. */
13672 	intel_modeset_update_crtc_state(state);
13673 
13674 	if (intel_state->modeset) {
13675 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13676 
13677 		if (dev_priv->display.modeset_commit_cdclk &&
13678 		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
13679 			dev_priv->display.modeset_commit_cdclk(state);
13680 
13681 		intel_modeset_verify_disabled(dev);
13682 	}
13683 
13684 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13685 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13686 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13687 		bool modeset = needs_modeset(crtc->state);
13688 		struct intel_crtc_state *pipe_config =
13689 			to_intel_crtc_state(crtc->state);
13690 		bool update_pipe = !modeset && pipe_config->update_pipe;
13691 
13692 		if (modeset && crtc->state->active) {
13693 			update_scanline_offset(to_intel_crtc(crtc));
13694 			dev_priv->display.crtc_enable(crtc);
13695 		}
13696 
13697 		if (!modeset)
13698 			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13699 
13700 		if (crtc->state->active &&
13701 		    drm_atomic_get_existing_plane_state(state, crtc->primary))
13702 			intel_fbc_enable(intel_crtc);
13703 
13704 		if (crtc->state->active &&
13705 		    (crtc->state->planes_changed || update_pipe))
13706 			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13707 
13708 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13709 			crtc_vblank_mask |= 1 << i;
13710 	}
13711 
13712 	/* FIXME: add subpixel order */
13713 
13714 	if (!state->legacy_cursor_update)
13715 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13716 
13717 	/*
13718 	 * Now that the vblank has passed, we can go ahead and program the
13719 	 * optimal watermarks on platforms that need two-step watermark
13720 	 * programming.
13721 	 *
13722 	 * TODO: Move this (and other cleanup) to an async worker eventually.
13723 	 */
13724 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13725 		intel_cstate = to_intel_crtc_state(crtc->state);
13726 
13727 		if (dev_priv->display.optimize_watermarks)
13728 			dev_priv->display.optimize_watermarks(intel_cstate);
13729 	}
13730 
13731 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13732 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13733 
13734 		if (put_domains[i])
13735 			modeset_put_power_domains(dev_priv, put_domains[i]);
13736 
13737 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13738 	}
13739 
13740 	if (intel_state->modeset)
13741 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13742 
13743 	mutex_lock(&dev->struct_mutex);
13744 	drm_atomic_helper_cleanup_planes(dev, state);
13745 	mutex_unlock(&dev->struct_mutex);
13746 
13747 	drm_atomic_state_free(state);
13748 
13749 	/* As one of the primary mmio accessors, KMS has a high likelihood
13750 	 * of triggering bugs in unclaimed access. After we finish
13751 	 * modesetting, see if an error has been flagged, and if so
13752 	 * enable debugging for the next modeset - and hope we catch
13753 	 * the culprit.
13754 	 *
13755 	 * XXX note that we assume display power is on at this point.
13756 	 * This might hold true now but we need to add pm helper to check
13757 	 * unclaimed only when the hardware is on, as atomic commits
13758 	 * can happen also when the device is completely off.
13759 	 */
13760 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13761 
13762 	return 0;
13763 }
13764 
13765 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13766 {
13767 	struct drm_device *dev = crtc->dev;
13768 	struct drm_atomic_state *state;
13769 	struct drm_crtc_state *crtc_state;
13770 	int ret;
13771 
13772 	state = drm_atomic_state_alloc(dev);
13773 	if (!state) {
13774 		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
13775 			      crtc->base.id);
13776 		return;
13777 	}
13778 
13779 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13780 
13781 retry:
13782 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13783 	ret = PTR_ERR_OR_ZERO(crtc_state);
13784 	if (!ret) {
13785 		if (!crtc_state->active)
13786 			goto out;
13787 
13788 		crtc_state->mode_changed = true;
13789 		ret = drm_atomic_commit(state);
13790 	}
13791 
13792 	if (ret == -EDEADLK) {
13793 		drm_atomic_state_clear(state);
13794 		drm_modeset_backoff(state->acquire_ctx);
13795 		goto retry;
13796 	}
13797 
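	/*
	 * Note: the out: label sits on the free itself, so the success path
	 * (where the commit consumed the state) skips it, while both the
	 * error fallthrough and the inactive-crtc goto release the state.
	 */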
13798 	if (ret)
13799 out:
13800 		drm_atomic_state_free(state);
13801 }
13802 
13803 #undef for_each_intel_crtc_masked
13804 
13805 static const struct drm_crtc_funcs intel_crtc_funcs = {
13806 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
13807 	.set_config = drm_atomic_helper_set_config,
13808 	.set_property = drm_atomic_helper_crtc_set_property,
13809 	.destroy = intel_crtc_destroy,
13810 	.page_flip = intel_crtc_page_flip,
13811 	.atomic_duplicate_state = intel_crtc_duplicate_state,
13812 	.atomic_destroy_state = intel_crtc_destroy_state,
13813 };
13814 
13815 /**
13816  * intel_prepare_plane_fb - Prepare fb for usage on plane
13817  * @plane: drm plane to prepare for
13818  * @fb: framebuffer to prepare for presentation
13819  *
13820  * Prepares a framebuffer for usage on a display plane.  Generally this
13821  * involves pinning the underlying object and updating the frontbuffer tracking
13822  * bits.  Some older platforms need special physical address handling for
13823  * cursor planes.
13824  *
13825  * Must be called with struct_mutex held.
13826  *
13827  * Returns 0 on success, negative error code on failure.
13828  */
13829 int
13830 intel_prepare_plane_fb(struct drm_plane *plane,
13831 		       struct drm_plane_state *new_state)
13832 {
13833 	struct drm_device *dev = plane->dev;
13834 	struct drm_framebuffer *fb = new_state->fb;
13835 	struct intel_plane *intel_plane = to_intel_plane(plane);
13836 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13837 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13838 	int ret = 0;
13839 
13840 	if (!obj && !old_obj)
13841 		return 0;
13842 
13843 	if (old_obj) {
13844 		struct drm_crtc_state *crtc_state =
13845 			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13846 
13847 		/* Big Hammer, we also need to ensure that any pending
13848 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13849 		 * current scanout is retired before unpinning the old
13850 		 * framebuffer. Note that we rely on userspace rendering
13851 		 * into the buffer attached to the pipe they are waiting
13852 		 * on. If not, userspace generates a GPU hang with IPEHR
13853 		 * pointing to the MI_WAIT_FOR_EVENT.
13854 		 *
13855 		 * This should only fail upon a hung GPU, in which case we
13856 		 * can safely continue.
13857 		 */
13858 		if (needs_modeset(crtc_state))
13859 			ret = i915_gem_object_wait_rendering(old_obj, true);
13860 		if (ret) {
13861 			/* GPU hangs should have been swallowed by the wait */
13862 			WARN_ON(ret == -EIO);
13863 			return ret;
13864 		}
13865 	}
13866 
13867 	/* For framebuffer backed by dmabuf, wait for fence */
13868 #if 0
13869 	if (obj && obj->base.dma_buf) {
13870 		long lret;
13871 
13872 		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13873 							   false, true,
13874 							   MAX_SCHEDULE_TIMEOUT);
13875 		if (lret == -ERESTARTSYS)
13876 			return lret;
13877 
13878 		WARN(lret < 0, "waiting returns %li\n", lret);
13879 	}
13880 #endif
13881 
13882 	if (!obj) {
13883 		ret = 0;
13884 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13885 	    INTEL_INFO(dev)->cursor_needs_physical) {
13886 		int align = IS_I830(dev) ? 16 * 1024 : 256;
13887 		ret = i915_gem_object_attach_phys(obj, align);
13888 		if (ret)
13889 			DRM_DEBUG_KMS("failed to attach phys object\n");
13890 	} else {
13891 		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13892 	}
13893 
13894 	if (ret == 0) {
13895 		if (obj) {
13896 			struct intel_plane_state *plane_state =
13897 				to_intel_plane_state(new_state);
13898 
13899 			i915_gem_request_assign(&plane_state->wait_req,
13900 						obj->last_write_req);
13901 		}
13902 
13903 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13904 	}
13905 
13906 	return ret;
13907 }
13908 
13909 /**
13910  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13911  * @plane: drm plane to clean up for
13912  * @fb: old framebuffer that was on plane
13913  *
13914  * Cleans up a framebuffer that has just been removed from a plane.
13915  *
13916  * Must be called with struct_mutex held.
13917  */
13918 void
13919 intel_cleanup_plane_fb(struct drm_plane *plane,
13920 		       struct drm_plane_state *old_state)
13921 {
13922 	struct drm_device *dev = plane->dev;
13923 	struct intel_plane *intel_plane = to_intel_plane(plane);
13924 	struct intel_plane_state *old_intel_state;
13925 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13926 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13927 
13928 	old_intel_state = to_intel_plane_state(old_state);
13929 
13930 	if (!obj && !old_obj)
13931 		return;
13932 
13933 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13934 	    !INTEL_INFO(dev)->cursor_needs_physical))
13935 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
13936 
13937 	/* prepare_fb aborted? */
13938 	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13939 	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13940 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13941 
13942 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13943 }
13944 
13945 int
13946 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13947 {
13948 	int max_scale;
13949 	struct drm_device *dev;
13950 	struct drm_i915_private *dev_priv;
13951 	int crtc_clock, cdclk;
13952 
13953 	if (!intel_crtc || !crtc_state->base.enable)
13954 		return DRM_PLANE_HELPER_NO_SCALING;
13955 
13956 	dev = intel_crtc->base.dev;
13957 	dev_priv = dev->dev_private;
13958 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13959 	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13960 
13961 	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13962 		return DRM_PLANE_HELPER_NO_SCALING;
13963 
13964 	/*
13965 	 * The SKL max scale factor (in 16.16 fixed point) is the lower of:
13966 	 *    just under 3.0 (the -1 keeps the value strictly below 3)
13967 	 *            or
13968 	 *    cdclk / crtc_clock
13969 	 */
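	/*
	 * Illustrative numbers (not from any spec): with cdclk = 540000 kHz
	 * and crtc_clock = 300000 kHz, (cdclk << 8) / crtc_clock = 460, i.e.
	 * ~1.8 in 8.8 fixed point; shifted left by 8 again it is ~1.8 in
	 * 16.16 fixed point, which is below (1 << 16) * 3 - 1, so the clock
	 * ratio is the limiting factor in this case.
	 */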
13970 	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13971 
13972 	return max_scale;
13973 }
13974 
13975 static int
13976 intel_check_primary_plane(struct drm_plane *plane,
13977 			  struct intel_crtc_state *crtc_state,
13978 			  struct intel_plane_state *state)
13979 {
13980 	struct drm_crtc *crtc = state->base.crtc;
13981 	struct drm_framebuffer *fb = state->base.fb;
13982 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13983 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13984 	bool can_position = false;
13985 
13986 	if (INTEL_INFO(plane->dev)->gen >= 9) {
13987 		/* use scaler when colorkey is not required */
13988 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13989 			min_scale = 1;
13990 			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13991 		}
13992 		can_position = true;
13993 	}
13994 
13995 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13996 					     &state->dst, &state->clip,
13997 					     state->base.rotation,
13998 					     min_scale, max_scale,
13999 					     can_position, true,
14000 					     &state->visible);
14001 }
14002 
14003 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14004 				    struct drm_crtc_state *old_crtc_state)
14005 {
14006 	struct drm_device *dev = crtc->dev;
14007 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14008 	struct intel_crtc_state *old_intel_state =
14009 		to_intel_crtc_state(old_crtc_state);
14010 	bool modeset = needs_modeset(crtc->state);
14011 
14012 	/* Perform vblank evasion around commit operation */
14013 	intel_pipe_update_start(intel_crtc);
14014 
14015 	if (modeset)
14016 		return;
14017 
14018 	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14019 		intel_color_set_csc(crtc->state);
14020 		intel_color_load_luts(crtc->state);
14021 	}
14022 
14023 	if (to_intel_crtc_state(crtc->state)->update_pipe)
14024 		intel_update_pipe_config(intel_crtc, old_intel_state);
14025 	else if (INTEL_INFO(dev)->gen >= 9)
14026 		skl_detach_scalers(intel_crtc);
14027 }
14028 
14029 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14030 				     struct drm_crtc_state *old_crtc_state)
14031 {
14032 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14033 
14034 	intel_pipe_update_end(intel_crtc);
14035 }
14036 
14037 /**
14038  * intel_plane_destroy - destroy a plane
14039  * @plane: plane to destroy
14040  *
14041  * Common destruction function for all types of planes (primary, cursor,
14042  * sprite).
14043  */
14044 void intel_plane_destroy(struct drm_plane *plane)
14045 {
14046 	struct intel_plane *intel_plane = to_intel_plane(plane);
14047 	drm_plane_cleanup(plane);
14048 	kfree(intel_plane);
14049 }
14050 
14051 const struct drm_plane_funcs intel_plane_funcs = {
14052 	.update_plane = drm_atomic_helper_update_plane,
14053 	.disable_plane = drm_atomic_helper_disable_plane,
14054 	.destroy = intel_plane_destroy,
14055 	.set_property = drm_atomic_helper_plane_set_property,
14056 	.atomic_get_property = intel_plane_atomic_get_property,
14057 	.atomic_set_property = intel_plane_atomic_set_property,
14058 	.atomic_duplicate_state = intel_plane_duplicate_state,
14059 	.atomic_destroy_state = intel_plane_destroy_state,
14060 
14061 };
14062 
14063 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14064 						    int pipe)
14065 {
14066 	struct intel_plane *primary = NULL;
14067 	struct intel_plane_state *state = NULL;
14068 	const uint32_t *intel_primary_formats;
14069 	unsigned int num_formats;
14070 	int ret;
14071 
14072 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14073 	if (!primary)
14074 		goto fail;
14075 
14076 	state = intel_create_plane_state(&primary->base);
14077 	if (!state)
14078 		goto fail;
14079 	primary->base.state = &state->base;
14080 
14081 	primary->can_scale = false;
14082 	primary->max_downscale = 1;
14083 	if (INTEL_INFO(dev)->gen >= 9) {
14084 		primary->can_scale = true;
14085 		state->scaler_id = -1;
14086 	}
14087 	primary->pipe = pipe;
14088 	primary->plane = pipe;
14089 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14090 	primary->check_plane = intel_check_primary_plane;
14091 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14092 		primary->plane = !pipe;
14093 
14094 	if (INTEL_INFO(dev)->gen >= 9) {
14095 		intel_primary_formats = skl_primary_formats;
14096 		num_formats = ARRAY_SIZE(skl_primary_formats);
14097 
14098 		primary->update_plane = skylake_update_primary_plane;
14099 		primary->disable_plane = skylake_disable_primary_plane;
14100 	} else if (HAS_PCH_SPLIT(dev)) {
14101 		intel_primary_formats = i965_primary_formats;
14102 		num_formats = ARRAY_SIZE(i965_primary_formats);
14103 
14104 		primary->update_plane = ironlake_update_primary_plane;
14105 		primary->disable_plane = i9xx_disable_primary_plane;
14106 	} else if (INTEL_INFO(dev)->gen >= 4) {
14107 		intel_primary_formats = i965_primary_formats;
14108 		num_formats = ARRAY_SIZE(i965_primary_formats);
14109 
14110 		primary->update_plane = i9xx_update_primary_plane;
14111 		primary->disable_plane = i9xx_disable_primary_plane;
14112 	} else {
14113 		intel_primary_formats = i8xx_primary_formats;
14114 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14115 
14116 		primary->update_plane = i9xx_update_primary_plane;
14117 		primary->disable_plane = i9xx_disable_primary_plane;
14118 	}
14119 
14120 	ret = drm_universal_plane_init(dev, &primary->base, 0,
14121 				       &intel_plane_funcs,
14122 				       intel_primary_formats, num_formats,
14123 				       DRM_PLANE_TYPE_PRIMARY, NULL);
14124 	if (ret)
14125 		goto fail;
14126 
14127 	if (INTEL_INFO(dev)->gen >= 4)
14128 		intel_create_rotation_property(dev, primary);
14129 
14130 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14131 
14132 	return &primary->base;
14133 
14134 fail:
14135 	kfree(state);
14136 	kfree(primary);
14137 
14138 	return NULL;
14139 }
14140 
14141 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14142 {
14143 	if (!dev->mode_config.rotation_property) {
14144 		unsigned long flags = DRM_ROTATE_0 |
14145 			DRM_ROTATE_180;
14146 
14147 		if (INTEL_INFO(dev)->gen >= 9)
14148 			flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
14149 
14150 		dev->mode_config.rotation_property =
14151 			drm_mode_create_rotation_property(dev, flags);
14152 	}
14153 	if (dev->mode_config.rotation_property)
14154 		drm_object_attach_property(&plane->base.base,
14155 				dev->mode_config.rotation_property,
14156 				plane->base.state->rotation);
14157 }
14158 
14159 static int
14160 intel_check_cursor_plane(struct drm_plane *plane,
14161 			 struct intel_crtc_state *crtc_state,
14162 			 struct intel_plane_state *state)
14163 {
14164 	struct drm_crtc *crtc = crtc_state->base.crtc;
14165 	struct drm_framebuffer *fb = state->base.fb;
14166 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14167 	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
14168 	unsigned stride;
14169 	int ret;
14170 
14171 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14172 					    &state->dst, &state->clip,
14173 					    state->base.rotation,
14174 					    DRM_PLANE_HELPER_NO_SCALING,
14175 					    DRM_PLANE_HELPER_NO_SCALING,
14176 					    true, true, &state->visible);
14177 	if (ret)
14178 		return ret;
14179 
14180 	/* if we want to turn off the cursor, ignore width and height */
14181 	if (!obj)
14182 		return 0;
14183 
14184 	/* Check for which cursor types we support */
14185 	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14186 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14187 			  state->base.crtc_w, state->base.crtc_h);
14188 		return -EINVAL;
14189 	}
14190 
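	/*
	 * The cursor pitch is the power-of-two width times 4 bytes
	 * (ARGB8888); e.g. a 100x100 cursor rounds up to a 128-pixel
	 * stride, so the backing object must hold at least
	 * 128 * 4 * 100 = 51200 bytes.
	 */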
14191 	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14192 	if (obj->base.size < stride * state->base.crtc_h) {
14193 		DRM_DEBUG_KMS("buffer is too small\n");
14194 		return -ENOMEM;
14195 	}
14196 
14197 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14198 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14199 		return -EINVAL;
14200 	}
14201 
14202 	/*
14203 	 * There's something wrong with the cursor on CHV pipe C.
14204 	 * If it straddles the left edge of the screen then
14205 	 * moving it away from the edge or disabling it often
14206 	 * results in a pipe underrun, and often that can lead to
14207 	 * dead pipe (constant underrun reported, and it scans
14208 	 * out just a solid color). To recover from that, the
14209 	 * display power well must be turned off and on again.
14210 	 * Refuse to put the cursor into that compromised position.
14211 	 */
14212 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14213 	    state->visible && state->base.crtc_x < 0) {
14214 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14215 		return -EINVAL;
14216 	}
14217 
14218 	return 0;
14219 }
14220 
14221 static void
14222 intel_disable_cursor_plane(struct drm_plane *plane,
14223 			   struct drm_crtc *crtc)
14224 {
14225 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14226 
14227 	intel_crtc->cursor_addr = 0;
14228 	intel_crtc_update_cursor(crtc, NULL);
14229 }
14230 
14231 static void
14232 intel_update_cursor_plane(struct drm_plane *plane,
14233 			  const struct intel_crtc_state *crtc_state,
14234 			  const struct intel_plane_state *state)
14235 {
14236 	struct drm_crtc *crtc = crtc_state->base.crtc;
14237 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14238 	struct drm_device *dev = plane->dev;
14239 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14240 	uint32_t addr;
14241 
14242 	if (!obj)
14243 		addr = 0;
14244 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14245 		addr = i915_gem_obj_ggtt_offset(obj);
14246 	else
14247 		addr = obj->phys_handle->busaddr;
14248 
14249 	intel_crtc->cursor_addr = addr;
14250 	intel_crtc_update_cursor(crtc, state);
14251 }
14252 
14253 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14254 						   int pipe)
14255 {
14256 	struct intel_plane *cursor = NULL;
14257 	struct intel_plane_state *state = NULL;
14258 	int ret;
14259 
14260 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14261 	if (!cursor)
14262 		goto fail;
14263 
14264 	state = intel_create_plane_state(&cursor->base);
14265 	if (!state)
14266 		goto fail;
14267 	cursor->base.state = &state->base;
14268 
14269 	cursor->can_scale = false;
14270 	cursor->max_downscale = 1;
14271 	cursor->pipe = pipe;
14272 	cursor->plane = pipe;
14273 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14274 	cursor->check_plane = intel_check_cursor_plane;
14275 	cursor->update_plane = intel_update_cursor_plane;
14276 	cursor->disable_plane = intel_disable_cursor_plane;
14277 
14278 	ret = drm_universal_plane_init(dev, &cursor->base, 0,
14279 				       &intel_plane_funcs,
14280 				       intel_cursor_formats,
14281 				       ARRAY_SIZE(intel_cursor_formats),
14282 				       DRM_PLANE_TYPE_CURSOR,
14283 				       "cursor %c", pipe_name(pipe));
14284 	if (ret)
14285 		goto fail;
14286 
14287 	if (INTEL_INFO(dev)->gen >= 4) {
14288 		if (!dev->mode_config.rotation_property)
14289 			dev->mode_config.rotation_property =
14290 				drm_mode_create_rotation_property(dev,
14291 							DRM_ROTATE_0 |
14292 							DRM_ROTATE_180);
14293 		if (dev->mode_config.rotation_property)
14294 			drm_object_attach_property(&cursor->base.base,
14295 				dev->mode_config.rotation_property,
14296 				state->base.rotation);
14297 	}
14298 
14299 	if (INTEL_INFO(dev)->gen >= 9)
14300 		state->scaler_id = -1;
14301 
14302 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14303 
14304 	return &cursor->base;
14305 
14306 fail:
14307 	kfree(state);
14308 	kfree(cursor);
14309 
14310 	return NULL;
14311 }
14312 
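/*
 * Mark every shared scaler on the crtc as unused and in dynamic mode,
 * and clear the crtc's own scaler assignment.
 */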
14313 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14314 	struct intel_crtc_state *crtc_state)
14315 {
14316 	int i;
14317 	struct intel_scaler *intel_scaler;
14318 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14319 
14320 	for (i = 0; i < intel_crtc->num_scalers; i++) {
14321 		intel_scaler = &scaler_state->scalers[i];
14322 		intel_scaler->in_use = 0;
14323 		intel_scaler->mode = PS_SCALER_MODE_DYN;
14324 	}
14325 
14326 	scaler_state->scaler_id = -1;
14327 }
14328 
14329 static void intel_crtc_init(struct drm_device *dev, int pipe)
14330 {
14331 	struct drm_i915_private *dev_priv = dev->dev_private;
14332 	struct intel_crtc *intel_crtc;
14333 	struct intel_crtc_state *crtc_state = NULL;
14334 	struct drm_plane *primary = NULL;
14335 	struct drm_plane *cursor = NULL;
14336 	int ret;
14337 
14338 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14339 	if (intel_crtc == NULL)
14340 		return;
14341 
14342 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14343 	if (!crtc_state)
14344 		goto fail;
14345 	intel_crtc->config = crtc_state;
14346 	intel_crtc->base.state = &crtc_state->base;
14347 	crtc_state->base.crtc = &intel_crtc->base;
14348 
14349 	/* initialize shared scalers */
14350 	if (INTEL_INFO(dev)->gen >= 9) {
14351 		if (pipe == PIPE_C)
14352 			intel_crtc->num_scalers = 1;
14353 		else
14354 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14355 
14356 		skl_init_scalers(dev, intel_crtc, crtc_state);
14357 	}
14358 
14359 	primary = intel_primary_plane_create(dev, pipe);
14360 	if (!primary)
14361 		goto fail;
14362 
14363 	cursor = intel_cursor_plane_create(dev, pipe);
14364 	if (!cursor)
14365 		goto fail;
14366 
14367 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14368 					cursor, &intel_crtc_funcs, NULL);
14369 	if (ret)
14370 		goto fail;
14371 
14372 	/*
14373 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14374 	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
14375 	 */
14376 	intel_crtc->pipe = pipe;
14377 	intel_crtc->plane = pipe;
14378 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14379 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14380 		intel_crtc->plane = !pipe;
14381 	}
14382 
14383 	intel_crtc->cursor_base = ~0;
14384 	intel_crtc->cursor_cntl = ~0;
14385 	intel_crtc->cursor_size = ~0;
14386 
14387 	intel_crtc->wm.cxsr_allowed = true;
14388 
14389 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14390 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14391 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14392 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14393 
14394 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14395 
14396 	intel_color_init(&intel_crtc->base);
14397 
14398 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14399 	return;
14400 
14401 fail:
14402 	if (primary)
14403 		drm_plane_cleanup(primary);
14404 	if (cursor)
14405 		drm_plane_cleanup(cursor);
14406 	kfree(crtc_state);
14407 	kfree(intel_crtc);
14408 }
14409 
14410 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14411 {
14412 	struct drm_encoder *encoder = connector->base.encoder;
14413 	struct drm_device *dev = connector->base.dev;
14414 
14415 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14416 
14417 	if (!encoder || WARN_ON(!encoder->crtc))
14418 		return INVALID_PIPE;
14419 
14420 	return to_intel_crtc(encoder->crtc)->pipe;
14421 }
14422 
14423 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14424 				struct drm_file *file)
14425 {
14426 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14427 	struct drm_crtc *drmmode_crtc;
14428 	struct intel_crtc *crtc;
14429 
14430 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14431 
14432 	if (!drmmode_crtc) {
14433 		DRM_ERROR("no such CRTC id\n");
14434 		return -ENOENT;
14435 	}
14436 
14437 	crtc = to_intel_crtc(drmmode_crtc);
14438 	pipe_from_crtc_id->pipe = crtc->pipe;
14439 
14440 	return 0;
14441 }
14442 
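/*
 * Build a bitmask, indexed by position in the device's encoder list, of
 * every encoder that can share a crtc with @encoder.
 */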
14443 static int intel_encoder_clones(struct intel_encoder *encoder)
14444 {
14445 	struct drm_device *dev = encoder->base.dev;
14446 	struct intel_encoder *source_encoder;
14447 	int index_mask = 0;
14448 	int entry = 0;
14449 
14450 	for_each_intel_encoder(dev, source_encoder) {
14451 		if (encoders_cloneable(encoder, source_encoder))
14452 			index_mask |= (1 << entry);
14453 
14454 		entry++;
14455 	}
14456 
14457 	return index_mask;
14458 }
14459 
14460 static bool has_edp_a(struct drm_device *dev)
14461 {
14462 	struct drm_i915_private *dev_priv = dev->dev_private;
14463 
14464 	if (!IS_MOBILE(dev))
14465 		return false;
14466 
14467 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14468 		return false;
14469 
14470 	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14471 		return false;
14472 
14473 	return true;
14474 }
14475 
14476 static bool intel_crt_present(struct drm_device *dev)
14477 {
14478 	struct drm_i915_private *dev_priv = dev->dev_private;
14479 
14480 	if (INTEL_INFO(dev)->gen >= 9)
14481 		return false;
14482 
14483 	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14484 		return false;
14485 
14486 	if (IS_CHERRYVIEW(dev))
14487 		return false;
14488 
14489 	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14490 		return false;
14491 
14492 	/* DDI E can't be used if DDI A requires 4 lanes */
14493 	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14494 		return false;
14495 
14496 	if (!dev_priv->vbt.int_crt_support)
14497 		return false;
14498 
14499 	return true;
14500 }
14501 
14502 static void intel_setup_outputs(struct drm_device *dev)
14503 {
14504 	struct drm_i915_private *dev_priv = dev->dev_private;
14505 	struct intel_encoder *encoder;
14506 	bool dpd_is_edp = false;
14507 
14508 	intel_lvds_init(dev);
14509 
14510 	if (intel_crt_present(dev))
14511 		intel_crt_init(dev);
14512 
14513 	if (IS_BROXTON(dev)) {
14514 		/*
14515 		 * FIXME: Broxton doesn't support port detection via the
14516 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14517 		 * detect the ports.
14518 		 */
14519 		intel_ddi_init(dev, PORT_A);
14520 		intel_ddi_init(dev, PORT_B);
14521 		intel_ddi_init(dev, PORT_C);
14522 
14523 		intel_dsi_init(dev);
14524 	} else if (HAS_DDI(dev)) {
14525 		int found;
14526 
14527 		/*
14528 		 * Haswell uses DDI functions to detect digital outputs.
14529 		 * On SKL pre-D0 the strap isn't connected, so we assume
14530 		 * it's there.
14531 		 */
14532 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14533 		/* WaIgnoreDDIAStrap: skl */
14534 		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14535 			intel_ddi_init(dev, PORT_A);
14536 
14537 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14538 		 * register */
14539 		found = I915_READ(SFUSE_STRAP);
14540 
14541 		if (found & SFUSE_STRAP_DDIB_DETECTED)
14542 			intel_ddi_init(dev, PORT_B);
14543 		if (found & SFUSE_STRAP_DDIC_DETECTED)
14544 			intel_ddi_init(dev, PORT_C);
14545 		if (found & SFUSE_STRAP_DDID_DETECTED)
14546 			intel_ddi_init(dev, PORT_D);
14547 		/*
14548 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14549 		 */
14550 		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14551 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14552 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14553 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14554 			intel_ddi_init(dev, PORT_E);
14555 
14556 	} else if (HAS_PCH_SPLIT(dev)) {
14557 		int found;
14558 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14559 
14560 		if (has_edp_a(dev))
14561 			intel_dp_init(dev, DP_A, PORT_A);
14562 
14563 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14564 			/* PCH SDVOB multiplex with HDMIB */
14565 			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14566 			if (!found)
14567 				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14568 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14569 				intel_dp_init(dev, PCH_DP_B, PORT_B);
14570 		}
14571 
14572 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14573 			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14574 
14575 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14576 			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14577 
14578 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14579 			intel_dp_init(dev, PCH_DP_C, PORT_C);
14580 
14581 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14582 			intel_dp_init(dev, PCH_DP_D, PORT_D);
14583 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14584 		bool has_edp, has_port;
14585 
14586 		/*
14587 		 * The DP_DETECTED bit is the latched state of the DDC
14588 		 * SDA pin at boot. However since eDP doesn't require DDC
14589 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14590 		 * eDP ports may have been muxed to an alternate function.
14591 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14592 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14593 		 * detect eDP ports.
14594 		 *
14595 		 * Sadly the straps seem to be missing sometimes even for HDMI
14596 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14597 		 * and VBT for the presence of the port. Additionally we can't
14598 		 * trust the port type the VBT declares as we've seen at least
14599 		 * HDMI ports that the VBT claims are DP or eDP.
14600 		 */
14601 		has_edp = intel_dp_is_edp(dev, PORT_B);
14602 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14603 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14604 			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14605 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14606 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14607 
14608 		has_edp = intel_dp_is_edp(dev, PORT_C);
14609 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14610 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14611 			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14612 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14613 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14614 
14615 		if (IS_CHERRYVIEW(dev)) {
14616 			/*
14617 			 * eDP not supported on port D,
14618 			 * so no need to worry about it
14619 			 */
14620 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14621 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14622 				intel_dp_init(dev, CHV_DP_D, PORT_D);
14623 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14624 				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14625 		}
14626 
14627 		intel_dsi_init(dev);
14628 	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14629 		bool found = false;
14630 
14631 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14632 			DRM_DEBUG_KMS("probing SDVOB\n");
14633 			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14634 			if (!found && IS_G4X(dev)) {
14635 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14636 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14637 			}
14638 
14639 			if (!found && IS_G4X(dev))
14640 				intel_dp_init(dev, DP_B, PORT_B);
14641 		}
14642 
14643 	/* Before G4X, SDVOC doesn't have its own detect register */
14644 
14645 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14646 			DRM_DEBUG_KMS("probing SDVOC\n");
14647 			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14648 		}
14649 
14650 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14651 
14652 			if (IS_G4X(dev)) {
14653 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14654 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14655 			}
14656 			if (IS_G4X(dev))
14657 				intel_dp_init(dev, DP_C, PORT_C);
14658 		}
14659 
14660 		if (IS_G4X(dev) &&
14661 		    (I915_READ(DP_D) & DP_DETECTED))
14662 			intel_dp_init(dev, DP_D, PORT_D);
14663 	} else if (IS_GEN2(dev))
14664 		intel_dvo_init(dev);
14665 
14666 	if (SUPPORTS_TV(dev))
14667 		intel_tv_init(dev);
14668 
14669 	intel_psr_init(dev);
14670 
14671 	for_each_intel_encoder(dev, encoder) {
14672 		encoder->base.possible_crtcs = encoder->crtc_mask;
14673 		encoder->base.possible_clones =
14674 			intel_encoder_clones(encoder);
14675 	}
14676 
14677 	intel_init_pch_refclk(dev);
14678 
14679 	drm_helper_move_panel_connectors_to_head(dev);
14680 }
14681 
14682 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14683 {
14684 	struct drm_device *dev = fb->dev;
14685 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14686 
14687 	drm_framebuffer_cleanup(fb);
14688 	mutex_lock(&dev->struct_mutex);
14689 	WARN_ON(!intel_fb->obj->framebuffer_references--);
14690 	drm_gem_object_unreference(&intel_fb->obj->base);
14691 	mutex_unlock(&dev->struct_mutex);
14692 	kfree(intel_fb);
14693 }
14694 
14695 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14696 						struct drm_file *file,
14697 						unsigned int *handle)
14698 {
14699 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14700 	struct drm_i915_gem_object *obj = intel_fb->obj;
14701 
14702 	if (obj->userptr.mm) {
14703 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14704 		return -EINVAL;
14705 	}
14706 
14707 	return drm_gem_handle_create(file, &obj->base, handle);
14708 }
14709 
14710 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14711 					struct drm_file *file,
14712 					unsigned flags, unsigned color,
14713 					struct drm_clip_rect *clips,
14714 					unsigned num_clips)
14715 {
14716 	struct drm_device *dev = fb->dev;
14717 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14718 	struct drm_i915_gem_object *obj = intel_fb->obj;
14719 
14720 	mutex_lock(&dev->struct_mutex);
14721 	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14722 	mutex_unlock(&dev->struct_mutex);
14723 
14724 	return 0;
14725 }
14726 
14727 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14728 	.destroy = intel_user_framebuffer_destroy,
14729 	.create_handle = intel_user_framebuffer_create_handle,
14730 	.dirty = intel_user_framebuffer_dirty,
14731 };
14732 
14733 static
14734 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14735 			 uint32_t pixel_format)
14736 {
14737 	u32 gen = INTEL_INFO(dev)->gen;
14738 
14739 	if (gen >= 9) {
14740 		int cpp = drm_format_plane_cpp(pixel_format, 0);
14741 
14742 		/* "The stride in bytes must not exceed the size of 8K
14743 		 *  pixels and 32K bytes."
14744 		 */
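		/*
		 * For a 4-byte format both limits coincide at 32768 bytes;
		 * for smaller cpp the 8K-pixel bound is the tighter one
		 * (e.g. 16384 bytes at 16bpp).
		 */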
14745 		return min(8192 * cpp, 32768);
14746 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14747 		return 32*1024;
14748 	} else if (gen >= 4) {
14749 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14750 			return 16*1024;
14751 		else
14752 			return 32*1024;
14753 	} else if (gen >= 3) {
14754 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14755 			return 8*1024;
14756 		else
14757 			return 16*1024;
14758 	} else {
14759 		/* XXX DSPC is limited to 4k tiled */
14760 		return 8*1024;
14761 	}
14762 }
14763 
14764 static int intel_framebuffer_init(struct drm_device *dev,
14765 				  struct intel_framebuffer *intel_fb,
14766 				  struct drm_mode_fb_cmd2 *mode_cmd,
14767 				  struct drm_i915_gem_object *obj)
14768 {
14769 	struct drm_i915_private *dev_priv = to_i915(dev);
14770 	unsigned int aligned_height;
14771 	int ret;
14772 	u32 pitch_limit, stride_alignment;
14773 
14774 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14775 
14776 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14777 		/* Enforce that fb modifier and tiling mode match, but only for
14778 		 * X-tiled. This is needed for FBC. */
14779 		if (!!(obj->tiling_mode == I915_TILING_X) !=
14780 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14781 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14782 			return -EINVAL;
14783 		}
14784 	} else {
14785 		if (obj->tiling_mode == I915_TILING_X)
14786 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14787 		else if (obj->tiling_mode == I915_TILING_Y) {
14788 			DRM_DEBUG("No Y tiling for legacy addfb\n");
14789 			return -EINVAL;
14790 		}
14791 	}
14792 
14793 	/* Passed in modifier sanity checking. */
14794 	switch (mode_cmd->modifier[0]) {
14795 	case I915_FORMAT_MOD_Y_TILED:
14796 	case I915_FORMAT_MOD_Yf_TILED:
14797 		if (INTEL_INFO(dev)->gen < 9) {
14798 			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14799 				  mode_cmd->modifier[0]);
14800 			return -EINVAL;
14801 		}
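		/* fall through - Y/Yf tiling is acceptable on gen9+ */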
14802 	case DRM_FORMAT_MOD_NONE:
14803 	case I915_FORMAT_MOD_X_TILED:
14804 		break;
14805 	default:
14806 		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14807 			  mode_cmd->modifier[0]);
14808 		return -EINVAL;
14809 	}
14810 
14811 	stride_alignment = intel_fb_stride_alignment(dev_priv,
14812 						     mode_cmd->modifier[0],
14813 						     mode_cmd->pixel_format);
14814 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14815 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14816 			  mode_cmd->pitches[0], stride_alignment);
14817 		return -EINVAL;
14818 	}
14819 
14820 	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14821 					   mode_cmd->pixel_format);
14822 	if (mode_cmd->pitches[0] > pitch_limit) {
14823 		DRM_DEBUG("%s pitch (%u) must be less than %d\n",
14824 			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14825 			  "tiled" : "linear",
14826 			  mode_cmd->pitches[0], pitch_limit);
14827 		return -EINVAL;
14828 	}
14829 
14830 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14831 	    mode_cmd->pitches[0] != obj->stride) {
14832 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14833 			  mode_cmd->pitches[0], obj->stride);
14834 		return -EINVAL;
14835 	}
14836 
14837 	/* Reject formats not supported by any plane early. */
14838 	switch (mode_cmd->pixel_format) {
14839 	case DRM_FORMAT_C8:
14840 	case DRM_FORMAT_RGB565:
14841 	case DRM_FORMAT_XRGB8888:
14842 	case DRM_FORMAT_ARGB8888:
14843 		break;
14844 	case DRM_FORMAT_XRGB1555:
14845 		if (INTEL_INFO(dev)->gen > 3) {
14846 			DRM_DEBUG("unsupported pixel format: %s\n",
14847 				  drm_get_format_name(mode_cmd->pixel_format));
14848 			return -EINVAL;
14849 		}
14850 		break;
14851 	case DRM_FORMAT_ABGR8888:
14852 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14853 		    INTEL_INFO(dev)->gen < 9) {
14854 			DRM_DEBUG("unsupported pixel format: %s\n",
14855 				  drm_get_format_name(mode_cmd->pixel_format));
14856 			return -EINVAL;
14857 		}
14858 		break;
14859 	case DRM_FORMAT_XBGR8888:
14860 	case DRM_FORMAT_XRGB2101010:
14861 	case DRM_FORMAT_XBGR2101010:
14862 		if (INTEL_INFO(dev)->gen < 4) {
14863 			DRM_DEBUG("unsupported pixel format: %s\n",
14864 				  drm_get_format_name(mode_cmd->pixel_format));
14865 			return -EINVAL;
14866 		}
14867 		break;
14868 	case DRM_FORMAT_ABGR2101010:
14869 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14870 			DRM_DEBUG("unsupported pixel format: %s\n",
14871 				  drm_get_format_name(mode_cmd->pixel_format));
14872 			return -EINVAL;
14873 		}
14874 		break;
14875 	case DRM_FORMAT_YUYV:
14876 	case DRM_FORMAT_UYVY:
14877 	case DRM_FORMAT_YVYU:
14878 	case DRM_FORMAT_VYUY:
14879 		if (INTEL_INFO(dev)->gen < 5) {
14880 			DRM_DEBUG("unsupported pixel format: %s\n",
14881 				  drm_get_format_name(mode_cmd->pixel_format));
14882 			return -EINVAL;
14883 		}
14884 		break;
14885 	default:
14886 		DRM_DEBUG("unsupported pixel format: %s\n",
14887 			  drm_get_format_name(mode_cmd->pixel_format));
14888 		return -EINVAL;
14889 	}
14890 
14891 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14892 	if (mode_cmd->offsets[0] != 0)
14893 		return -EINVAL;
14894 
14895 	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14896 					       mode_cmd->pixel_format,
14897 					       mode_cmd->modifier[0]);
14898 	/* FIXME drm helper for size checks (especially planar formats)? */
14899 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14900 		return -EINVAL;
14901 
14902 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14903 	intel_fb->obj = obj;
14904 
14905 	intel_fill_fb_info(dev_priv, &intel_fb->base);
14906 
14907 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14908 	if (ret) {
14909 		DRM_ERROR("framebuffer init failed %d\n", ret);
14910 		return ret;
14911 	}
14912 
14913 	intel_fb->obj->framebuffer_references++;
14914 
14915 	return 0;
14916 }
14917 
14918 static struct drm_framebuffer *
14919 intel_user_framebuffer_create(struct drm_device *dev,
14920 			      struct drm_file *filp,
14921 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14922 {
14923 	struct drm_framebuffer *fb;
14924 	struct drm_i915_gem_object *obj;
14925 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14926 
14927 	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
14928 	if (&obj->base == NULL)
14929 		return ERR_PTR(-ENOENT);
14930 
14931 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14932 	if (IS_ERR(fb))
14933 		drm_gem_object_unreference_unlocked(&obj->base);
14934 
14935 	return fb;
14936 }
14937 
14938 #ifndef CONFIG_DRM_FBDEV_EMULATION
14939 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14940 {
14941 }
14942 #endif
14943 
14944 static const struct drm_mode_config_funcs intel_mode_funcs = {
14945 	.fb_create = intel_user_framebuffer_create,
14946 	.output_poll_changed = intel_fbdev_output_poll_changed,
14947 	.atomic_check = intel_atomic_check,
14948 	.atomic_commit = intel_atomic_commit,
14949 	.atomic_state_alloc = intel_atomic_state_alloc,
14950 	.atomic_state_clear = intel_atomic_state_clear,
14951 };
14952 
14953 /**
14954  * intel_init_display_hooks - initialize the display modesetting hooks
14955  * @dev_priv: device private
14956  */
14957 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14958 {
14959 	if (INTEL_INFO(dev_priv)->gen >= 9) {
14960 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14961 		dev_priv->display.get_initial_plane_config =
14962 			skylake_get_initial_plane_config;
14963 		dev_priv->display.crtc_compute_clock =
14964 			haswell_crtc_compute_clock;
14965 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14966 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14967 	} else if (HAS_DDI(dev_priv)) {
14968 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14969 		dev_priv->display.get_initial_plane_config =
14970 			ironlake_get_initial_plane_config;
14971 		dev_priv->display.crtc_compute_clock =
14972 			haswell_crtc_compute_clock;
14973 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14974 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14975 	} else if (HAS_PCH_SPLIT(dev_priv)) {
14976 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14977 		dev_priv->display.get_initial_plane_config =
14978 			ironlake_get_initial_plane_config;
14979 		dev_priv->display.crtc_compute_clock =
14980 			ironlake_crtc_compute_clock;
14981 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14982 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14983 	} else if (IS_CHERRYVIEW(dev_priv)) {
14984 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14985 		dev_priv->display.get_initial_plane_config =
14986 			i9xx_get_initial_plane_config;
14987 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14988 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14989 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14990 	} else if (IS_VALLEYVIEW(dev_priv)) {
14991 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14992 		dev_priv->display.get_initial_plane_config =
14993 			i9xx_get_initial_plane_config;
14994 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14995 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14996 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14997 	} else if (IS_G4X(dev_priv)) {
14998 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14999 		dev_priv->display.get_initial_plane_config =
15000 			i9xx_get_initial_plane_config;
15001 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15002 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15003 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15004 	} else if (IS_PINEVIEW(dev_priv)) {
15005 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15006 		dev_priv->display.get_initial_plane_config =
15007 			i9xx_get_initial_plane_config;
15008 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15009 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15010 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15011 	} else if (!IS_GEN2(dev_priv)) {
15012 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15013 		dev_priv->display.get_initial_plane_config =
15014 			i9xx_get_initial_plane_config;
15015 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15016 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15017 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15018 	} else {
15019 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15020 		dev_priv->display.get_initial_plane_config =
15021 			i9xx_get_initial_plane_config;
15022 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15023 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15024 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15025 	}
15026 
15027 	/* Returns the core display clock speed */
15028 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
15029 		dev_priv->display.get_display_clock_speed =
15030 			skylake_get_display_clock_speed;
15031 	else if (IS_BROXTON(dev_priv))
15032 		dev_priv->display.get_display_clock_speed =
15033 			broxton_get_display_clock_speed;
15034 	else if (IS_BROADWELL(dev_priv))
15035 		dev_priv->display.get_display_clock_speed =
15036 			broadwell_get_display_clock_speed;
15037 	else if (IS_HASWELL(dev_priv))
15038 		dev_priv->display.get_display_clock_speed =
15039 			haswell_get_display_clock_speed;
15040 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15041 		dev_priv->display.get_display_clock_speed =
15042 			valleyview_get_display_clock_speed;
15043 	else if (IS_GEN5(dev_priv))
15044 		dev_priv->display.get_display_clock_speed =
15045 			ilk_get_display_clock_speed;
15046 	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
15047 		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
15048 		dev_priv->display.get_display_clock_speed =
15049 			i945_get_display_clock_speed;
15050 	else if (IS_GM45(dev_priv))
15051 		dev_priv->display.get_display_clock_speed =
15052 			gm45_get_display_clock_speed;
15053 	else if (IS_CRESTLINE(dev_priv))
15054 		dev_priv->display.get_display_clock_speed =
15055 			i965gm_get_display_clock_speed;
15056 	else if (IS_PINEVIEW(dev_priv))
15057 		dev_priv->display.get_display_clock_speed =
15058 			pnv_get_display_clock_speed;
15059 	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
15060 		dev_priv->display.get_display_clock_speed =
15061 			g33_get_display_clock_speed;
15062 	else if (IS_I915G(dev_priv))
15063 		dev_priv->display.get_display_clock_speed =
15064 			i915_get_display_clock_speed;
15065 	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
15066 		dev_priv->display.get_display_clock_speed =
15067 			i9xx_misc_get_display_clock_speed;
15068 	else if (IS_I915GM(dev_priv))
15069 		dev_priv->display.get_display_clock_speed =
15070 			i915gm_get_display_clock_speed;
15071 	else if (IS_I865G(dev_priv))
15072 		dev_priv->display.get_display_clock_speed =
15073 			i865_get_display_clock_speed;
15074 	else if (IS_I85X(dev_priv))
15075 		dev_priv->display.get_display_clock_speed =
15076 			i85x_get_display_clock_speed;
15077 	else { /* 830 */
15078 		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
15079 		dev_priv->display.get_display_clock_speed =
15080 			i830_get_display_clock_speed;
15081 	}
15082 
15083 	if (IS_GEN5(dev_priv)) {
15084 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15085 	} else if (IS_GEN6(dev_priv)) {
15086 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15087 	} else if (IS_IVYBRIDGE(dev_priv)) {
15088 		/* FIXME: detect B0+ stepping and use auto training */
15089 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15090 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15091 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15092 		if (IS_BROADWELL(dev_priv)) {
15093 			dev_priv->display.modeset_commit_cdclk =
15094 				broadwell_modeset_commit_cdclk;
15095 			dev_priv->display.modeset_calc_cdclk =
15096 				broadwell_modeset_calc_cdclk;
15097 		}
15098 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15099 		dev_priv->display.modeset_commit_cdclk =
15100 			valleyview_modeset_commit_cdclk;
15101 		dev_priv->display.modeset_calc_cdclk =
15102 			valleyview_modeset_calc_cdclk;
15103 	} else if (IS_BROXTON(dev_priv)) {
15104 		dev_priv->display.modeset_commit_cdclk =
15105 			broxton_modeset_commit_cdclk;
15106 		dev_priv->display.modeset_calc_cdclk =
15107 			broxton_modeset_calc_cdclk;
15108 	}
15109 
15110 	switch (INTEL_INFO(dev_priv)->gen) {
15111 	case 2:
15112 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15113 		break;
15114 
15115 	case 3:
15116 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15117 		break;
15118 
15119 	case 4:
15120 	case 5:
15121 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15122 		break;
15123 
15124 	case 6:
15125 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15126 		break;
15127 	case 7:
15128 	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15129 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15130 		break;
15131 	case 9:
15132 		/* Drop through - unsupported since execlist only. */
15133 	default:
15134 		/* Default just returns -ENODEV to indicate unsupported */
15135 		dev_priv->display.queue_flip = intel_default_queue_flip;
15136 	}
15137 }
15138 
15139 /*
15140  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15141  * resume, or other times.  This quirk makes sure that's the case for
15142  * affected systems.
15143  */
15144 static void quirk_pipea_force(struct drm_device *dev)
15145 {
15146 	struct drm_i915_private *dev_priv = dev->dev_private;
15147 
15148 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15149 	DRM_INFO("applying pipe a force quirk\n");
15150 }
15151 
15152 static void quirk_pipeb_force(struct drm_device *dev)
15153 {
15154 	struct drm_i915_private *dev_priv = dev->dev_private;
15155 
15156 	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15157 	DRM_INFO("applying pipe b force quirk\n");
15158 }
15159 
15160 /*
15161  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15162  */
15163 static void quirk_ssc_force_disable(struct drm_device *dev)
15164 {
15165 	struct drm_i915_private *dev_priv = dev->dev_private;
15166 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15167 	DRM_INFO("applying lvds SSC disable quirk\n");
15168 }
15169 
15170 /*
15171  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15172  * brightness value
15173  */
15174 static void quirk_invert_brightness(struct drm_device *dev)
15175 {
15176 	struct drm_i915_private *dev_priv = dev->dev_private;
15177 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15178 	DRM_INFO("applying inverted panel brightness quirk\n");
15179 }
15180 
15181 /* Some VBTs incorrectly indicate no backlight is present */
15182 static void quirk_backlight_present(struct drm_device *dev)
15183 {
15184 	struct drm_i915_private *dev_priv = dev->dev_private;
15185 	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15186 	DRM_INFO("applying backlight present quirk\n");
15187 }
15188 
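/*
 * PCI-ID-based quirk table entry: the device ID must match exactly, while
 * subsystem vendor/device may be wildcarded with PCI_ANY_ID (the matching
 * happens in intel_init_quirks() below).
 */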
15189 struct intel_quirk {
15190 	int device;
15191 	int subsystem_vendor;
15192 	int subsystem_device;
15193 	void (*hook)(struct drm_device *dev);
15194 };
15195 
15196 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15197 struct intel_dmi_quirk {
15198 	void (*hook)(struct drm_device *dev);
15199 	const struct dmi_system_id (*dmi_id_list)[];
15200 };
15201 
15202 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15203 {
15204 	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15205 	return 1;
15206 }
15207 
15208 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15209 	{
15210 		.dmi_id_list = &(const struct dmi_system_id[]) {
15211 			{
15212 				.callback = intel_dmi_reverse_brightness,
15213 				.ident = "NCR Corporation",
15214 				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15215 					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15216 				},
15217 			},
15218 			{ }  /* terminating entry */
15219 		},
15220 		.hook = quirk_invert_brightness,
15221 	},
15222 };
15223 
15224 static struct intel_quirk intel_quirks[] = {
15225 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15226 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15227 
15228 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15229 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15230 
15231 	/* 830 needs to leave pipe A & dpll A up */
15232 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15233 
15234 	/* 830 needs to leave pipe B & dpll B up */
15235 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15236 
15237 	/* Lenovo U160 cannot use SSC on LVDS */
15238 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15239 
15240 	/* Sony Vaio Y cannot use SSC on LVDS */
15241 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15242 
15243 	/* Acer Aspire 5734Z must invert backlight brightness */
15244 	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15245 
15246 	/* Acer/eMachines G725 */
15247 	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15248 
15249 	/* Acer/eMachines e725 */
15250 	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15251 
15252 	/* Acer/Packard Bell NCL20 */
15253 	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15254 
15255 	/* Acer Aspire 4736Z */
15256 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15257 
15258 	/* Acer Aspire 5336 */
15259 	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15260 
15261 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15262 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15263 
15264 	/* Acer C720 Chromebook (Core i3 4005U) */
15265 	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15266 
15267 	/* Apple Macbook 2,1 (Core 2 T7400) */
15268 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15269 
15270 	/* Apple Macbook 4,1 */
15271 	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15272 
15273 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15274 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15275 
15276 	/* HP Chromebook 14 (Celeron 2955U) */
15277 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15278 
15279 	/* Dell Chromebook 11 */
15280 	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15281 
15282 	/* Dell Chromebook 11 (2015 version) */
15283 	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15284 };
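
/*
 * Illustrative sketch only, not a real entry: a newly quirked machine would
 * add one line to the table above, e.g.
 *
 *	{ 0x0a16, 0x1028, 0x1234, quirk_backlight_present },
 *
 * where 0x1234 stands in for that machine's PCI subsystem device ID.
 */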
15285 
15286 static void intel_init_quirks(struct drm_device *dev)
15287 {
15288 	struct pci_dev *d = dev->pdev;
15289 	int i;
15290 
15291 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15292 		struct intel_quirk *q = &intel_quirks[i];
15293 
15294 		if (d->device == q->device &&
15295 		    (d->subsystem_vendor == q->subsystem_vendor ||
15296 		     q->subsystem_vendor == PCI_ANY_ID) &&
15297 		    (d->subsystem_device == q->subsystem_device ||
15298 		     q->subsystem_device == PCI_ANY_ID))
15299 			q->hook(dev);
15300 	}
15301 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15302 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15303 			intel_dmi_quirks[i].hook(dev);
15304 	}
15305 }
15306 
15307 /* Disable the VGA plane that we never use */
15308 static void i915_disable_vga(struct drm_device *dev)
15309 {
15310 	struct drm_i915_private *dev_priv = dev->dev_private;
15311 	u8 sr1;
15312 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15313 
15314 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15315 #if 0
15316 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15317 #endif
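	/* SR01 bit 5 is the legacy VGA "screen off" bit: blank the screen
	 * before disabling the VGA plane, then let it settle below. */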
15318 	outb(VGA_SR_INDEX, SR01);
15319 	sr1 = inb(VGA_SR_DATA);
15320 	outb(VGA_SR_DATA, sr1 | 1 << 5);
15321 #if 0
15322 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15323 #endif
15324 	udelay(300);
15325 
15326 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15327 	POSTING_READ(vga_reg);
15328 }
15329 
15330 void intel_modeset_init_hw(struct drm_device *dev)
15331 {
15332 	struct drm_i915_private *dev_priv = dev->dev_private;
15333 
15334 	intel_update_cdclk(dev);
15335 
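	/* Seed the atomic cdclk bookkeeping with the value just read back
	 * from the hardware. */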
15336 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15337 
15338 	intel_init_clock_gating(dev);
15339 	intel_enable_gt_powersave(dev);
15340 }
15341 
15342 /*
15343  * Calculate what we think the watermarks should be for the state we've read
15344  * out of the hardware and then immediately program those watermarks so that
15345  * we ensure the hardware settings match our internal state.
15346  *
15347  * We can calculate what we think WMs should be by creating a duplicate of the
15348  * current state (which was constructed during hardware readout) and running it
15349  * through the atomic check code to calculate new watermark values in the
15350  * state object.
15351  */
15352 static void sanitize_watermarks(struct drm_device *dev)
15353 {
15354 	struct drm_i915_private *dev_priv = to_i915(dev);
15355 	struct drm_atomic_state *state;
15356 	struct drm_crtc *crtc;
15357 	struct drm_crtc_state *cstate;
15358 	struct drm_modeset_acquire_ctx ctx;
15359 	int ret;
15360 	int i;
15361 
15362 	/* Only supported on platforms that use the atomic watermark design */
15363 	if (!dev_priv->display.optimize_watermarks)
15364 		return;
15365 
15366 	/*
15367 	 * We need to hold connection_mutex before calling duplicate_state so
15368 	 * that the connector loop is protected.
15369 	 */
15370 	drm_modeset_acquire_init(&ctx, 0);
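	/* Usual deadlock-avoidance dance: on -EDEADLK drop all locks, back
	 * off and retry until the whole lock set is acquired. */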
15371 retry:
15372 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15373 	if (ret == -EDEADLK) {
15374 		drm_modeset_backoff(&ctx);
15375 		goto retry;
15376 	} else if (WARN_ON(ret)) {
15377 		goto fail;
15378 	}
15379 
15380 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15381 	if (WARN_ON(IS_ERR(state)))
15382 		goto fail;
15383 
15384 	/*
15385 	 * Hardware readout is the only time we don't want to calculate
15386 	 * intermediate watermarks (since we don't trust the current
15387 	 * watermarks).
15388 	 */
15389 	to_intel_atomic_state(state)->skip_intermediate_wm = true;
15390 
15391 	ret = intel_atomic_check(dev, state);
15392 	if (ret) {
15393 		/*
15394 		 * If we fail here, it means that the hardware appears to be
15395 		 * programmed in a way that shouldn't be possible, given our
15396 		 * understanding of watermark requirements.  This might mean a
15397 		 * mistake in the hardware readout code or a mistake in the
15398 		 * watermark calculations for a given platform.  Raise a WARN
15399 		 * so that this is noticeable.
15400 		 *
15401 		 * If this actually happens, we'll have to just leave the
15402 		 * BIOS-programmed watermarks untouched and hope for the best.
15403 		 */
15404 		WARN(true, "Could not determine valid watermarks for inherited state\n");
15405 		goto fail;
15406 	}
15407 
15408 	/* Write calculated watermark values back */
15409 	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15410 	for_each_crtc_in_state(state, crtc, cstate, i) {
15411 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15412 
15413 		cs->wm.need_postvbl_update = true;
15414 		dev_priv->display.optimize_watermarks(cs);
15415 	}
15416 
15417 	drm_atomic_state_free(state);
15418 fail:
15419 	drm_modeset_drop_locks(&ctx);
15420 	drm_modeset_acquire_fini(&ctx);
15421 }
15422 
15423 void intel_modeset_init(struct drm_device *dev)
15424 {
15425 	struct drm_i915_private *dev_priv = to_i915(dev);
15426 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
15427 	int sprite, ret;
15428 	enum i915_pipe pipe;
15429 	struct intel_crtc *crtc;
15430 
15431 	drm_mode_config_init(dev);
15432 
15433 	dev->mode_config.min_width = 0;
15434 	dev->mode_config.min_height = 0;
15435 
15436 	dev->mode_config.preferred_depth = 24;
15437 	dev->mode_config.prefer_shadow = 1;
15438 
15439 	dev->mode_config.allow_fb_modifiers = true;
15440 
15441 	dev->mode_config.funcs = &intel_mode_funcs;
15442 
15443 	intel_init_quirks(dev);
15444 
15445 	intel_init_pm(dev);
15446 
15447 	if (INTEL_INFO(dev)->num_pipes == 0)
15448 		return;
15449 
15450 	/*
15451 	 * There may be no VBT; if the BIOS enabled SSC we can
15452 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15453 	 * BIOS isn't using it, don't assume it will work even if the VBT
15454 	 * indicates as much.
15455 	 */
15456 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15457 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15458 					    DREF_SSC1_ENABLE);
15459 
15460 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15461 			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15462 				     bios_lvds_use_ssc ? "en" : "dis",
15463 				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15464 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15465 		}
15466 	}
15467 
15468 	if (IS_GEN2(dev)) {
15469 		dev->mode_config.max_width = 2048;
15470 		dev->mode_config.max_height = 2048;
15471 	} else if (IS_GEN3(dev)) {
15472 		dev->mode_config.max_width = 4096;
15473 		dev->mode_config.max_height = 4096;
15474 	} else {
15475 		dev->mode_config.max_width = 8192;
15476 		dev->mode_config.max_height = 8192;
15477 	}
15478 
15479 	if (IS_845G(dev) || IS_I865G(dev)) {
15480 		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15481 		dev->mode_config.cursor_height = 1023;
15482 	} else if (IS_GEN2(dev)) {
15483 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15484 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15485 	} else {
15486 		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15487 		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15488 	}
15489 
15490 	dev->mode_config.fb_base = ggtt->mappable_base;
15491 
15492 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15493 		      INTEL_INFO(dev)->num_pipes,
15494 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15495 
15496 	for_each_pipe(dev_priv, pipe) {
15497 		intel_crtc_init(dev, pipe);
15498 		for_each_sprite(dev_priv, pipe, sprite) {
15499 			ret = intel_plane_init(dev, pipe, sprite);
15500 			if (ret)
15501 				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15502 					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15503 		}
15504 	}
15505 
15506 	intel_update_czclk(dev_priv);
15507 	intel_update_rawclk(dev_priv);
15508 	intel_update_cdclk(dev);
15509 
15510 	intel_shared_dpll_init(dev);
15511 
15512 	/* Just disable it once at startup */
15513 	i915_disable_vga(dev);
15514 	intel_setup_outputs(dev);
15515 
15516 	drm_modeset_lock_all(dev);
15517 	intel_modeset_setup_hw_state(dev);
15518 	drm_modeset_unlock_all(dev);
15519 
15520 	for_each_intel_crtc(dev, crtc) {
15521 		struct intel_initial_plane_config plane_config = {};
15522 
15523 		if (!crtc->active)
15524 			continue;
15525 
15526 		/*
15527 		 * Note that reserving the BIOS fb up front prevents us
15528 		 * from stuffing other stolen allocations like the ring
15529 		 * on top.  This prevents some ugliness at boot time, and
15530 		 * can even allow for smooth boot transitions if the BIOS
15531 		 * fb is large enough for the active pipe configuration.
15532 		 */
15533 		dev_priv->display.get_initial_plane_config(crtc,
15534 							   &plane_config);
15535 
15536 		/*
15537 		 * If the fb is shared between multiple heads, we'll
15538 		 * just get the first one.
15539 		 */
15540 		intel_find_initial_plane_obj(crtc, &plane_config);
15541 	}
15542 
15543 	/*
15544 	 * Make sure hardware watermarks really match the state we read out.
15545 	 * Note that we need to do this after reconstructing the BIOS fbs
15546 	 * since the watermark calculation done here will use pstate->fb.
15547 	 */
15548 	sanitize_watermarks(dev);
15549 }
15550 
15551 static void intel_enable_pipe_a(struct drm_device *dev)
15552 {
15553 	struct intel_connector *connector;
15554 	struct drm_connector *crt = NULL;
15555 	struct intel_load_detect_pipe load_detect_temp;
15556 	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15557 
15558 	/* We can't just switch on pipe A; we need to set things up with a
15559 	 * proper mode and output configuration. As a gross hack, enable pipe A
15560 	 * by enabling the load detect pipe once. */
15561 	for_each_intel_connector(dev, connector) {
15562 		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15563 			crt = &connector->base;
15564 			break;
15565 		}
15566 	}
15567 
15568 	if (!crt)
15569 		return;
15570 
15571 	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15572 		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15573 }
15574 
15575 static bool
15576 intel_check_plane_mapping(struct intel_crtc *crtc)
15577 {
15578 	struct drm_device *dev = crtc->base.dev;
15579 	struct drm_i915_private *dev_priv = dev->dev_private;
15580 	u32 val;
15581 
15582 	if (INTEL_INFO(dev)->num_pipes == 1)
15583 		return true;
15584 
15585 	val = I915_READ(DSPCNTR(!crtc->plane));
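	/* Look at the other primary plane: if it is enabled and its pipe
	 * select bits point at our pipe, the BIOS left a crossed
	 * plane -> pipe mapping behind. */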
15586 
15587 	if ((val & DISPLAY_PLANE_ENABLE) &&
15588 	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15589 		return false;
15590 
15591 	return true;
15592 }
15593 
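/* true iff at least one encoder is currently attached to this crtc */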
15594 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15595 {
15596 	struct drm_device *dev = crtc->base.dev;
15597 	struct intel_encoder *encoder;
15598 
15599 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15600 		return true;
15601 
15602 	return false;
15603 }
15604 
15605 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15606 {
15607 	struct drm_device *dev = encoder->base.dev;
15608 	struct intel_connector *connector;
15609 
15610 	for_each_connector_on_encoder(dev, &encoder->base, connector)
15611 		return true;
15612 
15613 	return false;
15614 }
15615 
15616 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15617 {
15618 	struct drm_device *dev = crtc->base.dev;
15619 	struct drm_i915_private *dev_priv = dev->dev_private;
15620 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15621 
15622 	/* Clear any frame start delays (used for debugging) left behind by the BIOS */
15623 	if (!transcoder_is_dsi(cpu_transcoder)) {
15624 		i915_reg_t reg = PIPECONF(cpu_transcoder);
15625 
15626 		I915_WRITE(reg,
15627 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15628 	}
15629 
15630 	/* restore vblank interrupts to correct state */
15631 	drm_crtc_vblank_reset(&crtc->base);
15632 	if (crtc->active) {
15633 		struct intel_plane *plane;
15634 
15635 		drm_crtc_vblank_on(&crtc->base);
15636 
15637 		/* Disable everything but the primary plane */
15638 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15639 			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15640 				continue;
15641 
15642 			plane->disable_plane(&plane->base, &crtc->base);
15643 		}
15644 	}
15645 
15646 	/* We need to sanitize the plane -> pipe mapping first because this will
15647 	 * disable the crtc (and hence change the state) if it is wrong. Note
15648 	 * that gen4+ has a fixed plane -> pipe mapping.  */
15649 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15650 		bool plane;
15651 
15652 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15653 			      crtc->base.base.id);
15654 
15655 		/* Pipe has the wrong plane attached and the plane is active.
15656 		 * Temporarily change the plane mapping and disable everything
15657 		 * ...  */
15658 		plane = crtc->plane;
15659 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15660 		crtc->plane = !plane;
15661 		intel_crtc_disable_noatomic(&crtc->base);
15662 		crtc->plane = plane;
15663 	}
15664 
15665 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15666 	    crtc->pipe == PIPE_A && !crtc->active) {
15667 		/* BIOS forgot to enable pipe A; this mostly happens after
15668 		 * resume. Force-enable the pipe to fix this; the update_dpms
15669 		 * call below restores the pipe to the right state, but leaves
15670 		 * the required bits on. */
15671 		intel_enable_pipe_a(dev);
15672 	}
15673 
15674 	/* Adjust the state of the output pipe according to whether we
15675 	 * have active connectors/encoders. */
15676 	if (crtc->active && !intel_crtc_has_encoders(crtc))
15677 		intel_crtc_disable_noatomic(&crtc->base);
15678 
15679 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15680 		/*
15681 		 * We start out with underrun reporting disabled to avoid races.
15682 		 * For correct bookkeeping mark this on active crtcs.
15683 		 *
15684 		 * Also, on gmch platforms we don't have any hardware bits to
15685 		 * disable the underrun reporting, which means we need to start
15686 		 * out with underrun reporting disabled also on inactive pipes,
15687 		 * since otherwise we'll complain about the garbage we read when
15688 		 * e.g. coming up after runtime pm.
15689 		 *
15690 		 * No protection against concurrent access is required - at
15691 		 * worst a fifo underrun happens which also sets this to false.
15692 		 */
15693 		crtc->cpu_fifo_underrun_disabled = true;
15694 		crtc->pch_fifo_underrun_disabled = true;
15695 	}
15696 }
15697 
15698 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15699 {
15700 	struct intel_connector *connector;
15701 	struct drm_device *dev = encoder->base.dev;
15702 
15703 	/* We need to check both for a crtc link (meaning that the
15704 	 * encoder is active and trying to read from a pipe) and the
15705 	 * pipe itself being active. */
15706 	bool has_active_crtc = encoder->base.crtc &&
15707 		to_intel_crtc(encoder->base.crtc)->active;
15708 
15709 	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15710 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15711 			      encoder->base.base.id,
15712 			      encoder->base.name);
15713 
15714 		/* Connector is active, but has no active pipe. This is
15715 		 * fallout from our resume register restoring. Disable
15716 		 * the encoder manually again. */
15717 		if (encoder->base.crtc) {
15718 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15719 				      encoder->base.base.id,
15720 				      encoder->base.name);
15721 			encoder->disable(encoder);
15722 			if (encoder->post_disable)
15723 				encoder->post_disable(encoder);
15724 		}
15725 		encoder->base.crtc = NULL;
15726 
15727 		/* Inconsistent output/port/pipe state happens presumably due to
15728 		 * a bug in one of the get_hw_state functions. Or someplace else
15729 		 * in our code, like the register restore mess on resume. Clamp
15730 		 * things to off as a safer default. */
15731 		for_each_intel_connector(dev, connector) {
15732 			if (connector->encoder != encoder)
15733 				continue;
15734 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15735 			connector->base.encoder = NULL;
15736 		}
15737 	}
15738 	/* Enabled encoders without active connectors will be fixed in
15739 	 * the crtc fixup. */
15740 }
15741 
15742 void i915_redisable_vga_power_on(struct drm_device *dev)
15743 {
15744 	struct drm_i915_private *dev_priv = dev->dev_private;
15745 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15746 
15747 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15748 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15749 		i915_disable_vga(dev);
15750 	}
15751 }
15752 
15753 void i915_redisable_vga(struct drm_device *dev)
15754 {
15755 	struct drm_i915_private *dev_priv = dev->dev_private;
15756 
15757 	/* This function can be called both from intel_modeset_setup_hw_state and
15758 	 * at a very early point in our resume sequence, where the power well
15759 	 * structures are not yet restored. Since this function is at a very
15760 	 * paranoid "someone might have enabled VGA while we were not looking"
15761 	 * level, just check if the power well is enabled instead of trying to
15762 	 * follow the "don't touch the power well if we don't need it" policy
15763 	 * the rest of the driver uses. */
15764 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15765 		return;
15766 
15767 	i915_redisable_vga_power_on(dev);
15768 
15769 	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15770 }
15771 
15772 static bool primary_get_hw_state(struct intel_plane *plane)
15773 {
15774 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15775 
15776 	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15777 }
15778 
15779 /* FIXME read out full plane state for all planes */
15780 static void readout_plane_state(struct intel_crtc *crtc)
15781 {
15782 	struct drm_plane *primary = crtc->base.primary;
15783 	struct intel_plane_state *plane_state =
15784 		to_intel_plane_state(primary->state);
15785 
15786 	plane_state->visible = crtc->active &&
15787 		primary_get_hw_state(to_intel_plane(primary));
15788 
15789 	if (plane_state->visible)
15790 		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15791 }
15792 
15793 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15794 {
15795 	struct drm_i915_private *dev_priv = dev->dev_private;
15796 	enum i915_pipe pipe;
15797 	struct intel_crtc *crtc;
15798 	struct intel_encoder *encoder;
15799 	struct intel_connector *connector;
15800 	int i;
15801 
15802 	dev_priv->active_crtcs = 0;
15803 
15804 	for_each_intel_crtc(dev, crtc) {
15805 		struct intel_crtc_state *crtc_state = crtc->config;
15806 		int pixclk = 0;
15807 
15808 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15809 		memset(crtc_state, 0, sizeof(*crtc_state));
15810 		crtc_state->base.crtc = &crtc->base;
15811 
15812 		crtc_state->base.active = crtc_state->base.enable =
15813 			dev_priv->display.get_pipe_config(crtc, crtc_state);
15814 
15815 		crtc->base.enabled = crtc_state->base.enable;
15816 		crtc->active = crtc_state->base.active;
15817 
15818 		if (crtc_state->base.active) {
15819 			dev_priv->active_crtcs |= 1 << crtc->pipe;
15820 
15821 			if (IS_BROADWELL(dev_priv)) {
15822 				pixclk = ilk_pipe_pixel_rate(crtc_state);
15823 
15824 				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15825 				if (crtc_state->ips_enabled)
15826 					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15827 			} else if (IS_VALLEYVIEW(dev_priv) ||
15828 				   IS_CHERRYVIEW(dev_priv) ||
15829 				   IS_BROXTON(dev_priv))
15830 				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15831 			else
15832 				WARN_ON(dev_priv->display.modeset_calc_cdclk);
15833 		}
15834 
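		/* Remember the per-pipe pixel rate; the cdclk code uses it to
		 * derive the minimum acceptable cdclk. */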
15835 		dev_priv->min_pixclk[crtc->pipe] = pixclk;
15836 
15837 		readout_plane_state(crtc);
15838 
15839 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15840 			      crtc->base.base.id,
15841 			      crtc->active ? "enabled" : "disabled");
15842 	}
15843 
15844 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15845 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15846 
15847 		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
15848 						  &pll->config.hw_state);
15849 		pll->config.crtc_mask = 0;
15850 		for_each_intel_crtc(dev, crtc) {
15851 			if (crtc->active && crtc->config->shared_dpll == pll)
15852 				pll->config.crtc_mask |= 1 << crtc->pipe;
15853 		}
15854 		pll->active_mask = pll->config.crtc_mask;
15855 
15856 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15857 			      pll->name, pll->config.crtc_mask, pll->on);
15858 	}
15859 
15860 	for_each_intel_encoder(dev, encoder) {
15861 		pipe = 0;
15862 
15863 		if (encoder->get_hw_state(encoder, &pipe)) {
15864 			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15865 			encoder->base.crtc = &crtc->base;
15866 			encoder->get_config(encoder, crtc->config);
15867 		} else {
15868 			encoder->base.crtc = NULL;
15869 		}
15870 
15871 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15872 			      encoder->base.base.id,
15873 			      encoder->base.name,
15874 			      encoder->base.crtc ? "enabled" : "disabled",
15875 			      pipe_name(pipe));
15876 	}
15877 
15878 	for_each_intel_connector(dev, connector) {
15879 		if (connector->get_hw_state(connector)) {
15880 			connector->base.dpms = DRM_MODE_DPMS_ON;
15881 
15882 			encoder = connector->encoder;
15883 			connector->base.encoder = &encoder->base;
15884 
15885 			if (encoder->base.crtc &&
15886 			    encoder->base.crtc->state->active) {
15887 				/*
15888 				 * This has to be done during hardware readout
15889 				 * because anything calling .crtc_disable may
15890 				 * rely on the connector_mask being accurate.
15891 				 */
15892 				encoder->base.crtc->state->connector_mask |=
15893 					1 << drm_connector_index(&connector->base);
15894 				encoder->base.crtc->state->encoder_mask |=
15895 					1 << drm_encoder_index(&encoder->base);
15896 			}
15897 
15898 		} else {
15899 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15900 			connector->base.encoder = NULL;
15901 		}
15902 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15903 			      connector->base.base.id,
15904 			      connector->base.name,
15905 			      connector->base.encoder ? "enabled" : "disabled");
15906 	}
15907 
15908 	for_each_intel_crtc(dev, crtc) {
15909 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
15910 
15911 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15912 		if (crtc->base.state->active) {
15913 			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15914 			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15915 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15916 
15917 			/*
15918 			 * The initial mode needs to be set in order to keep
15919 			 * the atomic core happy. It wants a valid mode if the
15920 			 * crtc's enabled, so we do the above call.
15921 			 *
15922 			 * At this point some state updated by the connectors
15923 			 * in their ->detect() callback has not run yet, so
15924 			 * no recalculation can be done yet.
15925 			 *
15926 			 * Even if we could do a recalculation and modeset
15927 			 * right now it would cause a double modeset if
15928 			 * fbdev or userspace chooses a different initial mode.
15929 			 *
15930 			 * If that happens, someone indicated they wanted a
15931 			 * mode change, which means it's safe to do a full
15932 			 * recalculation.
15933 			 */
15934 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15935 
15936 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15937 			update_scanline_offset(crtc);
15938 		}
15939 
15940 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
15941 	}
15942 }
15943 
15944 /* Scan out the current hw modeset state and sanitize it,
15945  * forcing any inconsistent state into a consistent one.
15946  */
15947 static void
15948 intel_modeset_setup_hw_state(struct drm_device *dev)
15949 {
15950 	struct drm_i915_private *dev_priv = dev->dev_private;
15951 	enum i915_pipe pipe;
15952 	struct intel_crtc *crtc;
15953 	struct intel_encoder *encoder;
15954 	int i;
15955 
15956 	intel_modeset_readout_hw_state(dev);
15957 
15958 	/* HW state is read out, now we need to sanitize this mess. */
15959 	for_each_intel_encoder(dev, encoder) {
15960 		intel_sanitize_encoder(encoder);
15961 	}
15962 
15963 	for_each_pipe(dev_priv, pipe) {
15964 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15965 		intel_sanitize_crtc(crtc);
15966 		intel_dump_pipe_config(crtc, crtc->config,
15967 				       "[setup_hw_state]");
15968 	}
15969 
15970 	intel_modeset_update_connector_atomic_state(dev);
15971 
15972 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15973 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15974 
15975 		if (!pll->on || pll->active_mask)
15976 			continue;
15977 
15978 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15979 
15980 		pll->funcs.disable(dev_priv, pll);
15981 		pll->on = false;
15982 	}
15983 
15984 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15985 		vlv_wm_get_hw_state(dev);
15986 	else if (IS_GEN9(dev))
15987 		skl_wm_get_hw_state(dev);
15988 	else if (HAS_PCH_SPLIT(dev))
15989 		ilk_wm_get_hw_state(dev);
15990 
15991 	for_each_intel_crtc(dev, crtc) {
15992 		unsigned long put_domains;
15993 
15994 		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15995 		if (WARN_ON(put_domains))
15996 			modeset_put_power_domains(dev_priv, put_domains);
15997 	}
15998 	intel_display_set_init_power(dev_priv, false);
15999 
16000 	intel_fbc_init_pipe_state(dev_priv);
16001 }
16002 
16003 void intel_display_resume(struct drm_device *dev)
16004 {
16005 	struct drm_i915_private *dev_priv = to_i915(dev);
16006 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16007 	struct drm_modeset_acquire_ctx ctx;
16008 	int ret;
16009 	bool setup = false;
16010 
16011 	dev_priv->modeset_restore_state = NULL;
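	/* Consume the state stashed at suspend time; it is either committed
	 * below or freed on failure. */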
16012 
16013 	/*
16014 	 * This is a kludge because with real atomic modeset mode_config.mutex
16015 	 * won't be taken. Unfortunately some probed state like
16016 	 * audio_codec_enable is still protected by mode_config.mutex, so lock
16017 	 * it here for now.
16018 	 */
16019 	mutex_lock(&dev->mode_config.mutex);
16020 	drm_modeset_acquire_init(&ctx, 0);
16021 
16022 retry:
16023 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
16024 
16025 	if (ret == 0 && !setup) {
16026 		setup = true;
16027 
16028 		intel_modeset_setup_hw_state(dev);
16029 		i915_redisable_vga(dev);
16030 	}
16031 
16032 	if (ret == 0 && state) {
16033 		struct drm_crtc_state *crtc_state;
16034 		struct drm_crtc *crtc;
16035 		int i;
16036 
16037 		state->acquire_ctx = &ctx;
16038 
16039 		/* ignore any reset values/BIOS leftovers in the WM registers */
16040 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
16041 
16042 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
16043 			/*
16044 			 * Force recalculation even if we restore
16045 			 * current state. With fast modeset this may not result
16046 			 * in a modeset when the state is compatible.
16047 			 */
16048 			crtc_state->mode_changed = true;
16049 		}
16050 
16051 		ret = drm_atomic_commit(state);
16052 	}
16053 
16054 	if (ret == -EDEADLK) {
16055 		drm_modeset_backoff(&ctx);
16056 		goto retry;
16057 	}
16058 
16059 	drm_modeset_drop_locks(&ctx);
16060 	drm_modeset_acquire_fini(&ctx);
16061 	mutex_unlock(&dev->mode_config.mutex);
16062 
16063 	if (ret) {
16064 		DRM_ERROR("Restoring old state failed with %i\n", ret);
16065 		drm_atomic_state_free(state);
16066 	}
16067 }
16068 
16069 void intel_modeset_gem_init(struct drm_device *dev)
16070 {
16071 	struct drm_crtc *c;
16072 	struct drm_i915_gem_object *obj;
16073 	int ret;
16074 
16075 	intel_init_gt_powersave(dev);
16076 
16077 	intel_modeset_init_hw(dev);
16078 
16079 	intel_setup_overlay(dev);
16080 
16081 	/*
16082 	 * Make sure any fbs we allocated at startup are properly
16083 	 * pinned & fenced.  When we do the allocation it's too early
16084 	 * for this.
16085 	 */
16086 	for_each_crtc(dev, c) {
16087 		obj = intel_fb_obj(c->primary->fb);
16088 		if (obj == NULL)
16089 			continue;
16090 
16091 		mutex_lock(&dev->struct_mutex);
16092 		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
16093 						 c->primary->state->rotation);
16094 		mutex_unlock(&dev->struct_mutex);
16095 		if (ret) {
16096 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
16097 				  to_intel_crtc(c)->pipe);
16098 			drm_framebuffer_unreference(c->primary->fb);
16099 			c->primary->fb = NULL;
16100 			c->primary->crtc = c->primary->state->crtc = NULL;
16101 			update_state_fb(c->primary);
16102 			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16103 		}
16104 	}
16105 
16106 	intel_backlight_register(dev);
16107 }
16108 
16109 void intel_connector_unregister(struct intel_connector *intel_connector)
16110 {
16111 	struct drm_connector *connector = &intel_connector->base;
16112 
16113 	intel_panel_destroy_backlight(connector);
16114 	drm_connector_unregister(connector);
16115 }
16116 
16117 void intel_modeset_cleanup(struct drm_device *dev)
16118 {
16119 	struct drm_i915_private *dev_priv = dev->dev_private;
16120 	struct intel_connector *connector;
16121 
16122 	intel_disable_gt_powersave(dev);
16123 
16124 	intel_backlight_unregister(dev);
16125 
16126 	/*
16127 	 * Disable interrupts and polling first to avoid creating havoc.
16128 	 * Too much stuff here (turning off connectors, ...) would
16129 	 * run into fancy races otherwise.
16130 	 */
16131 	intel_irq_uninstall(dev_priv);
16132 
16133 	/*
16134 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
16135 	 * poll handlers. Hence disable polling after hpd handling is shut down.
16136 	 */
16137 	drm_kms_helper_poll_fini(dev);
16138 
16139 	intel_unregister_dsm_handler();
16140 
16141 	intel_fbc_global_disable(dev_priv);
16142 
16143 	/* flush any delayed tasks or pending work */
16144 	flush_scheduled_work();
16145 
16146 	/* destroy the backlight and sysfs files before encoders/connectors */
16147 	for_each_intel_connector(dev, connector)
16148 		connector->unregister(connector);
16149 
16150 	drm_mode_config_cleanup(dev);
16151 
16152 	intel_cleanup_overlay(dev);
16153 
16154 	intel_cleanup_gt_powersave(dev);
16155 
16156 	intel_teardown_gmbus(dev);
16157 }
16158 
16159 /*
16160  * Return which encoder is currently attached to the connector.
16161  */
16162 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16163 {
16164 	return &intel_attached_encoder(connector)->base;
16165 }
16166 
16167 void intel_connector_attach_encoder(struct intel_connector *connector,
16168 				    struct intel_encoder *encoder)
16169 {
16170 	connector->encoder = encoder;
16171 	drm_mode_connector_attach_encoder(&connector->base,
16172 					  &encoder->base);
16173 }
16174 
16175 /*
16176  * set vga decode state - true == enable VGA decode
16177  */
16178 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16179 {
16180 	struct drm_i915_private *dev_priv = dev->dev_private;
16181 	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16182 	u16 gmch_ctrl;
16183 
16184 	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16185 		DRM_ERROR("failed to read control word\n");
16186 		return -EIO;
16187 	}
16188 
16189 	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16190 		return 0;
16191 
16192 	if (state)
16193 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16194 	else
16195 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16196 
16197 	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16198 		DRM_ERROR("failed to write control word\n");
16199 		return -EIO;
16200 	}
16201 
16202 	return 0;
16203 }
16204 
16205 struct intel_display_error_state {
16206 
16207 	u32 power_well_driver;
16208 
16209 	int num_transcoders;
16210 
16211 	struct intel_cursor_error_state {
16212 		u32 control;
16213 		u32 position;
16214 		u32 base;
16215 		u32 size;
16216 	} cursor[I915_MAX_PIPES];
16217 
16218 	struct intel_pipe_error_state {
16219 		bool power_domain_on;
16220 		u32 source;
16221 		u32 stat;
16222 	} pipe[I915_MAX_PIPES];
16223 
16224 	struct intel_plane_error_state {
16225 		u32 control;
16226 		u32 stride;
16227 		u32 size;
16228 		u32 pos;
16229 		u32 addr;
16230 		u32 surface;
16231 		u32 tile_offset;
16232 	} plane[I915_MAX_PIPES];
16233 
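	/* Sized for the three pipe transcoders plus the eDP transcoder. */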
16234 	struct intel_transcoder_error_state {
16235 		bool power_domain_on;
16236 		enum transcoder cpu_transcoder;
16237 
16238 		u32 conf;
16239 
16240 		u32 htotal;
16241 		u32 hblank;
16242 		u32 hsync;
16243 		u32 vtotal;
16244 		u32 vblank;
16245 		u32 vsync;
16246 	} transcoder[4];
16247 };
16248 
16249 struct intel_display_error_state *
16250 intel_display_capture_error_state(struct drm_device *dev)
16251 {
16252 	struct drm_i915_private *dev_priv = dev->dev_private;
16253 	struct intel_display_error_state *error;
16254 	int transcoders[] = {
16255 		TRANSCODER_A,
16256 		TRANSCODER_B,
16257 		TRANSCODER_C,
16258 		TRANSCODER_EDP,
16259 	};
16260 	int i;
16261 
16262 	if (INTEL_INFO(dev)->num_pipes == 0)
16263 		return NULL;
16264 
16265 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16266 	if (error == NULL)
16267 		return NULL;
16268 
16269 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16270 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16271 
16272 	for_each_pipe(dev_priv, i) {
16273 		error->pipe[i].power_domain_on =
16274 			__intel_display_power_is_enabled(dev_priv,
16275 							 POWER_DOMAIN_PIPE(i));
16276 		if (!error->pipe[i].power_domain_on)
16277 			continue;
16278 
16279 		error->cursor[i].control = I915_READ(CURCNTR(i));
16280 		error->cursor[i].position = I915_READ(CURPOS(i));
16281 		error->cursor[i].base = I915_READ(CURBASE(i));
16282 
16283 		error->plane[i].control = I915_READ(DSPCNTR(i));
16284 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16285 		if (INTEL_INFO(dev)->gen <= 3) {
16286 			error->plane[i].size = I915_READ(DSPSIZE(i));
16287 			error->plane[i].pos = I915_READ(DSPPOS(i));
16288 		}
16289 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16290 			error->plane[i].addr = I915_READ(DSPADDR(i));
16291 		if (INTEL_INFO(dev)->gen >= 4) {
16292 			error->plane[i].surface = I915_READ(DSPSURF(i));
16293 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16294 		}
16295 
16296 		error->pipe[i].source = I915_READ(PIPESRC(i));
16297 
16298 		if (HAS_GMCH_DISPLAY(dev))
16299 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16300 	}
16301 
16302 	/* Note: this does not include DSI transcoders. */
16303 	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16304 	if (HAS_DDI(dev_priv))
16305 		error->num_transcoders++; /* Account for eDP. */
16306 
16307 	for (i = 0; i < error->num_transcoders; i++) {
16308 		enum transcoder cpu_transcoder = transcoders[i];
16309 
16310 		error->transcoder[i].power_domain_on =
16311 			__intel_display_power_is_enabled(dev_priv,
16312 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16313 		if (!error->transcoder[i].power_domain_on)
16314 			continue;
16315 
16316 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16317 
16318 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16319 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16320 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16321 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16322 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16323 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16324 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16325 	}
16326 
16327 	return error;
16328 }
16329 
16330 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16331 
16332 void
16333 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16334 				struct drm_device *dev,
16335 				struct intel_display_error_state *error)
16336 {
16337 	struct drm_i915_private *dev_priv = dev->dev_private;
16338 	int i;
16339 
16340 	if (!error)
16341 		return;
16342 
16343 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16344 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16345 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16346 			   error->power_well_driver);
16347 	for_each_pipe(dev_priv, i) {
16348 		err_printf(m, "Pipe [%d]:\n", i);
16349 		err_printf(m, "  Power: %s\n",
16350 			   onoff(error->pipe[i].power_domain_on));
16351 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16352 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16353 
16354 		err_printf(m, "Plane [%d]:\n", i);
16355 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16356 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16357 		if (INTEL_INFO(dev)->gen <= 3) {
16358 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16359 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16360 		}
16361 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16362 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16363 		if (INTEL_INFO(dev)->gen >= 4) {
16364 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16365 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16366 		}
16367 
16368 		err_printf(m, "Cursor [%d]:\n", i);
16369 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16370 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16371 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16372 	}
16373 
16374 	for (i = 0; i < error->num_transcoders; i++) {
16375 		err_printf(m, "CPU transcoder: %s\n",
16376 			   transcoder_name(error->transcoder[i].cpu_transcoder));
16377 		err_printf(m, "  Power: %s\n",
16378 			   onoff(error->transcoder[i].power_domain_on));
16379 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16380 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16381 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16382 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16383 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16384 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16385 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16386 	}
16387 }
16388