xref: /dflybsd-src/sys/dev/drm/i915/intel_display.c (revision e8721bf4b6e6b12c2025fbba27e7b9214bb19756)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/i2c.h>
30 #include <linux/kernel.h>
31 #include <drm/drm_edid.h>
32 #include <drm/drmP.h>
33 #include "intel_drv.h"
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include <drm/drm_dp_helper.h>
38 #include <drm/drm_crtc_helper.h>
39 
/* Forward declarations for helpers defined later in this file. */
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

/* Hardware clock state readout for i9xx and Ironlake PCH pipes. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);
51 
/* Inclusive [min, max] bound for one PLL divider or derived clock. */
typedef struct {
	int	min, max;
} intel_range_t;

/* p2 post-divider selection: p2_slow below dot_limit, p2_fast at or above. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

/* Complete set of divider/clock limits used when searching for a PLL config. */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
66 
67 int
68 intel_pch_rawclk(struct drm_device *dev)
69 {
70 	struct drm_i915_private *dev_priv = dev->dev_private;
71 
72 	WARN_ON(!HAS_PCH_SPLIT(dev));
73 
74 	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
75 }
76 
77 static inline u32 /* units of 100MHz */
78 intel_fdi_link_freq(struct drm_device *dev)
79 {
80 	if (IS_GEN5(dev)) {
81 		struct drm_i915_private *dev_priv = dev->dev_private;
82 		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
83 	} else
84 		return 27;
85 }
86 
/* Gen2 (i8xx) PLL limits for DAC/CRT outputs. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Gen2 (i8xx) PLL limits for DVO outputs; only p2_fast differs from DAC. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* Gen2 (i8xx) PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx PLL limits for SDVO/DAC outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx PLL limits for LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
151 
152 
/* G4x PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4x PLL limits for HDMI (also used for analog on G4x). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4x PLL limits for single-channel LVDS. dot_limit 0 forces p2_fast. */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4x PLL limits for dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview PLL limits for SDVO/DAC outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview PLL limits for LVDS panels; same m handling as SDVO above. */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
236 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake single-channel LVDS with the default 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake dual-channel LVDS with the default 120MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel LVDS with a 100MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
307 
/*
 * ValleyView PLL limits. No .m/.p ranges are given: intel_PLL_is_valid()
 * skips those checks on VLV.
 */
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
323 
324 static void vlv_clock(int refclk, intel_clock_t *clock)
325 {
326 	clock->m = clock->m1 * clock->m2;
327 	clock->p = clock->p1 * clock->p2;
328 	if (WARN_ON(clock->n == 0 || clock->p == 0))
329 		return;
330 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
331 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
332 }
333 
334 /**
335  * Returns whether any output on the specified pipe is of the specified type
336  */
337 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
338 {
339 	struct drm_device *dev = crtc->dev;
340 	struct intel_encoder *encoder;
341 
342 	for_each_encoder_on_crtc(dev, crtc, encoder)
343 		if (encoder->type == type)
344 			return true;
345 
346 	return false;
347 }
348 
349 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
350 						int refclk)
351 {
352 	struct drm_device *dev = crtc->dev;
353 	const intel_limit_t *limit;
354 
355 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
356 		if (intel_is_dual_link_lvds(dev)) {
357 			if (refclk == 100000)
358 				limit = &intel_limits_ironlake_dual_lvds_100m;
359 			else
360 				limit = &intel_limits_ironlake_dual_lvds;
361 		} else {
362 			if (refclk == 100000)
363 				limit = &intel_limits_ironlake_single_lvds_100m;
364 			else
365 				limit = &intel_limits_ironlake_single_lvds;
366 		}
367 	} else
368 		limit = &intel_limits_ironlake_dac;
369 
370 	return limit;
371 }
372 
373 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
374 {
375 	struct drm_device *dev = crtc->dev;
376 	const intel_limit_t *limit;
377 
378 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
379 		if (intel_is_dual_link_lvds(dev))
380 			limit = &intel_limits_g4x_dual_channel_lvds;
381 		else
382 			limit = &intel_limits_g4x_single_channel_lvds;
383 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
384 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
385 		limit = &intel_limits_g4x_hdmi;
386 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
387 		limit = &intel_limits_g4x_sdvo;
388 	} else /* The option is for other outputs */
389 		limit = &intel_limits_i9xx_sdvo;
390 
391 	return limit;
392 }
393 
394 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
395 {
396 	struct drm_device *dev = crtc->dev;
397 	const intel_limit_t *limit;
398 
399 	if (HAS_PCH_SPLIT(dev))
400 		limit = intel_ironlake_limit(crtc, refclk);
401 	else if (IS_G4X(dev)) {
402 		limit = intel_g4x_limit(crtc);
403 	} else if (IS_PINEVIEW(dev)) {
404 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
405 			limit = &intel_limits_pineview_lvds;
406 		else
407 			limit = &intel_limits_pineview_sdvo;
408 	} else if (IS_VALLEYVIEW(dev)) {
409 		limit = &intel_limits_vlv;
410 	} else if (!IS_GEN2(dev)) {
411 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
412 			limit = &intel_limits_i9xx_lvds;
413 		else
414 			limit = &intel_limits_i9xx_sdvo;
415 	} else {
416 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
417 			limit = &intel_limits_i8xx_lvds;
418 		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
419 			limit = &intel_limits_i8xx_dvo;
420 		else
421 			limit = &intel_limits_i8xx_dac;
422 	}
423 	return limit;
424 }
425 
426 /* m1 is reserved as 0 in Pineview, n is a ring counter */
427 static void pineview_clock(int refclk, intel_clock_t *clock)
428 {
429 	clock->m = clock->m2 + 2;
430 	clock->p = clock->p1 * clock->p2;
431 	if (WARN_ON(clock->n == 0 || clock->p == 0))
432 		return;
433 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
434 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
435 }
436 
437 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
438 {
439 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
440 }
441 
442 static void i9xx_clock(int refclk, intel_clock_t *clock)
443 {
444 	clock->m = i9xx_dpll_compute_m(clock);
445 	clock->p = clock->p1 * clock->p2;
446 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
447 		return;
448 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
449 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
450 }
451 
/* NOTE: expands to a "return false" in the *caller* (intel_PLL_is_valid). */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* Pineview has a single m divider (m1 fixed at 0); VLV has no such rule */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV limit tables carry no m/p ranges, so skip those checks there */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
492 
/*
 * Brute-force search for the i9xx divisor set whose dot clock is closest
 * to @target. Returns true if a valid configuration was found and stored
 * in @best_clock. A non-NULL @match_clock restricts candidates to those
 * with the same combined post divider p.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;	/* best error so far; start at the worst case */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must exceed m2 (see intel_PLL_is_valid), so prune */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff we improved on the initial worst-case error */
	return (err != target);
}
553 
/*
 * Pineview variant of the brute-force divisor search. Same strategy as
 * i9xx_find_best_dpll(), but uses pineview_clock() and has no m2 >= m1
 * pruning (Pineview has a single combined m divider with m1 fixed at 0).
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;	/* best error so far; start at the worst case */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true iff we improved on the initial worst-case error */
	return (err != target);
}
612 
/*
 * G4x divisor search. Unlike the i9xx variant this does not chase the
 * absolute closest dot clock: any candidate within ~0.585% of @target is
 * acceptable, with smaller n preferred (max_n shrinks to the best hit's n)
 * and larger m1/m2/p1 preferred by iterating those downward.
 * @match_clock is unused here.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* Rely on the current LVDS channel mode; see i9xx variant. */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
669 
/*
 * ValleyView divisor search, operating in "fast clock" units (5x the dot
 * clock). Tracks the candidate with the lowest clock error in ppm; once
 * a candidate under 100 ppm exists, larger combined p dividers win
 * instead. @match_clock is unused here.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					/* solve m2 so the resulting dot clock hits target */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					/* under 100 ppm: prefer the larger p divider */
					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					/* otherwise require a >= 10 ppm improvement */
					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}
726 
727 bool intel_crtc_active(struct drm_crtc *crtc)
728 {
729 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
730 
731 	/* Be paranoid as we can arrive here with only partial
732 	 * state retrieved from the hardware during setup.
733 	 *
734 	 * We can ditch the adjusted_mode.crtc_clock check as soon
735 	 * as Haswell has gained clock readout/fastboot support.
736 	 *
737 	 * We can ditch the crtc->fb check as soon as we can
738 	 * properly reconstruct framebuffers.
739 	 */
740 	return intel_crtc->active && crtc->fb &&
741 		intel_crtc->config.adjusted_mode.crtc_clock;
742 }
743 
744 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
745 					     enum i915_pipe pipe)
746 {
747 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
748 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
749 
750 	return intel_crtc->config.cpu_transcoder;
751 }
752 
753 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
754 {
755 	struct drm_i915_private *dev_priv = dev->dev_private;
756 	u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
757 
758 	frame = I915_READ(frame_reg);
759 
760 	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
761 		DRM_DEBUG_KMS("vblank wait timed out\n");
762 }
763 
764 /**
765  * intel_wait_for_vblank - wait for vblank on a given pipe
766  * @dev: drm device
767  * @pipe: pipe to wait for
768  *
769  * Wait for vblank to occur on a given pipe.  Needed for various bits of
770  * mode setting code.
771  */
772 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
773 {
774 	struct drm_i915_private *dev_priv = dev->dev_private;
775 	int pipestat_reg = PIPESTAT(pipe);
776 
777 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
778 		g4x_wait_for_vblank(dev, pipe);
779 		return;
780 	}
781 
782 	/* Clear existing vblank status. Note this will clear any other
783 	 * sticky status fields as well.
784 	 *
785 	 * This races with i915_driver_irq_handler() with the result
786 	 * that either function could miss a vblank event.  Here it is not
787 	 * fatal, as we will either wait upon the next vblank interrupt or
788 	 * timeout.  Generally speaking intel_wait_for_vblank() is only
789 	 * called during modeset at which time the GPU should be idle and
790 	 * should *not* be performing page flips and thus not waiting on
791 	 * vblanks...
792 	 * Currently, the result of us stealing a vblank from the irq
793 	 * handler is that a single frame will be skipped during swapbuffers.
794 	 */
795 	I915_WRITE(pipestat_reg,
796 		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
797 
798 	/* Wait for vblank interrupt bit to set */
799 	if (wait_for(I915_READ(pipestat_reg) &
800 		     PIPE_VBLANK_INTERRUPT_STATUS,
801 		     50))
802 		DRM_DEBUG_KMS("vblank wait timed out\n");
803 }
804 
805 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
806 {
807 	struct drm_i915_private *dev_priv = dev->dev_private;
808 	u32 reg = PIPEDSL(pipe);
809 	u32 line1, line2;
810 	u32 line_mask;
811 
812 	if (IS_GEN2(dev))
813 		line_mask = DSL_LINEMASK_GEN2;
814 	else
815 		line_mask = DSL_LINEMASK_GEN3;
816 
817 	line1 = I915_READ(reg) & line_mask;
818 	mdelay(5);
819 	line2 = I915_READ(reg) & line_mask;
820 
821 	return line1 == line2;
822 }
823 
824 /*
825  * intel_wait_for_pipe_off - wait for pipe to turn off
826  * @dev: drm device
827  * @pipe: pipe to wait for
828  *
829  * After disabling a pipe, we can't wait for vblank in the usual way,
830  * spinning on the vblank interrupt status bit, since we won't actually
831  * see an interrupt when the pipe is disabled.
832  *
833  * On Gen4 and above:
834  *   wait for the pipe register state bit to turn off
835  *
836  * Otherwise:
837  *   wait for the display line value to settle (it usually
838  *   ends up stopping at the start of the next frame).
839  *
840  */
841 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
842 {
843 	struct drm_i915_private *dev_priv = dev->dev_private;
844 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
845 								      pipe);
846 
847 	if (INTEL_INFO(dev)->gen >= 4) {
848 		int reg = PIPECONF(cpu_transcoder);
849 
850 		/* Wait for the Pipe State to go off */
851 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
852 			     100))
853 			WARN(1, "pipe_off wait timed out\n");
854 	} else {
855 		/* Wait for the display line to settle */
856 		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
857 			WARN(1, "pipe_off wait timed out\n");
858 	}
859 }
860 
861 /*
862  * ibx_digital_port_connected - is the specified port connected?
863  * @dev_priv: i915 private structure
864  * @port: the port to test
865  *
866  * Returns true if @port is connected, false otherwise.
867  */
868 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
869 				struct intel_digital_port *port)
870 {
871 	u32 bit;
872 
873 	if (HAS_PCH_IBX(dev_priv->dev)) {
874 		switch(port->port) {
875 		case PORT_B:
876 			bit = SDE_PORTB_HOTPLUG;
877 			break;
878 		case PORT_C:
879 			bit = SDE_PORTC_HOTPLUG;
880 			break;
881 		case PORT_D:
882 			bit = SDE_PORTD_HOTPLUG;
883 			break;
884 		default:
885 			return true;
886 		}
887 	} else {
888 		switch(port->port) {
889 		case PORT_B:
890 			bit = SDE_PORTB_HOTPLUG_CPT;
891 			break;
892 		case PORT_C:
893 			bit = SDE_PORTC_HOTPLUG_CPT;
894 			break;
895 		case PORT_D:
896 			bit = SDE_PORTD_HOTPLUG_CPT;
897 			break;
898 		default:
899 			return true;
900 		}
901 	}
902 
903 	return I915_READ(SDEISR) & bit;
904 }
905 
/* Human-readable name for an enabled/disabled state in assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
910 
911 /* Only for pre-ILK configs */
912 void assert_pll(struct drm_i915_private *dev_priv,
913 		enum i915_pipe pipe, bool state)
914 {
915 	int reg;
916 	u32 val;
917 	bool cur_state;
918 
919 	reg = DPLL(pipe);
920 	val = I915_READ(reg);
921 	cur_state = !!(val & DPLL_VCO_ENABLE);
922 	WARN(cur_state != state,
923 	     "PLL state assertion failure (expected %s, current %s)\n",
924 	     state_string(state), state_string(cur_state));
925 }
926 
/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* CCK register access goes through the sideband and needs dpio_lock. */
	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
944 
945 struct intel_shared_dpll *
946 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
947 {
948 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
949 
950 	if (crtc->config.shared_dpll < 0)
951 		return NULL;
952 
953 	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
954 }
955 
956 /* For ILK+ */
957 void assert_shared_dpll(struct drm_i915_private *dev_priv,
958 			struct intel_shared_dpll *pll,
959 			bool state)
960 {
961 	bool cur_state;
962 	struct intel_dpll_hw_state hw_state;
963 
964 	if (HAS_PCH_LPT(dev_priv->dev)) {
965 		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
966 		return;
967 	}
968 
969 	if (WARN (!pll,
970 		  "asserting DPLL %s with no DPLL\n", state_string(state)))
971 		return;
972 
973 	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
974 	WARN(cur_state != state,
975 	     "%s assertion failure (expected %s, current %s)\n",
976 	     pll->name, state_string(state), state_string(cur_state));
977 }
978 
979 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
980 			  enum i915_pipe pipe, bool state)
981 {
982 	int reg;
983 	u32 val;
984 	bool cur_state;
985 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
986 								      pipe);
987 
988 	if (HAS_DDI(dev_priv->dev)) {
989 		/* DDI does not have a specific FDI_TX register */
990 		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
991 		val = I915_READ(reg);
992 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
993 	} else {
994 		reg = FDI_TX_CTL(pipe);
995 		val = I915_READ(reg);
996 		cur_state = !!(val & FDI_TX_ENABLE);
997 	}
998 	WARN(cur_state != state,
999 	     "FDI TX state assertion failure (expected %s, current %s)\n",
1000 	     state_string(state), state_string(cur_state));
1001 }
1002 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1003 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1004 
1005 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1006 			  enum i915_pipe pipe, bool state)
1007 {
1008 	int reg;
1009 	u32 val;
1010 	bool cur_state;
1011 
1012 	reg = FDI_RX_CTL(pipe);
1013 	val = I915_READ(reg);
1014 	cur_state = !!(val & FDI_RX_ENABLE);
1015 	WARN(cur_state != state,
1016 	     "FDI RX state assertion failure (expected %s, current %s)\n",
1017 	     state_string(state), state_string(cur_state));
1018 }
1019 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1020 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1021 
1022 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1023 				      enum i915_pipe pipe)
1024 {
1025 	int reg;
1026 	u32 val;
1027 
1028 	/* ILK FDI PLL is always enabled */
1029 	if (dev_priv->info->gen == 5)
1030 		return;
1031 
1032 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1033 	if (HAS_DDI(dev_priv->dev))
1034 		return;
1035 
1036 	reg = FDI_TX_CTL(pipe);
1037 	val = I915_READ(reg);
1038 	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1039 }
1040 
1041 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1042 		       enum i915_pipe pipe, bool state)
1043 {
1044 	int reg;
1045 	u32 val;
1046 	bool cur_state;
1047 
1048 	reg = FDI_RX_CTL(pipe);
1049 	val = I915_READ(reg);
1050 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1051 	WARN(cur_state != state,
1052 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1053 	     state_string(state), state_string(cur_state));
1054 }
1055 
1056 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1057 				  enum i915_pipe pipe)
1058 {
1059 	int pp_reg, lvds_reg;
1060 	u32 val;
1061 	enum i915_pipe panel_pipe = PIPE_A;
1062 	bool locked = true;
1063 
1064 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
1065 		pp_reg = PCH_PP_CONTROL;
1066 		lvds_reg = PCH_LVDS;
1067 	} else {
1068 		pp_reg = PP_CONTROL;
1069 		lvds_reg = LVDS;
1070 	}
1071 
1072 	val = I915_READ(pp_reg);
1073 	if (!(val & PANEL_POWER_ON) ||
1074 	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1075 		locked = false;
1076 
1077 	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1078 		panel_pipe = PIPE_B;
1079 
1080 	WARN(panel_pipe == pipe && locked,
1081 	     "panel assertion failure, pipe %c regs locked\n",
1082 	     pipe_name(pipe));
1083 }
1084 
/*
 * Assert the hardware cursor on @pipe matches the expected enable state.
 * The cursor control register and its enable bit moved across
 * generations, hence the three-way read below.
 */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
	else
		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1104 
1105 void assert_pipe(struct drm_i915_private *dev_priv,
1106 		 enum i915_pipe pipe, bool state)
1107 {
1108 	int reg;
1109 	u32 val;
1110 	bool cur_state;
1111 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1112 								      pipe);
1113 
1114 	/* if we need the pipe A quirk it must be always on */
1115 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1116 		state = true;
1117 
1118 	if (!intel_display_power_enabled(dev_priv->dev,
1119 				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1120 		cur_state = false;
1121 	} else {
1122 		reg = PIPECONF(cpu_transcoder);
1123 		val = I915_READ(reg);
1124 		cur_state = !!(val & PIPECONF_ENABLE);
1125 	}
1126 
1127 	WARN(cur_state != state,
1128 	     "pipe %c assertion failure (expected %s, current %s)\n",
1129 	     pipe_name(pipe), state_string(state), state_string(cur_state));
1130 }
1131 
1132 static void assert_plane(struct drm_i915_private *dev_priv,
1133 			 enum plane plane, bool state)
1134 {
1135 	int reg;
1136 	u32 val;
1137 	bool cur_state;
1138 
1139 	reg = DSPCNTR(plane);
1140 	val = I915_READ(reg);
1141 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1142 	WARN(cur_state != state,
1143 	     "plane %c assertion failure (expected %s, current %s)\n",
1144 	     plane_name(plane), state_string(state), state_string(cur_state));
1145 }
1146 
1147 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1148 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1149 
1150 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1151 				   enum i915_pipe pipe)
1152 {
1153 	struct drm_device *dev = dev_priv->dev;
1154 	int reg, i;
1155 	u32 val;
1156 	int cur_pipe;
1157 
1158 	/* Primary planes are fixed to pipes on gen4+ */
1159 	if (INTEL_INFO(dev)->gen >= 4) {
1160 		reg = DSPCNTR(pipe);
1161 		val = I915_READ(reg);
1162 		WARN((val & DISPLAY_PLANE_ENABLE),
1163 		     "plane %c assertion failure, should be disabled but not\n",
1164 		     plane_name(pipe));
1165 		return;
1166 	}
1167 
1168 	/* Need to check both planes against the pipe */
1169 	for_each_pipe(i) {
1170 		reg = DSPCNTR(i);
1171 		val = I915_READ(reg);
1172 		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1173 			DISPPLANE_SEL_PIPE_SHIFT;
1174 		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1175 		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1176 		     plane_name(i), pipe_name(pipe));
1177 	}
1178 }
1179 
/*
 * Assert no sprite plane on @pipe is enabled. The sprite register layout
 * differs per platform: VLV has multiple sprites per pipe (SPCNTR),
 * IVB+ uses SPRCTL, ILK/SNB use DVSCNTR, and older parts have none.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		/* VLV: walk every sprite attached to this pipe. */
		for (i = 0; i < dev_priv->num_plane; i++) {
			reg = SPCNTR(pipe, i);
			val = I915_READ(reg);
			WARN((val & SP_ENABLE),
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, i), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN((val & SPRITE_ENABLE),
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		/* ILK/SNB: a single DVS sprite per pipe. */
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DVS_ENABLE),
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1209 
1210 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1211 {
1212 	u32 val;
1213 	bool enabled;
1214 
1215 	WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1216 
1217 	val = I915_READ(PCH_DREF_CONTROL);
1218 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1219 			    DREF_SUPERSPREAD_SOURCE_MASK));
1220 	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1221 }
1222 
1223 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1224 					   enum i915_pipe pipe)
1225 {
1226 	int reg;
1227 	u32 val;
1228 	bool enabled;
1229 
1230 	reg = PCH_TRANSCONF(pipe);
1231 	val = I915_READ(reg);
1232 	enabled = !!(val & TRANS_ENABLE);
1233 	WARN(enabled,
1234 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1235 	     pipe_name(pipe));
1236 }
1237 
1238 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1239 			    enum i915_pipe pipe, u32 port_sel, u32 val)
1240 {
1241 	if ((val & DP_PORT_EN) == 0)
1242 		return false;
1243 
1244 	if (HAS_PCH_CPT(dev_priv->dev)) {
1245 		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1246 		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1247 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1248 			return false;
1249 	} else {
1250 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1251 			return false;
1252 	}
1253 	return true;
1254 }
1255 
1256 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1257 			      enum i915_pipe pipe, u32 val)
1258 {
1259 	if ((val & SDVO_ENABLE) == 0)
1260 		return false;
1261 
1262 	if (HAS_PCH_CPT(dev_priv->dev)) {
1263 		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1264 			return false;
1265 	} else {
1266 		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1267 			return false;
1268 	}
1269 	return true;
1270 }
1271 
1272 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1273 			      enum i915_pipe pipe, u32 val)
1274 {
1275 	if ((val & LVDS_PORT_EN) == 0)
1276 		return false;
1277 
1278 	if (HAS_PCH_CPT(dev_priv->dev)) {
1279 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1280 			return false;
1281 	} else {
1282 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1283 			return false;
1284 	}
1285 	return true;
1286 }
1287 
1288 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1289 			      enum i915_pipe pipe, u32 val)
1290 {
1291 	if ((val & ADPA_DAC_ENABLE) == 0)
1292 		return false;
1293 	if (HAS_PCH_CPT(dev_priv->dev)) {
1294 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1295 			return false;
1296 	} else {
1297 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1298 			return false;
1299 	}
1300 	return true;
1301 }
1302 
1303 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1304 				   enum i915_pipe pipe, int reg, u32 port_sel)
1305 {
1306 	u32 val = I915_READ(reg);
1307 	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1308 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1309 	     reg, pipe_name(pipe));
1310 
1311 	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1312 	     && (val & DP_PIPEB_SELECT),
1313 	     "IBX PCH dp port still using transcoder B\n");
1314 }
1315 
1316 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1317 				     enum i915_pipe pipe, int reg)
1318 {
1319 	u32 val = I915_READ(reg);
1320 	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1321 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1322 	     reg, pipe_name(pipe));
1323 
1324 	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1325 	     && (val & SDVO_PIPE_B_SELECT),
1326 	     "IBX PCH hdmi port still using transcoder B\n");
1327 }
1328 
1329 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1330 				      enum i915_pipe pipe)
1331 {
1332 	int reg;
1333 	u32 val;
1334 
1335 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1336 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1337 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1338 
1339 	reg = PCH_ADPA;
1340 	val = I915_READ(reg);
1341 	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1342 	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1343 	     pipe_name(pipe));
1344 
1345 	reg = PCH_LVDS;
1346 	val = I915_READ(reg);
1347 	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1348 	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1349 	     pipe_name(pipe));
1350 
1351 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1352 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1353 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1354 }
1355 
/*
 * Set up the IOSF sideband port number used to reach the display PHY.
 * Only Valleyview routes DPIO access over IOSF; other platforms need
 * no setup here.
 */
static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
}
1365 
/*
 * Reset the VLV DPIO common-lane block. Must only run on init and on
 * resume from S3 with both PLLs disabled (see note below), so it is a
 * no-op on non-VLV hardware.
 */
static void intel_reset_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * Enable the CRI clock source so we can get at the display and the
	 * reference clock for VGA hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV |
		   DPLL_INTEGRATED_CRI_CLK_VLV);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all be set
	 *      to 0.
	 *
	 * This should only be done on init and resume from S3 with both
	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
1393 
/*
 * Enable the VLV DPLL for @crtc using the precomputed value in
 * crtc->config.dpll_hw_state, then wait for the PLL to report lock.
 * The pipe must still be disabled when this is called.
 */
static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	/* The PLL sets DPLL_LOCK_VLV once the VCO has settled. */
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1431 
/*
 * Enable the pre-ILK (i9xx-style) DPLL for @crtc using the precomputed
 * value in crtc->config.dpll_hw_state. The pipe must still be disabled.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ has a separate MD register for the multiplier. */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1477 
1478 /**
1479  * i9xx_disable_pll - disable a PLL
1480  * @dev_priv: i915 private structure
1481  * @pipe: pipe PLL to disable
1482  *
1483  * Disable the PLL for @pipe, making sure the pipe is off first.
1484  *
1485  * Note!  This is for pre-ILK only.
1486  */
1487 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1488 {
1489 	/* Don't disable pipe A or pipe A PLLs if needed */
1490 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1491 		return;
1492 
1493 	/* Make sure the pipe isn't still relying on us */
1494 	assert_pipe_disabled(dev_priv, pipe);
1495 
1496 	I915_WRITE(DPLL(pipe), 0);
1497 	POSTING_READ(DPLL(pipe));
1498 }
1499 
/*
 * Disable the VLV DPLL for @pipe. The pipe must already be off.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 val = 0;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1516 
1517 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1518 		struct intel_digital_port *dport)
1519 {
1520 	u32 port_mask;
1521 
1522 	switch (dport->port) {
1523 	case PORT_B:
1524 		port_mask = DPLL_PORTB_READY_MASK;
1525 		break;
1526 	case PORT_C:
1527 		port_mask = DPLL_PORTC_READY_MASK;
1528 		break;
1529 	default:
1530 		BUG();
1531 	}
1532 
1533 	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
1534 		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1535 		     port_name(dport->port), I915_READ(DPLL(0)));
1536 }
1537 
1538 /**
1539  * ironlake_enable_shared_dpll - enable PCH PLL
1540  * @dev_priv: i915 private structure
1541  * @pipe: pipe PLL to enable
1542  *
1543  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1544  * drives the transcoder clock.
1545  */
1546 static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
1547 {
1548 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1549 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1550 
1551 	/* PCH PLLs only available on ILK, SNB and IVB */
1552 	BUG_ON(dev_priv->info->gen < 5);
1553 	if (WARN_ON(pll == NULL))
1554 		return;
1555 
1556 	if (WARN_ON(pll->refcount == 0))
1557 		return;
1558 
1559 	DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1560 		      pll->name, pll->active, pll->on,
1561 		      crtc->base.base.id);
1562 
1563 	if (pll->active++) {
1564 		WARN_ON(!pll->on);
1565 		assert_shared_dpll_enabled(dev_priv, pll);
1566 		return;
1567 	}
1568 	WARN_ON(pll->on);
1569 
1570 	DRM_DEBUG_KMS("enabling %s\n", pll->name);
1571 	pll->enable(dev_priv, pll);
1572 	pll->on = true;
1573 }
1574 
1575 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1576 {
1577 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1578 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1579 
1580 	/* PCH only available on ILK+ */
1581 	BUG_ON(dev_priv->info->gen < 5);
1582 	if (WARN_ON(pll == NULL))
1583 	       return;
1584 
1585 	if (WARN_ON(pll->refcount == 0))
1586 		return;
1587 
1588 	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1589 		      pll->name, pll->active, pll->on,
1590 		      crtc->base.base.id);
1591 
1592 	if (WARN_ON(pll->active == 0)) {
1593 		assert_shared_dpll_disabled(dev_priv, pll);
1594 		return;
1595 	}
1596 
1597 	assert_shared_dpll_enabled(dev_priv, pll);
1598 	WARN_ON(!pll->on);
1599 	if (--pll->active)
1600 		return;
1601 
1602 	DRM_DEBUG_KMS("disabling %s\n", pll->name);
1603 	pll->disable(dev_priv, pll);
1604 	pll->on = false;
1605 }
1606 
1607 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1608 					   enum i915_pipe pipe)
1609 {
1610 	struct drm_device *dev = dev_priv->dev;
1611 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1612 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1613 	uint32_t reg, val, pipeconf_val;
1614 
1615 	/* PCH only available on ILK+ */
1616 	BUG_ON(dev_priv->info->gen < 5);
1617 
1618 	/* Make sure PCH DPLL is enabled */
1619 	assert_shared_dpll_enabled(dev_priv,
1620 				   intel_crtc_to_shared_dpll(intel_crtc));
1621 
1622 	/* FDI must be feeding us bits for PCH ports */
1623 	assert_fdi_tx_enabled(dev_priv, pipe);
1624 	assert_fdi_rx_enabled(dev_priv, pipe);
1625 
1626 	if (HAS_PCH_CPT(dev)) {
1627 		/* Workaround: Set the timing override bit before enabling the
1628 		 * pch transcoder. */
1629 		reg = TRANS_CHICKEN2(pipe);
1630 		val = I915_READ(reg);
1631 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1632 		I915_WRITE(reg, val);
1633 	}
1634 
1635 	reg = PCH_TRANSCONF(pipe);
1636 	val = I915_READ(reg);
1637 	pipeconf_val = I915_READ(PIPECONF(pipe));
1638 
1639 	if (HAS_PCH_IBX(dev_priv->dev)) {
1640 		/*
1641 		 * make the BPC in transcoder be consistent with
1642 		 * that in pipeconf reg.
1643 		 */
1644 		val &= ~PIPECONF_BPC_MASK;
1645 		val |= pipeconf_val & PIPECONF_BPC_MASK;
1646 	}
1647 
1648 	val &= ~TRANS_INTERLACE_MASK;
1649 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1650 		if (HAS_PCH_IBX(dev_priv->dev) &&
1651 		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1652 			val |= TRANS_LEGACY_INTERLACED_ILK;
1653 		else
1654 			val |= TRANS_INTERLACED;
1655 	else
1656 		val |= TRANS_PROGRESSIVE;
1657 
1658 	I915_WRITE(reg, val | TRANS_ENABLE);
1659 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1660 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1661 }
1662 
/*
 * Enable the single LPT PCH transcoder, fed from @cpu_transcoder via
 * the (always transcoder A) FDI receiver.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace mode. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1693 
/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must be
 * shut down first, since they depend on the transcoder.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1723 
/* Disable the LPT PCH transcoder and clear its timing-override workaround. */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}
1740 
1741 /**
1742  * intel_enable_pipe - enable a pipe, asserting requirements
1743  * @dev_priv: i915 private structure
1744  * @pipe: pipe to enable
1745  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1746  *
1747  * Enable @pipe, making sure that various hardware specific requirements
1748  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1749  *
1750  * @pipe should be %PIPE_A or %PIPE_B.
1751  *
1752  * Will wait until the pipe is actually running (i.e. first vblank) before
1753  * returning.
1754  */
1755 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
1756 			      bool pch_port, bool dsi)
1757 {
1758 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1759 								      pipe);
1760 	enum i915_pipe pch_transcoder;
1761 	int reg;
1762 	u32 val;
1763 
1764 	assert_planes_disabled(dev_priv, pipe);
1765 	assert_cursor_disabled(dev_priv, pipe);
1766 	assert_sprites_disabled(dev_priv, pipe);
1767 
1768 	if (HAS_PCH_LPT(dev_priv->dev))
1769 		pch_transcoder = TRANSCODER_A;
1770 	else
1771 		pch_transcoder = pipe;
1772 
1773 	/*
1774 	 * A pipe without a PLL won't actually be able to drive bits from
1775 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1776 	 * need the check.
1777 	 */
1778 	if (!HAS_PCH_SPLIT(dev_priv->dev))
1779 		if (dsi)
1780 			assert_dsi_pll_enabled(dev_priv);
1781 		else
1782 			assert_pll_enabled(dev_priv, pipe);
1783 	else {
1784 		if (pch_port) {
1785 			/* if driving the PCH, we need FDI enabled */
1786 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1787 			assert_fdi_tx_pll_enabled(dev_priv,
1788 						  (enum i915_pipe) cpu_transcoder);
1789 		}
1790 		/* FIXME: assert CPU port conditions for SNB+ */
1791 	}
1792 
1793 	reg = PIPECONF(cpu_transcoder);
1794 	val = I915_READ(reg);
1795 	if (val & PIPECONF_ENABLE)
1796 		return;
1797 
1798 	I915_WRITE(reg, val | PIPECONF_ENABLE);
1799 	intel_wait_for_vblank(dev_priv->dev, pipe);
1800 }
1801 
1802 /**
1803  * intel_disable_pipe - disable a pipe, asserting requirements
1804  * @dev_priv: i915 private structure
1805  * @pipe: pipe to disable
1806  *
1807  * Disable @pipe, making sure that various hardware specific requirements
1808  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1809  *
1810  * @pipe should be %PIPE_A or %PIPE_B.
1811  *
1812  * Will wait until the pipe has shut down before returning.
1813  */
1814 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1815 			       enum i915_pipe pipe)
1816 {
1817 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1818 								      pipe);
1819 	int reg;
1820 	u32 val;
1821 
1822 	/*
1823 	 * Make sure planes won't keep trying to pump pixels to us,
1824 	 * or we might hang the display.
1825 	 */
1826 	assert_planes_disabled(dev_priv, pipe);
1827 	assert_cursor_disabled(dev_priv, pipe);
1828 	assert_sprites_disabled(dev_priv, pipe);
1829 
1830 	/* Don't disable pipe A or pipe A PLLs if needed */
1831 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1832 		return;
1833 
1834 	reg = PIPECONF(cpu_transcoder);
1835 	val = I915_READ(reg);
1836 	if ((val & PIPECONF_ENABLE) == 0)
1837 		return;
1838 
1839 	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1840 	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1841 }
1842 
1843 /*
1844  * Plane regs are double buffered, going from enabled->disabled needs a
1845  * trigger in order to latch.  The display address reg provides this.
1846  */
1847 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1848 			       enum plane plane)
1849 {
1850 	u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
1851 
1852 	I915_WRITE(reg, I915_READ(reg));
1853 	POSTING_READ(reg);
1854 }
1855 
1856 /**
1857  * intel_enable_primary_plane - enable the primary plane on a given pipe
1858  * @dev_priv: i915 private structure
1859  * @plane: plane to enable
1860  * @pipe: pipe being fed
1861  *
1862  * Enable @plane on @pipe, making sure that @pipe is running first.
1863  */
1864 static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
1865 				       enum plane plane, enum i915_pipe pipe)
1866 {
1867 	struct intel_crtc *intel_crtc =
1868 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1869 	int reg;
1870 	u32 val;
1871 
1872 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1873 	assert_pipe_enabled(dev_priv, pipe);
1874 
1875 	WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
1876 
1877 	intel_crtc->primary_enabled = true;
1878 
1879 	reg = DSPCNTR(plane);
1880 	val = I915_READ(reg);
1881 	if (val & DISPLAY_PLANE_ENABLE)
1882 		return;
1883 
1884 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1885 	intel_flush_primary_plane(dev_priv, plane);
1886 	intel_wait_for_vblank(dev_priv->dev, pipe);
1887 }
1888 
1889 /**
1890  * intel_disable_primary_plane - disable the primary plane
1891  * @dev_priv: i915 private structure
1892  * @plane: plane to disable
1893  * @pipe: pipe consuming the data
1894  *
1895  * Disable @plane; should be an independent operation.
1896  */
1897 static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
1898 					enum plane plane, enum i915_pipe pipe)
1899 {
1900 	struct intel_crtc *intel_crtc =
1901 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1902 	int reg;
1903 	u32 val;
1904 
1905 	WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
1906 
1907 	intel_crtc->primary_enabled = false;
1908 
1909 	reg = DSPCNTR(plane);
1910 	val = I915_READ(reg);
1911 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1912 		return;
1913 
1914 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1915 	intel_flush_primary_plane(dev_priv, plane);
1916 	intel_wait_for_vblank(dev_priv->dev, pipe);
1917 }
1918 
1919 static bool need_vtd_wa(struct drm_device *dev)
1920 {
1921 #ifdef CONFIG_INTEL_IOMMU
1922 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1923 		return true;
1924 #endif
1925 	return false;
1926 }
1927 
/*
 * Pin @obj into the GGTT for scanout and install a fence register for it.
 * Pinning is done with mm.interruptible forced off so a pending signal
 * cannot leave the framebuffer half set up; the flag is restored on every
 * exit path. On success the caller owns one display-plane pin and one
 * fence reference, both released by intel_unpin_fb_obj().
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Pick the GGTT alignment scanout requires for this tiling mode. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
1990 
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence first, then
 * unpin the object from the display plane.  Order mirrors the reverse
 * of the pin path.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj);
}
1996 
1997 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1998  * is assumed to be a power-of-two. */
1999 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2000 					     unsigned int tiling_mode,
2001 					     unsigned int cpp,
2002 					     unsigned int pitch)
2003 {
2004 	if (tiling_mode != I915_TILING_NONE) {
2005 		unsigned int tile_rows, tiles;
2006 
2007 		tile_rows = *y / 8;
2008 		*y %= 8;
2009 
2010 		tiles = *x / (512/cpp);
2011 		*x %= 512/cpp;
2012 
2013 		return tile_rows * pitch * 8 + tiles * 4096;
2014 	} else {
2015 		unsigned int offset;
2016 
2017 		offset = *y * pitch + *x * cpp;
2018 		*y = 0;
2019 		*x = (offset & 4095) / cpp;
2020 		return offset & -4096;
2021 	}
2022 }
2023 
/*
 * Program the primary plane registers for pre-Ironlake (i9xx) hardware.
 *
 * Translates the framebuffer's pixel format into DSPCNTR bits, sets the
 * tiling flag on gen4+, then writes stride, base and offset registers.
 * On gen4+ the base is the page-aligned dspaddr_offset with the x/y
 * remainder programmed via DSPTILEOFF/DSPLINOFF; older parts take a
 * single linear DSPADDR.  Returns 0, or -EINVAL for a plane this
 * hardware does not have.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* i9xx only has planes A and B. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated at fb creation; anything else is a bug. */
		BUG();
	}

	/* Gen4+ planes understand tiled scanout; mirror the bo's tiling. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	/* Gen4+ scans out from a page-aligned base; split off the remainder. */
	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	/* Flush the control-register write before returning. */
	POSTING_READ(reg);

	return 0;
}
2123 
/*
 * Program the primary plane registers for Ironlake and newer PCH
 * platforms (three planes, no 555 formats, page-aligned base always).
 *
 * Haswell/Broadwell use DSPOFFSET for the x/y remainder and must keep
 * trickle feed enabled; other gens use DSPTILEOFF/DSPLINOFF and disable
 * trickle feed.  Returns 0, or -EINVAL for an unknown plane.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* Ironlake-class hardware has planes A, B and C. */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are validated at fb creation time. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* HSW/BDW keep trickle feed on; everything else disables it. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
	else
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	/* Flush the writes before returning. */
	POSTING_READ(reg);

	return 0;
}
2216 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* FBC and a downclocked PLL would interfere with the atomic
	 * (e.g. kgdb/panic) plane switch; turn FBC off and raise the
	 * pixel clock before touching the plane. */
	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	/* Dispatch to the platform's plane update (i9xx/ironlake). */
	return dev_priv->display.update_plane(crtc, fb, x, y);
}
2231 
/*
 * Post-GPU-reset fixup for the display side: complete all in-flight page
 * flips (the rings that carried them were nuked) and reprogram every
 * active primary plane so the last fb is actually being scanned out.
 */
void intel_display_handle_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/*
	 * Flips in the rings have been nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 *
	 * Also update the base address of all primary
	 * planes to the last fb to make sure we're
	 * showing the correct fb after a reset.
	 *
	 * Need to make two loops over the crtcs so that we
	 * don't try to grab a crtc mutex before the
	 * pending_flip_queue really got woken up.
	 */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		mutex_lock(&crtc->mutex);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->fb.
		 */
		if (intel_crtc->active && crtc->fb)
			dev_priv->display.update_plane(crtc, crtc->fb,
						       crtc->x, crtc->y);
		mutex_unlock(&crtc->mutex);
	}
}
2274 
/*
 * Wait for the GPU to be done with the old framebuffer's backing object
 * before it gets unpinned.  The wait runs non-interruptibly (flag saved
 * and restored) so a signal cannot abort the retire.  Returns the result
 * of i915_gem_object_finish_gpu(); failure only happens on a hung GPU
 * and is safe to ignore.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
2297 
/*
 * Mirror the new scanout x/y into the legacy SAREA shared page so old
 * DRI1-era userspace sees the pan position.  Only pipes A and B have
 * SAREA slots; other pipes are silently ignored.
 *
 * The #if 0 branch is the upstream Linux master_priv lookup; this port
 * reaches the sarea through dev_priv instead.
 */
static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;
#else
	if (!dev_priv->sarea_priv)
		return;
#endif

	switch (intel_crtc->pipe) {
	case 0:
		dev_priv->sarea_priv->pipeA_x = x;
		dev_priv->sarea_priv->pipeA_y = y;
		break;
	case 1:
		dev_priv->sarea_priv->pipeB_x = x;
		dev_priv->sarea_priv->pipeB_y = y;
		break;
	default:
		/* No SAREA fields for pipe C and beyond. */
		break;
	}
}
2329 
/*
 * Flip the crtc's primary plane to a new framebuffer at (x, y).
 *
 * Pins + fences the new fb, reprograms pipe size / panel fitter in the
 * fastboot case, updates the plane registers, then waits a vblank and
 * unpins the old fb.  All GEM work happens under struct_mutex.  Returns
 * 0 on success or a negative errno (new fb is unpinned on failure).
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_framebuffer *old_fb;
	int ret;

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* NOTE(review): '>' (not '>=') — planes are 0-indexed, so this
	 * permits plane == num_pipes; verify that is intentional. */
	if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
			  plane_name(intel_crtc->plane),
			  INTEL_INFO(dev)->num_pipes);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */
	if (i915_fastboot) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config.adjusted_mode;

		I915_WRITE(PIPESRC(intel_crtc->pipe),
			   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
			   (adjusted_mode->crtc_vdisplay - 1));
		if (!intel_crtc->config.pch_pfit.enabled &&
		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
		     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
			/* Panel fitter not in use: zero its registers. */
			I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
			I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
		}
		intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
		intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
	}

	ret = dev_priv->display.update_plane(crtc, fb, x, y);
	if (ret) {
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	old_fb = crtc->fb;
	crtc->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Wait a vblank so the old fb is no longer scanned out
		 * before unpinning it. */
		if (intel_crtc->active && old_fb != fb)
			intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	intel_update_fbc(dev);
	intel_edp_psr_update(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_sarea_pos(crtc, x, y);

	return 0;
}
2421 
/*
 * Switch the FDI link out of training patterns into normal pixel
 * transmission, on both the CPU-side TX and the PCH-side RX, then wait
 * one idle-pattern time.  IVB additionally wants error correction on.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* NOTE(review): clears then sets the same NONE_IVB bit —
		 * matches the historical sequence; verify against upstream
		 * before simplifying. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2462 
2463 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2464 {
2465 	return crtc->base.enabled && crtc->active &&
2466 		crtc->config.has_pch_encoder;
2467 }
2468 
/*
 * IVB global modeset hook: drop FDI B/C bifurcation when neither pipe B
 * nor pipe C has an enabled PCH encoder, so FDI B can later be enabled
 * with all lanes.  WARNs if either RX is still enabled at that point.
 */
static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}
2494 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll (up to 5 reads) for bit lock; ack it by writing it back. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to complete training pattern 2. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2589 
/* Voltage-swing / pre-emphasis settings tried in order during SNB-B
 * (and IVB manual) FDI link training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2596 
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Like the ILK variant but sweeps the four snb_b_fdi_train_param
 * voltage/emphasis settings, retrying the lock poll five times per
 * setting, for each of the two training patterns.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Sweep voltage/emphasis settings until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same sweep again, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2728 
/* Manual link training for Ivy Bridge A0 parts
 *
 * Tries each snb_b_fdi_train_param voltage/emphasis setting twice; on
 * each attempt the link is fully disabled and re-enabled, pattern 1 is
 * trained (bit lock), then pattern 2 (symbol lock).  Success jumps to
 * train_done; otherwise the next setting is tried.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; a second IIR read guards against a
		 * status bit that flickers between reads. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
2847 
/*
 * Power up the FDI PLLs for this pipe: enable the PCH-side RX PLL
 * (lane count and BPC taken from PIPECONF), switch RX from raw clock to
 * PCD clock, then enable the CPU-side TX PLL if it isn't already on.
 * Each step is followed by a posting read plus the documented warmup
 * delay.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
	/* Copy the pipe's BPC from PIPECONF into the FDI RX BPC field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
2884 
/*
 * Reverse of ironlake_fdi_pll_enable(): switch the RX back to the raw
 * clock, disable the CPU TX PLL, then the PCH RX PLL, waiting for the
 * clocks to settle after each disable.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
2913 
/*
 * Disable the FDI link for this pipe: turn off CPU TX then PCH RX
 * (preserving the BPC field from PIPECONF), apply the IBX clock-pointer
 * workaround, and park both sides back on training pattern 1 so a later
 * re-enable starts from a known state.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
2966 
/*
 * True if the crtc still has an outstanding page flip (unpin_work set),
 * checked under the device event lock.  Returns false unconditionally
 * while a GPU reset is in progress or after a reset has completed since
 * the flip was queued — those flips are finished by the reset path, not
 * by vblank.  (lockmgr is the DragonFly spinlock/lock primitive used in
 * place of Linux's spin_lock_irq here.)
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}
2984 
/*
 * Best-effort check for a framebuffer unpin still in flight on any crtc.
 * If a crtc has queued unpin work, optionally waits one vblank to give
 * it a chance to run, then reports true.  Deliberately racy — see the
 * comment below for why that is safe.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquring the
	 * struct_mutex and so serialising with us.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}
3008 
/*
 * Wait until any page flip queued on @crtc has completed, then finish
 * rendering to the crtc's current framebuffer. No-op when the crtc has
 * no framebuffer attached.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->fb == NULL)
		return;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	/* Sleep until the flip completion path wakes the queue and no
	 * flip remains pending on this crtc. */
	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	/* intel_finish_fb is called with struct_mutex held. */
	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
3026 
/* Program iCLKIP clock to the desired frequency.
 *
 * Computes the integer divisor (divsel), phase increment and auxiliary
 * divider for the iCLKIP modulator from the crtc's pixel clock, then
 * programs them through the sideband interface (SBI). The pixclk gate
 * is toggled around the update and SSC is disabled while reprogramming.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* adjusted_mode.crtc_clock is in kHz (see divisor comment below). */
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* dpio_lock serialises all sideband (SBI) accesses. */
	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
3115 
/*
 * Mirror the CPU transcoder timing registers of @crtc into the PCH
 * transcoder @pch_transcoder: horizontal and vertical total/blank/sync
 * plus the vsync shift.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
3139 
/*
 * Set FDI_BC_BIFURCATION_SELECT in SOUTH_CHICKEN1, splitting FDI B's
 * lanes between pipes B and C. Idempotent: returns early if the bit is
 * already set. The WARN_ONs check that FDI B and C RX are disabled
 * before the bit is flipped.
 */
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
3157 
3158 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3159 {
3160 	struct drm_device *dev = intel_crtc->base.dev;
3161 	struct drm_i915_private *dev_priv = dev->dev_private;
3162 
3163 	switch (intel_crtc->pipe) {
3164 	case PIPE_A:
3165 		break;
3166 	case PIPE_B:
3167 		if (intel_crtc->config.fdi_lanes > 2)
3168 			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3169 		else
3170 			cpt_enable_fdi_bc_bifurcation(dev);
3171 
3172 		break;
3173 	case PIPE_C:
3174 		cpt_enable_fdi_bc_bifurcation(dev);
3175 
3176 		break;
3177 	default:
3178 		BUG();
3179 	}
3180 }
3181 
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 *
 * The ordering of the steps below follows the hardware modeset sequence
 * and must not be rearranged.
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* Route this pipe's transcoder to DPLL A or B per the
		 * shared_dpll chosen during mode set. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	ironlake_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* Extract the pipe's BPC field so the transcoder uses the
		 * same colour depth as the pipe. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Select the DP port feeding this transcoder. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3279 
/*
 * LPT variant of PCH enable: program the iCLKIP clock, copy the CPU
 * transcoder timings into the (single) PCH transcoder A, and enable it.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	/* LPT has only one PCH transcoder. */
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3296 
3297 static void intel_put_shared_dpll(struct intel_crtc *crtc)
3298 {
3299 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3300 
3301 	if (pll == NULL)
3302 		return;
3303 
3304 	if (pll->refcount == 0) {
3305 		WARN(1, "bad %s refcount\n", pll->name);
3306 		return;
3307 	}
3308 
3309 	if (--pll->refcount == 0) {
3310 		WARN_ON(pll->on);
3311 		WARN_ON(pll->active);
3312 	}
3313 
3314 	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3315 }
3316 
3317 static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3318 {
3319 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3320 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3321 	enum intel_dpll_id i;
3322 
3323 	if (pll) {
3324 		DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3325 			      crtc->base.base.id, pll->name);
3326 		intel_put_shared_dpll(crtc);
3327 	}
3328 
3329 	if (HAS_PCH_IBX(dev_priv->dev)) {
3330 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3331 		i = (enum intel_dpll_id) crtc->pipe;
3332 		pll = &dev_priv->shared_dplls[i];
3333 
3334 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3335 			      crtc->base.base.id, pll->name);
3336 
3337 		goto found;
3338 	}
3339 
3340 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3341 		pll = &dev_priv->shared_dplls[i];
3342 
3343 		/* Only want to check enabled timings first */
3344 		if (pll->refcount == 0)
3345 			continue;
3346 
3347 		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3348 			   sizeof(pll->hw_state)) == 0) {
3349 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
3350 				      crtc->base.base.id,
3351 				      pll->name, pll->refcount, pll->active);
3352 
3353 			goto found;
3354 		}
3355 	}
3356 
3357 	/* Ok no matching timings, maybe there's a free one? */
3358 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3359 		pll = &dev_priv->shared_dplls[i];
3360 		if (pll->refcount == 0) {
3361 			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3362 				      crtc->base.base.id, pll->name);
3363 			goto found;
3364 		}
3365 	}
3366 
3367 	return NULL;
3368 
3369 found:
3370 	crtc->config.shared_dpll = i;
3371 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3372 			 pipe_name(crtc->pipe));
3373 
3374 	if (pll->active == 0) {
3375 		memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3376 		       sizeof(pll->hw_state));
3377 
3378 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
3379 		WARN_ON(pll->on);
3380 		assert_shared_dpll_disabled(dev_priv, pll);
3381 
3382 		pll->mode_set(dev_priv, pll);
3383 	}
3384 	pll->refcount++;
3385 
3386 	return pll;
3387 }
3388 
/*
 * Post-modeset sanity check for CPT: sample the pipe's display scanline
 * register (PIPEDSL) and verify it changes within the timeout, i.e. the
 * pipe is actually scanning out. Retries once before complaining.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
3402 
/*
 * Enable the PCH panel fitter for @crtc if the crtc's config requests
 * it, programming the window position and size from the config.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config.pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
	}
}
3423 
/* Restore (re-enable) every sprite plane attached to @crtc's pipe. */
static void intel_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct intel_plane *intel_plane;

	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
		if (intel_plane->pipe == pipe)
			intel_plane_restore(&intel_plane->base);
}
3434 
/* Disable every sprite plane attached to @crtc's pipe. */
static void intel_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct intel_plane *intel_plane;

	list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
		if (intel_plane->pipe == pipe)
			intel_plane_disable(&intel_plane->base);
}
3445 
/*
 * Enable IPS (Intermediate Pixel Storage) on @crtc if the crtc's config
 * requests it. On Broadwell this goes through the pcode mailbox; on
 * Haswell the IPS_CTL register is written directly and polled.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank.
	 * We guarantee that the plane is enabled by calling intel_enable_ips
	 * only after intel_enable_plane. And intel_enable_plane already waits
	 * for a vblank, so all we need to do here is to enable the IPS bit. */
	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
3478 
/*
 * Disable IPS on @crtc (no-op if the config never enabled it). Uses the
 * pcode mailbox on Broadwell, a direct IPS_CTL write on Haswell, then
 * waits a vblank so the plane can safely be disabled afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config.ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(crtc->base.dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
3500 
/** Loads the palette/gamma unit for the CRTC with the prepared values.
 *
 * Writes the 256-entry LUT (8 bits per channel, packed R:G:B into one
 * 32-bit register per entry) from the crtc's lut_r/g/b arrays. The pipe
 * clocks must be on; on Haswell IPS is temporarily disabled around the
 * update when split-gamma mode is active (hw workaround).
 */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	if (!HAS_PCH_SPLIT(dev_priv->dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(pipe);

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
3547 
/*
 * Full crtc enable sequence for Ironlake-class hardware: underrun
 * reporting, encoder pre-enable, FDI PLL (for PCH outputs), panel
 * fitter, LUT, pipe + planes + cursor, PCH resources, FBC, encoders.
 * Idempotent: returns early if the crtc is already active. The step
 * order follows the hw modeset sequence and must not be rearranged.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(dev_priv, pipe,
			  intel_crtc->config.has_pch_encoder, false);
	intel_enable_primary_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_pch_enable(crtc);

	/* struct_mutex guards the FBC update. */
	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happend, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3619 
3620 /* IPS only exists on ULT machines and is tied to pipe A. */
3621 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3622 {
3623 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3624 }
3625 
/*
 * Enable all planes on @crtc (primary, sprites, cursor), then IPS and
 * FBC. Split out of haswell_crtc_enable so that plane enabling happens
 * after the pipe/transcoder are running.
 */
static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_enable_primary_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	/* IPS may only be enabled once a plane is on (see hsw_enable_ips). */
	hsw_enable_ips(intel_crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3644 
/*
 * Reverse of haswell_crtc_enable_planes: wait for flips, turn off FBC
 * and IPS, then disable cursor, sprites and the primary plane.
 */
static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	/* FBC must be disabled before disabling the plane on HSW. */
	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	hsw_disable_ips(intel_crtc);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_plane(dev_priv, plane, pipe);
}
3666 
3667 /*
3668  * This implements the workaround described in the "notes" section of the mode
3669  * set sequence documentation. When going from no pipes or single pipe to
3670  * multiple pipes, and planes are enabled after the pipe, we need to wait at
3671  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3672  */
3673 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3674 {
3675 	struct drm_device *dev = crtc->base.dev;
3676 	struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3677 
3678 	/* We want to get the other_active_crtc only if there's only 1 other
3679 	 * active crtc. */
3680 	list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3681 		if (!crtc_it->active || crtc_it == crtc)
3682 			continue;
3683 
3684 		if (other_active_crtc)
3685 			return;
3686 
3687 		other_active_crtc = crtc_it;
3688 	}
3689 	if (!other_active_crtc)
3690 		return;
3691 
3692 	intel_wait_for_vblank(dev, other_active_crtc->pipe);
3693 	intel_wait_for_vblank(dev, other_active_crtc->pipe);
3694 }
3695 
/*
 * Full crtc enable sequence for Haswell: underrun reporting, FDI
 * training (PCH outputs), encoder pre-enable, DDI pipe clock, panel
 * fitter, LUT, DDI transcoder, pipe, LPT PCH, encoders, then planes via
 * haswell_crtc_enable_planes. Idempotent if the crtc is already active.
 * The step order follows the hw modeset sequence; do not rearrange.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);

	if (intel_crtc->config.has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	intel_ddi_enable_pipe_clock(intel_crtc);

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(dev_priv, pipe,
			  intel_crtc->config.has_pch_encoder, false);

	if (intel_crtc->config.has_pch_encoder)
		lpt_pch_enable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	haswell_crtc_enable_planes(crtc);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happend, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3762 
/*
 * Disable the PCH panel fitter for @crtc by zeroing its control, window
 * position and window size registers.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config.pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
3777 
/*
 * Full crtc disable sequence for Ironlake-class hardware: encoders,
 * pending flips, FBC, cursor/planes, pipe, panel fitter, encoder
 * post-disable, then (for PCH outputs) FDI, PCH transcoder, TRANS_DP_CTL,
 * DPLL_SEL, the shared DPLL and the FDI PLL. Idempotent if inactive.
 * The step order mirrors the hw modeset sequence; do not rearrange.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;


	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_plane(dev_priv, plane, pipe);

	/* Underrun reporting is suppressed while the PCH side is torn
	 * down, and re-enabled below once the transcoder is off. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);

	intel_disable_pipe(dev_priv, pipe);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);
		intel_set_pch_fifo_underrun_reporting(dev, pipe, true);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3850 
/*
 * Full crtc disable sequence for Haswell: planes, encoders, pipe, DDI
 * transcoder, panel fitter, DDI pipe clock, encoder post-disable, then
 * (for PCH outputs) the LPT PCH transcoder and FDI. Idempotent if the
 * crtc is already inactive; step order mirrors the hw modeset sequence.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	if (!intel_crtc->active)
		return;

	haswell_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	/* Suppress PCH underrun reporting while tearing down; restored
	 * below once the PCH transcoder is off. */
	if (intel_crtc->config.has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
	intel_disable_pipe(dev_priv, pipe);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config.has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
		intel_ddi_fdi_disable(crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3897 
/* crtc_off hook for Ironlake: release the crtc's shared PCH DPLL. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	intel_put_shared_dpll(to_intel_crtc(crtc));
}
3903 
/* crtc_off hook for Haswell: release the crtc's DDI PLL. */
static void haswell_crtc_off(struct drm_crtc *crtc)
{
	intel_ddi_put_crtc_pll(crtc);
}
3908 
/*
 * DPMS helper for the legacy overlay: switching it off is done here
 * (uninterruptibly, under struct_mutex); switching it back on is left
 * to userspace.
 */
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		/* Run the switch-off uninterruptibly so it cannot be
		 * aborted halfway; restore interruptibility after. */
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
3926 
/**
 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 * plane.
 * This workaround avoids occasional blank screens when self refresh is
 * enabled.
 */
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 cntl = I915_READ(CURCNTR(pipe));

	if ((cntl & CURSOR_MODE) == 0) {
		/* Temporarily turn off self-refresh, pulse the cursor
		 * plane on for one vblank, then restore everything. */
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
		intel_wait_for_vblank(dev_priv->dev, pipe);
		I915_WRITE(CURCNTR(pipe), cntl);
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
	}
}
3950 
/* Enable the gmch panel fitter with the ratios computed at mode-set time.
 * Must run while the pipe is still disabled. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_config *pipe_config = &crtc->config;

	/* Nothing to do if no panel fitting was requested for this mode. */
	if (!crtc->config.gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* Program the scaling ratios before flipping the enable bit. */
	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
3974 
/* Look up the HPLL VCO frequency (in MHz) from the fused SKU bits. */
int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	/* The masked fuse value indexes directly into the frequency table. */
	return vco_freq[hpll_freq];
}
3987 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* Pick the punit voltage request for the target frequency (MHz). */
	if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new frequency/voltage from the punit and wait for the
	 * status field to acknowledge the change. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400) {
		u32 divider, vco;

		vco = valleyview_get_vco(dev_priv);
		/* divider field holds N-1, where cdclk = 2*vco/N */
		divider = ((vco << 1) / cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~0xf;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
	intel_i2c_reset(dev);
}
4047 
4048 static int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4049 {
4050 	int cur_cdclk, vco;
4051 	int divider;
4052 
4053 	vco = valleyview_get_vco(dev_priv);
4054 
4055 	mutex_lock(&dev_priv->dpio_lock);
4056 	divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4057 	mutex_unlock(&dev_priv->dpio_lock);
4058 
4059 	divider &= 0xf;
4060 
4061 	cur_cdclk = (vco << 1) / (divider + 1);
4062 
4063 	return cur_cdclk;
4064 }
4065 
/*
 * Work out the CDclk frequency (in MHz) required for the given maximum pixel
 * clock (in kHz).  The result depends only on max_pixclk, so the original
 * valleyview_cur_cdclk() call (which took the dpio lock and read hardware
 * just to discard the result) has been removed as dead code.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320MHz
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 */
	if (max_pixclk > 288000)
		return 400;
	else if (max_pixclk > 240000)
		return 320;
	else
		return 266;
	/* Looks like the 200MHz CDclk freq doesn't work on some configs */
}
4090 
4091 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
4092 				 unsigned modeset_pipes,
4093 				 struct intel_crtc_config *pipe_config)
4094 {
4095 	struct drm_device *dev = dev_priv->dev;
4096 	struct intel_crtc *intel_crtc;
4097 	int max_pixclk = 0;
4098 
4099 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
4100 			    base.head) {
4101 		if (modeset_pipes & (1 << intel_crtc->pipe))
4102 			max_pixclk = max(max_pixclk,
4103 					 pipe_config->adjusted_mode.crtc_clock);
4104 		else if (intel_crtc->base.enabled)
4105 			max_pixclk = max(max_pixclk,
4106 					 intel_crtc->config.adjusted_mode.crtc_clock);
4107 	}
4108 
4109 	return max_pixclk;
4110 }
4111 
/* If the upcoming mode set changes the required CDclk, add every currently
 * enabled pipe to prepare_pipes so the global clock switch is done safely. */
static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes,
					    unsigned modeset_pipes,
					    struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
					       pipe_config);
	int cur_cdclk = valleyview_cur_cdclk(dev_priv);

	/* CDclk stays the same -> no extra pipes need preparing. */
	if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
		return;

	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}
4131 
4132 static void valleyview_modeset_global_resources(struct drm_device *dev)
4133 {
4134 	struct drm_i915_private *dev_priv = dev->dev_private;
4135 	int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
4136 	int cur_cdclk = valleyview_cur_cdclk(dev_priv);
4137 	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4138 
4139 	if (req_cdclk != cur_cdclk)
4140 		valleyview_set_cdclk(dev, req_cdclk);
4141 }
4142 
/* Full pipe enable sequence for ValleyView: pre-PLL encoder hooks, DPLL
 * (skipped for DSI), pre-enable hooks, pfit, LUT, pipe, planes, cursor,
 * FBC, then the encoder enable hooks.  Ordering is hardware-mandated. */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	bool is_dsi;

	WARN_ON(!crtc->enabled);

	/* Nothing to do if the pipe is already running. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);

	/* The display PLL is not used when the pipe is driven by DSI. */
	if (!is_dsi)
		vlv_enable_pll(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(dev_priv, pipe, false, is_dsi);
	intel_enable_primary_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	intel_crtc_update_cursor(crtc, true);

	intel_update_fbc(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
4188 
/* Full pipe enable sequence for gen2-4: pre-enable encoder hooks, DPLL,
 * pfit, LUT, pipe, planes, cursor, overlay, FBC, then encoder enable. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	/* Nothing to do if the pipe is already running. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(dev_priv, pipe, false, false);
	intel_enable_primary_plane(dev_priv, plane, pipe);
	intel_enable_planes(crtc);
	/* The fixup needs to happen before cursor is enabled */
	if (IS_G4X(dev))
		g4x_fixup_plane(dev_priv, pipe);
	intel_crtc_update_cursor(crtc, true);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);

	intel_update_fbc(dev);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
4232 
/* Turn off the gmch panel fitter; the pipe must already be disabled. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Nothing to do if the panel fitter was not in use for this mode. */
	if (!crtc->config.gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
4247 
/* Full pipe disable sequence for gen2-4/VLV: encoders, pending flips,
 * FBC, overlay, cursor, planes, pipe, pfit, post-disable hooks, DPLL. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* Nothing to do if the pipe is already off. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);

	if (dev_priv->fbc.plane == plane)
		intel_disable_fbc(dev);

	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);
	intel_disable_planes(crtc);
	intel_disable_primary_plane(dev_priv, plane, pipe);

	intel_disable_pipe(dev_priv, pipe);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI pipes keep their PLL management in the encoder hooks. */
	if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
		vlv_disable_pll(dev_priv, pipe);
	else if (!IS_VALLEYVIEW(dev))
		i9xx_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	intel_update_fbc(dev);
}
4293 
/* No shared PLL resources to release on i9xx; the per-pipe DPLL is
 * already handled in i9xx_crtc_disable(). */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
4297 
/* Mirror the pipe dimensions into the legacy SAREA so old userspace can
 * see them; zeroed when the pipe is disabled.  The #if 0 branch is the
 * Linux master-based lookup, replaced here by dev_priv->sarea_priv. */
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;
#else
	if (!dev_priv->sarea_priv)
		return;
#endif

	/* Only pipes A and B have SAREA slots. */
	switch (pipe) {
	case 0:
		dev_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		dev_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		dev_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		dev_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
4332 
4333 /**
4334  * Sets the power management mode of the pipe and plane.
4335  */
4336 void intel_crtc_update_dpms(struct drm_crtc *crtc)
4337 {
4338 	struct drm_device *dev = crtc->dev;
4339 	struct drm_i915_private *dev_priv = dev->dev_private;
4340 	struct intel_encoder *intel_encoder;
4341 	bool enable = false;
4342 
4343 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4344 		enable |= intel_encoder->connectors_active;
4345 
4346 	if (enable)
4347 		dev_priv->display.crtc_enable(crtc);
4348 	else
4349 		dev_priv->display.crtc_disable(crtc);
4350 
4351 	intel_crtc_update_sarea(crtc, enable);
4352 }
4353 
/* Fully switch off a crtc: hardware disable, SAREA bookkeeping, PLL release
 * via ->off(), framebuffer unpin, and marking all connectors off. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->eld_vld = false;
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	/* Sanity-check that the hardware really is off now. */
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		crtc->fb = NULL;
	}

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}
4392 
/* Default encoder destroy hook: tear down the drm encoder and free the
 * containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
4400 
4401 /* Simple dpms helper for encoders with just one connector, no cloning and only
4402  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4403  * state of the entire output pipe. */
4404 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4405 {
4406 	if (mode == DRM_MODE_DPMS_ON) {
4407 		encoder->connectors_active = true;
4408 
4409 		intel_crtc_update_dpms(encoder->base.crtc);
4410 	} else {
4411 		encoder->connectors_active = false;
4412 
4413 		intel_crtc_update_dpms(encoder->base.crtc);
4414 	}
4415 }
4416 
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base));

		/* An active connector implies active dpms, a linked encoder
		 * and that encoder knowing it has active connectors. */
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");
		WARN(!encoder->connectors_active,
		     "encoder->connectors_active not set\n");

		/* The encoder in turn must be enabled on an active crtc,
		 * and report the same pipe the crtc is tracked on. */
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
		WARN(!encoder_enabled, "encoder not enabled\n");
		if (WARN_ON(!encoder->base.crtc))
			return;

		crtc = encoder->base.crtc;

		WARN(!crtc->enabled, "crtc not enabled\n");
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
		WARN(pipe != to_intel_crtc(crtc)->pipe,
		     "encoder active on the wrong pipe\n");
	}
}
4451 
4452 /* Even simpler default implementation, if there's really no special case to
4453  * consider. */
4454 void intel_connector_dpms(struct drm_connector *connector, int mode)
4455 {
4456 	/* All the simple cases only support two dpms states. */
4457 	if (mode != DRM_MODE_DPMS_ON)
4458 		mode = DRM_MODE_DPMS_OFF;
4459 
4460 	if (mode == connector->dpms)
4461 		return;
4462 
4463 	connector->dpms = mode;
4464 
4465 	/* Only need to change hw state when actually enabled */
4466 	if (connector->encoder)
4467 		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4468 
4469 	intel_modeset_check_state(connector->dev);
4470 }
4471 
4472 /* Simple connector->get_hw_state implementation for encoders that support only
4473  * one connector and no cloning and hence the encoder state determines the state
4474  * of the connector. */
4475 bool intel_connector_get_hw_state(struct intel_connector *connector)
4476 {
4477 	enum i915_pipe pipe = 0;
4478 	struct intel_encoder *encoder = connector->encoder;
4479 
4480 	return encoder->get_hw_state(encoder, &pipe);
4481 }
4482 
/* Validate the FDI lane count requested for a pipe: a global 4-lane cap,
 * a 2-lane cap on HSW/BDW, and the pipe B/C lane-sharing rules on 3-pipe
 * (Ivybridge) parts.  Returns false if the config cannot work. */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes while pipe C is unused. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C can only run if pipe B leaves it enough lanes. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config.fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}
4540 
/* Returned to tell the caller to redo the modeset computation with the
 * reduced pipe_bpp now stored in pipe_config. */
#define RETRY 1
/* Compute FDI lane count and M/N values for the requested mode, lowering
 * the pipe bpp in 6-bit steps (down to 18bpp) until the lane configuration
 * passes ironlake_check_fdi_lanes(). */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	/* On failure, shave 2 bits per channel off and try again. */
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}
4587 
4588 static void hsw_compute_ips_config(struct intel_crtc *crtc,
4589 				   struct intel_crtc_config *pipe_config)
4590 {
4591 	pipe_config->ips_enabled = i915_enable_ips &&
4592 				   hsw_crtc_supports_ips(crtc) &&
4593 				   pipe_config->pipe_bpp <= 24;
4594 }
4595 
/* Platform-independent part of pipe config computation: gen<4 pixel clock
 * limit and double-wide mode, even-width constraints, the zero hsync front
 * porch quirk, bpp clamping, IPS, shared-DPLL carry-over and (for PCH
 * encoders) the FDI configuration.  Returns 0, RETRY or a negative errno. */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Still over the (possibly doubled) limit -> mode won't fit. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	/* Clamp the pipe bpp to what the hardware generation supports. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds. */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
	 * clock survives for now. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		pipe_config->shared_dpll = crtc->config.shared_dpll;

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
4663 
/* ValleyView: report a fixed 400MHz display clock (in kHz). */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
4668 
/* i945: fixed 400MHz display core clock (in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
4673 
/* i915: fixed 333MHz display core clock (in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
4678 
/* Remaining i9xx variants: fixed 200MHz display core clock (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
4683 
/* Pineview: read the display core clock (in kHz) from the GCFGC PCI
 * config register. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* deliberate fallthrough: treat unknown values as 133MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}
4707 
4708 static int i915gm_get_display_clock_speed(struct drm_device *dev)
4709 {
4710 	u16 gcfgc = 0;
4711 
4712 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4713 
4714 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4715 		return 133000;
4716 	else {
4717 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4718 		case GC_DISPLAY_CLOCK_333_MHZ:
4719 			return 333000;
4720 		default:
4721 		case GC_DISPLAY_CLOCK_190_200_MHZ:
4722 			return 190000;
4723 		}
4724 	}
4725 }
4726 
/* i865: fixed 266MHz display core clock (in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
4731 
/* i855: the display clock depends on the HPLL clock-control state. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from hardware here, so the
	 * switch always sees 0 -- presumably the HPLLCC config-space read
	 * was dropped in porting; confirm against the Linux i915 driver. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
4751 
/* i830: fixed 133MHz display core clock (in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
4756 
4757 static void
4758 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
4759 {
4760 	while (*num > DATA_LINK_M_N_MASK ||
4761 	       *den > DATA_LINK_M_N_MASK) {
4762 		*num >>= 1;
4763 		*den >>= 1;
4764 	}
4765 }
4766 
/* Derive an M/N register pair for the ratio m:n.  N is chosen as
 * roundup_pow_of_two(n) capped at DATA_LINK_N_MAX, M is scaled to keep the
 * ratio, and both are then shrunk to fit the register fields. */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
4774 
/* Fill in the data (gmch) and link M/N values for a DP/FDI link given the
 * pipe bpp, lane count, pixel clock and link clock (clocks in kHz). */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;	/* default transfer unit size */

	/* data M/N: payload bits vs. total link capacity (8 bits/lane/clock) */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	/* link M/N: pixel clock vs. link clock */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}
4789 
4790 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4791 {
4792 	if (i915_panel_use_ssc >= 0)
4793 		return i915_panel_use_ssc != 0;
4794 	return dev_priv->vbt.lvds_use_ssc
4795 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4796 }
4797 
4798 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4799 {
4800 	struct drm_device *dev = crtc->dev;
4801 	struct drm_i915_private *dev_priv = dev->dev_private;
4802 	int refclk;
4803 
4804 	if (IS_VALLEYVIEW(dev)) {
4805 		refclk = 100000;
4806 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4807 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4808 		refclk = dev_priv->vbt.lvds_ssc_freq;
4809 		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
4810 	} else if (!IS_GEN2(dev)) {
4811 		refclk = 96000;
4812 	} else {
4813 		refclk = 48000;
4814 	}
4815 
4816 	return refclk;
4817 }
4818 
/* Pack the DPLL dividers into the Pineview FP register layout (the N field
 * holds 1 << n; M1 is not used on Pineview). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}
4823 
/* Pack the N/M1/M2 dividers into the i9xx FP register layout. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
4828 
/* Program the FP0/FP1 divider registers.  FP1 gets the reduced clock for
 * LVDS downclocking when powersave is enabled, otherwise it mirrors FP0. */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 fp, fp2 = 0;

	/* Pineview packs the dividers differently from the other i9xx parts. */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	I915_WRITE(FP0(pipe), fp);
	crtc->config.dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		crtc->config.dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
		crtc->config.dpll_hw_state.fp1 = fp;
	}
}
4861 
/* DPIO workaround sequence for PLL B's opamp calibration; see the eDP/HDMI
 * DPIO vbios notes doc referenced in vlv_update_pll(). */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the masked value above is immediately overwritten by
	 * the plain assignment below -- this matches the inherited code, but
	 * one of the two lines is presumably redundant; confirm upstream. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
4890 
/* Program the data and link M/N values into the PCH transcoder registers
 * for this crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
4903 
/* Program the data and link M/N values into the CPU transcoder registers:
 * per-transcoder registers on gen5+, per-pipe G4X registers otherwise. */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
4924 
4925 static void intel_dp_set_m_n(struct intel_crtc *crtc)
4926 {
4927 	if (crtc->config.has_pch_encoder)
4928 		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4929 	else
4930 		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4931 }
4932 
/*
 * Program the Valleyview DPIO PLL for this CRTC from the pre-computed
 * divider values in crtc->config.dpll, and record the expected DPLL and
 * DPLL_MD register contents in config.dpll_hw_state for later hardware
 * state cross-checking.  The magic DPIO constants below come from the
 * eDP/HDMI DPIO driver vbios notes doc referenced inline; do not change
 * the write ordering.  All DPIO accesses are serialized by dpio_lock.
 */
static void vlv_update_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 dpll, mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val, dpll_md;

	mutex_lock(&dev_priv->dpio_lock);

	bestn = crtc->config.dpll.n;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers first, then the same value with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (crtc->config.port_clock == 162000 ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
		/* Use SSC source; the per-pipe value swaps for pipe B. */
		if (!pipe)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (!pipe)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock setup; extra bit set for DP outputs. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	/* Pixel multiplier is stored minus one in the MD register. */
	dpll_md = (crtc->config.pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	crtc->config.dpll_hw_state.dpll_md = dpll_md;

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);

	mutex_unlock(&dev_priv->dpio_lock);
}
5044 
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for gen3+
 * i9xx-class hardware from crtc->config.dpll and stash them in
 * config.dpll_hw_state; the registers themselves are written later by
 * the PLL enable path.  @reduced_clock, if non-NULL, supplies LVDS
 * downclock dividers; @num_connectors gates the SSC reference choice.
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc->config.dpll;

	/* Writes FP0/FP1 divider state as a side effect. */
	i9xx_update_pll_dividers(crtc, reduced_clock);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in DPLL itself. */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc->config.pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread-spectrum, or DREF. */
	if (crtc->config.sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Pixel multiplier is stored minus one in DPLL_MD. */
		u32 dpll_md = (crtc->config.pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc->config.dpll_hw_state.dpll_md = dpll_md;
	}

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);
}
5123 
/*
 * Gen2 (i8xx) variant of the DPLL value computation: simpler divider
 * encoding, no DPLL_MD register.  The result is only recorded in
 * config.dpll_hw_state; the hardware write happens later.
 */
static void i8xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		/* Non-LVDS p1 is encoded as (p1 - 2), with a special bit for 2. */
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc->config.dpll_hw_state.dpll = dpll;
}
5160 
/*
 * Write the CRTC's adjusted mode timings into the pipe/transcoder
 * timing registers (H/V TOTAL, BLANK, SYNC, VSYNCSHIFT) and set
 * PIPESRC to the user-requested source size.  Each H/V register packs
 * the "start/display" value in the low 16 bits and the "end/total"
 * value in the high 16 bits, all stored minus one.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal / 2;
	} else {
		vsyncshift = 0;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config.pipe_src_w - 1) << 16) |
		   (intel_crtc->config.pipe_src_h - 1));
}
5224 
/*
 * Read the pipe/transcoder timing registers back into @pipe_config.
 * Inverse of intel_set_pipe_timings(): each register packs a pair of
 * minus-one values in the low and high 16 bits, so every field is
 * masked, shifted, and incremented on readout.  Also undoes the
 * interlace vtotal/vblank_end adjustment the write path applied.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* Undo the -1 applied on the write side for interlace. */
		pipe_config->adjusted_mode.crtc_vtotal += 1;
		pipe_config->adjusted_mode.crtc_vblank_end += 1;
	}

	/* PIPESRC packs width in the high half, height in the low half. */
	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
}
5266 
5267 static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
5268 					     struct intel_crtc_config *pipe_config)
5269 {
5270 	struct drm_crtc *crtc = &intel_crtc->base;
5271 
5272 	crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5273 	crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
5274 	crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5275 	crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5276 
5277 	crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5278 	crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5279 	crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5280 	crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5281 
5282 	crtc->mode.flags = pipe_config->adjusted_mode.flags;
5283 
5284 	crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
5285 	crtc->mode.flags |= pipe_config->adjusted_mode.flags;
5286 }
5287 
/*
 * Build and write the PIPECONF register for an i9xx-class pipe:
 * dithering and bpc (G4X/VLV only), double-wide mode, CxSR
 * downclocking, interlace vs progressive, and VLV limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Quirked machines must never see pipe A turned off once enabled. */
	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
		pipeconf |= PIPECONF_ENABLE;

	if (intel_crtc->config.double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (!IS_GEN2(dev) &&
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
	else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
5347 
/*
 * Full mode set for an i9xx-class CRTC: find PLL dividers (unless the
 * clock was pre-set or the output is DSI, which has no DPLL here),
 * program the per-generation PLL state, then write pipe timings,
 * plane size/position, PIPECONF, and finally the framebuffer base.
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * settings can be found for the requested mode).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      int x, int y,
			      struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dspcntr;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;

	/* Classify the attached encoders; only LVDS and DSI matter below. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		}

		num_connectors++;
	}

	/* DSI outputs don't use the DPLL programmed here. */
	if (is_dsi)
		goto skip_dpll;

	if (!intel_crtc->config.clock_set) {
		refclk = i9xx_get_refclk(crtc, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc,
						 intel_crtc->config.port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock.  If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* Dispatch to the per-generation DPLL state computation. */
	if (IS_GEN2(dev)) {
		i8xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(intel_crtc);
	} else {
		i9xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
	}

skip_dpll:
	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (!IS_VALLEYVIEW(dev)) {
		if (pipe == 0)
			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
		else
			dspcntr |= DISPPLANE_SEL_PIPE_B;
	}

	intel_set_pipe_timings(intel_crtc);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
		   (intel_crtc->config.pipe_src_w - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
5463 
/*
 * Read back the GMCH panel fitter configuration into @pipe_config, but
 * only if the fitter is enabled and attached to this CRTC's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* No panel fitter on i830 or early non-mobile parts. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* Pre-gen4 the fitter is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
5493 
/*
 * Derive the port clock for a VLV CRTC by reading the DPIO mdiv
 * register back and decoding the n/m1/m2/p1/p2 divider fields, then
 * running them through vlv_clock().  The dot clock computed there is
 * the 5x "fast" clock, hence the divide by 5.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* 100 MHz reference (value in kHz, presumably) */

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}
5519 
/*
 * Read the full hardware state of an i9xx-class pipe into @pipe_config.
 * Returns false if the pipe is disabled (PIPECONF_ENABLE clear), true
 * otherwise with timings, pfit, bpc, pixel multiplier, DPLL state, and
 * the decoded clock all filled in.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* On these platforms the cpu transcoder is tied 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Only G4X/VLV encode bpc in PIPECONF; see i9xx_set_pipeconf(). */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+: pixel multiplier lives in DPLL_MD, stored minus one. */
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}
5592 
/*
 * Configure the Ironlake PCH display reference clock (PCH_DREF_CONTROL)
 * according to the attached outputs: SSC vs non-spread source, CK505 vs
 * internal source, and the CPU eDP source output.  The desired final
 * value is computed up front; if it matches the current register value
 * nothing is touched.  Otherwise the sources are switched one at a time
 * with a 200us settle delay after each write, per the required
 * sequencing, and a BUG_ON verifies we converged on the computed state.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* Only port A eDP is driven by the CPU source output. */
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* Sanity check: we must have arrived at the precomputed state. */
	BUG_ON(val != final);
}
5741 
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait (up to 100us) for the status bit to latch, then de-assert
 * and wait for the status bit to clear.  Timeouts are logged but not
 * propagated.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
5762 
5763 /* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over sideband (SBI_MPHY) per
 * the WaMPhyProgramming:hsw workaround.  The offsets and values are
 * opaque magic from BSpec; each register is read-modify-written, with
 * most writes mirrored on a paired 0x20xx/0x21xx offset (presumably one
 * per FDI lane group — confirm against BSpec before changing anything).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
5837 
5838 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5839  * Programming" based on the parameters passed:
5840  * - Sequence to enable CLKOUT_DP
5841  * - Sequence to enable CLKOUT_DP without spread
5842  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5843  */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations (see WARN messages). */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	/* Un-disable the SSC but keep the path-alt bit set for now. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear path-alt to route the spread clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0, full-fat PCH uses SBI_DBUFF0. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}
5884 
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	/* Clear the buffer-enable config bit; the register used differs
	 * between the LP and non-LP LPT PCH variants. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* If the SSC block is still running, first select the alternate
	 * path (unless already selected), then disable the block. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}
5912 
5913 static void lpt_init_pch_refclk(struct drm_device *dev)
5914 {
5915 	struct drm_mode_config *mode_config = &dev->mode_config;
5916 	struct intel_encoder *encoder;
5917 	bool has_vga = false;
5918 
5919 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5920 		switch (encoder->type) {
5921 		case INTEL_OUTPUT_ANALOG:
5922 			has_vga = true;
5923 			break;
5924 		}
5925 	}
5926 
5927 	if (has_vga)
5928 		lpt_enable_clkout_dp(dev, true, true);
5929 	else
5930 		lpt_disable_clkout_dp(dev);
5931 }
5932 
/*
 * Initialize reference clocks when the driver loads.  IBX/CPT PCHs take
 * the Ironlake path, LPT has its own sequence; any other platform needs
 * no PCH refclk setup.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
5943 
5944 static int ironlake_get_refclk(struct drm_crtc *crtc)
5945 {
5946 	struct drm_device *dev = crtc->dev;
5947 	struct drm_i915_private *dev_priv = dev->dev_private;
5948 	struct intel_encoder *encoder;
5949 	int num_connectors = 0;
5950 	bool is_lvds = false;
5951 
5952 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5953 		switch (encoder->type) {
5954 		case INTEL_OUTPUT_LVDS:
5955 			is_lvds = true;
5956 			break;
5957 		}
5958 		num_connectors++;
5959 	}
5960 
5961 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5962 		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
5963 			      dev_priv->vbt.lvds_ssc_freq);
5964 		return dev_priv->vbt.lvds_ssc_freq;
5965 	}
5966 
5967 	return 120000;
5968 }
5969 
/*
 * Program PIPECONF for an ILK-style pipe from the staged CRTC config:
 * bits-per-color, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	/* pipe_bpp is total bits per pixel, i.e. 3 * bits-per-channel. */
	switch (intel_crtc->config.pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/* Limited (16-235) vs. full (0-255) RGB output range. */
	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
6011 
6012 /*
6013  * Set up the pipe CSC unit.
6014  *
6015  * Currently only full range RGB to limited range RGB conversion
6016  * is supported, but eventually this should handle various
6017  * RGB<->YCbCr scenarios as well.
6018  */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale full range down to limited range: (235-16)/255 in 12-bit
	 * fixed point, truncated to the coefficient precision. */
	if (intel_crtc->config.limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	/* No pre-offset in either mode. */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		/* Post-offset of 16/255 in 12-bit fixed point lifts the
		 * output up to the limited-range black level. */
		if (intel_crtc->config.limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		/* Older gens use the black-screen-offset mode bit instead
		 * of the post-offset registers programmed above. */
		if (intel_crtc->config.limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
6075 
/*
 * Program PIPECONF for a HSW/BDW pipe: dithering (HSW only) and
 * interlace mode on the CPU transcoder, legacy 8-bit gamma mode, and on
 * Broadwell the per-pipe PIPEMISC BPC/dither configuration.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config.dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* On BDW the BPC/dither controls live in the per-pipe PIPEMISC. */
	if (IS_BROADWELL(dev)) {
		val = 0;

		/* pipe_bpp is total bits per pixel (3 channels). */
		switch (intel_crtc->config.pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config.dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}
6128 
/*
 * Find PLL divisors for the CRTC's target port clock, and optionally a
 * reduced clock for LVDS downclocking.  *has_reduced_clock and
 * *reduced_clock are only filled in when LVDS downclocking is available.
 * Returns false when no divisors satisfy the target clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc,
					  to_intel_crtc(crtc)->config.port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}
6179 
/*
 * Number of FDI lanes required to carry target_clock (kHz) at the given
 * bpp over a link running at link_bw (kHz), with margin for spread
 * spectrum clocking.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Pad the payload bandwidth by 5% (21/20) so spread spectrum
	 * cannot oversubscribe the link: max center spread is 2.5%,
	 * doubled for safety's sake.
	 */
	uint32_t padded_bps = target_clock * bpp * 21 / 20;

	/* Each lane carries link_bw * 8 bits; round up by one lane. */
	return padded_bps / (link_bw * 8) + 1;
}
6190 
/*
 * Whether the PLL feedback coarse/fine tune bit (FP_CB_TUNE) should be
 * set: true when the effective M divisor is small relative to N scaled
 * by the given factor.
 */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
6195 
/*
 * Build the DPLL control-register value for an ILK PCH PLL from the
 * divisors precomputed in intel_crtc->config.dpll.
 *
 * @fp/@fp2: in/out FP0/FP1 register values; FP_CB_TUNE is or'ed in when
 * the M/N ratio calls for coarse/fine feedback tuning.  @fp2 may be NULL
 * when there is no reduced clock.
 *
 * Returns the DPLL value with DPLL_VCO_ENABLE already set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (intel_crtc->config.sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
		*fp |= FP_CB_TUNE;

	/* Same M-vs-N test as above, applied to the reduced clock. */
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (intel_crtc->config.pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* Both SDVO/HDMI and DP want the high-speed clock mode. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (intel_crtc->config.dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for a solitary LVDS output using SSC. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}
6280 
/*
 * Mode-set an ILK-style CRTC: compute PLL clocks, set up the shared PCH
 * DPLL state, the DP and FDI M/N values, pipe timings, PIPECONF and the
 * primary plane, and finally program the framebuffer base.
 *
 * Returns 0 on success or a negative error code (no usable PLL settings,
 * or no free shared DPLL).
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  int x, int y,
				  struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	struct intel_shared_dpll *pll;
	int ret;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		}

		num_connectors++;
	}

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(crtc, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !intel_crtc->config.clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(intel_crtc,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		/* FP1 falls back to the FP0 value when there is no reduced
		 * clock to downclock to. */
		intel_crtc->config.dpll_hw_state.dpll = dpll;
		intel_crtc->config.dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			intel_crtc->config.dpll_hw_state.fp1 = fp2;
		else
			intel_crtc->config.dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(intel_crtc);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(pipe));
			return -EINVAL;
		}
	} else
		intel_put_shared_dpll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	/* LVDS downclocking is only used when powersaving is enabled. */
	if (is_lvds && has_reduced_clock && i915_powersave)
		intel_crtc->lowfreq_avail = true;
	else
		intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	ironlake_set_pipeconf(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
6378 
/*
 * Read back link M/N, data M/N and the TU size from the PCH transcoder
 * registers of this CRTC's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* The TU size shares the DATA_M1 register with the M value. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
6394 
/*
 * Read back link M/N, data M/N and the TU size for the given CPU
 * transcoder; gen < 5 uses the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* The TU size shares the DATA_M1 register with the M value. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
6421 
6422 void intel_dp_get_m_n(struct intel_crtc *crtc,
6423 		      struct intel_crtc_config *pipe_config)
6424 {
6425 	if (crtc->config.has_pch_encoder)
6426 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6427 	else
6428 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6429 					     &pipe_config->dp_m_n);
6430 }
6431 
/* FDI M/N readout: FDI always uses the CPU transcoder M/N registers. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n);
}
6438 
/*
 * Read out the PCH panel fitter state for this CRTC.  Only fills in
 * pipe_config->pch_pfit when the fitter is enabled.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
6462 
/*
 * Read the current hardware state of an ILK pipe into pipe_config: BPC,
 * PCH/FDI configuration, shared DPLL selection, pixel multiplier, pipe
 * timings and panel fitter.  Returns false when the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/* Decode bits-per-channel into total bits per pixel. */
	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	/* An enabled PCH transcoder means this pipe drives a PCH port. */
	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		/* IBX has a fixed pipe <-> PLL mapping; CPT selects the
		 * PLL per-transcoder via PCH_DPLL_SEL. */
		if (HAS_PCH_IBX(dev_priv->dev)) {
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}
6537 
/*
 * WARN if anything that depends on the LCPLL is still active before it
 * is disabled for package C8: CRTCs, the driver power well request, the
 * DDI PLLs, panel power, backlights, utility pin, GTC, and unexpected
 * unmasked display interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
	struct intel_crtc *crtc;
	uint32_t val;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
		WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	WARN(plls->spll_refcount, "SPLL enabled\n");
	WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
	WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
	WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
	     "CPU PWM2 enabled\n");
	WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/* Everything except the PCH event / hotplug bits must be masked. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	val = I915_READ(DEIMR);
	WARN((val | DE_PCH_EVENT_IVB) != 0xffffffff,
	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
	val = I915_READ(SDEIMR);
	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
6573 
6574 /*
6575  * This function implements pieces of two sequences from BSpec:
6576  * - Sequence for display software to disable LCPLL
6577  * - Sequence for display software to allow package C8+
6578  * The steps implemented here are just the steps that actually touch the LCPLL
6579  * register. Callers should take care of disabling all the display engine
6580  * functions, doing the mode unset, fixing interrupts, etc.
6581  */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock onto the fixed FCLK source before
	 * the PLL is stopped. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* D_COMP is written through the pcode mailbox. */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_DISABLE;
	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to disable D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
	POSTING_READ(D_COMP);
	ndelay(100);

	if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	/* Optionally let the hardware power the PLL down completely. */
	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
6628 
6629 /*
6630  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6631  * source.
6632  */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/* Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine! */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP (written through the pcode mailbox). */
	val = I915_READ(D_COMP);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
		DRM_ERROR("Failed to enable D_COMP\n");
	mutex_unlock(&dev_priv->rps.hw_lock);
	POSTING_READ(D_COMP);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK to the LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
6681 
/*
 * Delayed-work handler that actually enters package C8+: re-enables LP
 * partition-level clock gating on LPT-LP, turns off CLKOUT_DP, masks
 * display interrupts, disables the LCPLL and drops the runtime-PM
 * reference.  Scheduled (pc8.enable_work) once the last PC8
 * disable-reference has been dropped.
 */
void hsw_enable_pc8_work(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(to_delayed_work(__work), struct drm_i915_private,
			     pc8.enable_work);
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!HAS_PC8(dev));

	if (dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	dev_priv->pc8.enabled = true;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_pc8_disable_interrupts(dev);
	hsw_disable_lcpll(dev_priv, true, true);

	intel_runtime_pm_put(dev_priv);
}
6711 
6712 static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6713 {
6714 	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6715 	WARN(dev_priv->pc8.disable_count < 1,
6716 	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6717 
6718 	dev_priv->pc8.disable_count--;
6719 	if (dev_priv->pc8.disable_count != 0)
6720 		return;
6721 
6722 	schedule_delayed_work(&dev_priv->pc8.enable_work,
6723 			      msecs_to_jiffies(i915_pc8_timeout));
6724 }
6725 
/*
 * Take a PC8 disable-reference; on the 0 -> 1 transition, leave package
 * C8+: cancel any pending enable work and, if PC8 was actually entered,
 * restore the LCPLL, interrupts, PCH refclk, clock gating, DDI buffers,
 * swizzling and ring frequencies.  Caller must hold pc8.lock.
 */
static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
	WARN(dev_priv->pc8.disable_count < 0,
	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);

	dev_priv->pc8.disable_count++;
	if (dev_priv->pc8.disable_count != 1)
		return;

	WARN_ON(!HAS_PC8(dev));

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
	if (!dev_priv->pc8.enabled)
		return;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	intel_runtime_pm_get(dev_priv);

	hsw_restore_lcpll(dev_priv);
	hsw_pc8_restore_interrupts(dev);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
	i915_gem_init_swizzling(dev);
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
	dev_priv->pc8.enabled = false;
}
6766 
/* Public wrapper: drop a PC8 disable-reference under pc8.lock. */
void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	__hsw_enable_package_c8(dev_priv);
	mutex_unlock(&dev_priv->pc8.lock);
}
6776 
/* Public wrapper: take a PC8 disable-reference under pc8.lock. */
void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	__hsw_disable_package_c8(dev_priv);
	mutex_unlock(&dev_priv->pc8.lock);
}
6786 
6787 static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6788 {
6789 	struct drm_device *dev = dev_priv->dev;
6790 	struct intel_crtc *crtc;
6791 	uint32_t val;
6792 
6793 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6794 		if (crtc->base.enabled)
6795 			return false;
6796 
6797 	/* This case is still possible since we have the i915.disable_power_well
6798 	 * parameter and also the KVMr or something else might be requesting the
6799 	 * power well. */
6800 	val = I915_READ(HSW_PWR_WELL_DRIVER);
6801 	if (val != 0) {
6802 		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6803 		return false;
6804 	}
6805 
6806 	return true;
6807 }
6808 
6809 /* Since we're called from modeset_global_resources there's no way to
6810  * symmetrically increase and decrease the refcount, so we use
6811  * dev_priv->pc8.requirements_met to track whether we already have the refcount
6812  * or not.
6813  */
static void hsw_update_package_c8(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool allow;

	if (!HAS_PC8(dev_priv->dev))
		return;

	/* Module parameter gate: PC8 support can be disabled entirely. */
	if (!i915_enable_pc8)
		return;

	mutex_lock(&dev_priv->pc8.lock);

	allow = hsw_can_enable_package_c8(dev_priv);

	/* Only act when the tracked requirement state actually changes. */
	if (allow == dev_priv->pc8.requirements_met)
		goto done;

	dev_priv->pc8.requirements_met = allow;

	if (allow)
		__hsw_enable_package_c8(dev_priv);
	else
		__hsw_disable_package_c8(dev_priv);

done:
	mutex_unlock(&dev_priv->pc8.lock);
}
6842 
/* GPU went idle: it no longer blocks PC8, so drop our disable-reference
 * (at most once, tracked via pc8.gpu_idle). */
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	if (!dev_priv->pc8.gpu_idle) {
		dev_priv->pc8.gpu_idle = true;
		__hsw_enable_package_c8(dev_priv);
	}
	mutex_unlock(&dev_priv->pc8.lock);
}
6855 
/* GPU became busy: take a PC8 disable-reference (at most once, tracked
 * via pc8.gpu_idle) so package C8 is left / not entered. */
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
	if (!HAS_PC8(dev_priv->dev))
		return;

	mutex_lock(&dev_priv->pc8.lock);
	if (dev_priv->pc8.gpu_idle) {
		dev_priv->pc8.gpu_idle = false;
		__hsw_disable_package_c8(dev_priv);
	}
	mutex_unlock(&dev_priv->pc8.lock);
}
6868 
/*
 * Iterate @domain over every power domain whose bit is set in @mask.
 * Note the expansion ends in an unbraced if: brace any loop body that
 * itself contains an else.
 */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
6872 
6873 static unsigned long get_pipe_power_domains(struct drm_device *dev,
6874 					    enum i915_pipe pipe, bool pfit_enabled)
6875 {
6876 	unsigned long mask;
6877 	enum transcoder transcoder;
6878 
6879 	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
6880 
6881 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
6882 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6883 	if (pfit_enabled)
6884 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
6885 
6886 	return mask;
6887 }
6888 
6889 void intel_display_set_init_power(struct drm_device *dev, bool enable)
6890 {
6891 	struct drm_i915_private *dev_priv = dev->dev_private;
6892 
6893 	if (dev_priv->power_domains.init_power_on == enable)
6894 		return;
6895 
6896 	if (enable)
6897 		intel_display_power_get(dev, POWER_DOMAIN_INIT);
6898 	else
6899 		intel_display_power_put(dev, POWER_DOMAIN_INIT);
6900 
6901 	dev_priv->power_domains.init_power_on = enable;
6902 }
6903 
/*
 * Reconcile display power-domain references with the new CRTC state
 * after a modeset, then drop the INIT power reference.
 */
static void modeset_update_power_wells(struct drm_device *dev)
{
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
						crtc->pipe,
						crtc->config.pch_pfit.enabled);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev, domain);
	}

	/* Release each CRTC's previously held domains and record the new
	 * set (possibly 0 for disabled CRTCs). */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev, false);
}
6938 
/*
 * Haswell modeset_global_resources hook: first bring the power wells in
 * line with the new CRTC state, then re-evaluate Package C8 residency.
 */
static void haswell_modeset_global_resources(struct drm_device *dev)
{
	modeset_update_power_wells(dev);
	hsw_update_package_c8(dev);
}
6944 
/*
 * Program the pipe for a modeset on Haswell: select and enable a DDI
 * PLL, set up DP and (if present) FDI M/N values, pipe timings,
 * pipeconf and the pipe CSC, then point the primary plane at @fb at
 * panning offset (@x, @y).
 *
 * Returns 0 on success, -EINVAL if no usable PLL could be selected, or
 * the error from intel_pipe_set_base().
 */
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 int x, int y,
				 struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;
	int ret;

	if (!intel_ddi_pll_select(intel_crtc))
		return -EINVAL;
	intel_ddi_pll_enable(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	/* DDI PLLs have no reduced-clock mode. */
	intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config.has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config.fdi_m_n);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	return ret;
}
6983 
/*
 * Read the current pipe configuration back from the hardware into
 * @pipe_config (used for state cross-checking and hw state takeover).
 * Returns false when the pipe is disabled or the power domain needed to
 * read its registers is off.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* The eDP transcoder can be fed by any pipe; if it is enabled and
	 * routed to this pipe, report TRANSCODER_EDP instead. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through: treat an unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_enabled(dev,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is wired to
	 * DDI E.  So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
	    I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}

	intel_get_pipe_timings(crtc, pipe_config);

	/* Only read the pfit registers if their power domain is on. */
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_enabled(dev, pfit_domain))
		ironlake_get_pfit_config(crtc, pipe_config);

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	pipe_config->pixel_multiplier = 1;

	return true;
}
7056 
/*
 * Common modeset entry point: bracket the platform crtc_mode_set hook
 * with vblank pre/post bookkeeping, then run each attached encoder's
 * mode_set hook.  Returns 0 on success or the platform hook's error.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       int x, int y,
			       struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);

	drm_vblank_post_modeset(dev, pipe);

	if (ret != 0)
		return ret;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
			encoder->base.base.id,
			drm_get_encoder_name(&encoder->base),
			mode->base.id, mode->name);
		encoder->mode_set(encoder);
	}

	return 0;
}
7088 
/*
 * Pixel clock (kHz, matching drm_display_mode.clock) to
 * AUD_CONFIG_PIXEL_CLOCK_HDMI_* mapping for HDMI audio.  Index 1
 * (25.2 MHz) is the bspec-mandated fallback entry.
 */
static struct {
	int clock;
	u32 config;
} hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
7104 
7105 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7106 static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7107 {
7108 	int i;
7109 
7110 	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7111 		if (mode->clock == hdmi_audio_clock[i].clock)
7112 			break;
7113 	}
7114 
7115 	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7116 		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7117 		i = 1;
7118 	}
7119 
7120 	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7121 		      hdmi_audio_clock[i].clock,
7122 		      hdmi_audio_clock[i].config);
7123 
7124 	return hdmi_audio_clock[i].config;
7125 }
7126 
/*
 * Check whether the ELD bytes already programmed into the hardware match
 * connector->eld, so callers can skip an unnecessary rewrite.
 *
 * @reg_eldv/@bits_eldv: register and bits holding the ELD-valid flag.
 * @reg_elda/@bits_elda: register and bits holding the ELD address
 *	pointer; the pointer is reset to 0 here before reading back.
 * @reg_edid: ELD data window register read in dword units.  NOTE(review):
 *	reading the same register in a loop presumably relies on a
 *	hardware auto-incrementing address — confirm against the audio
 *	register spec.
 *
 * Returns true if the hardware already matches (or both hw and eld are
 * empty), false if a rewrite is needed.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to write: up to date iff the valid bit is clear. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	/* Reset the ELD read address to the start of the buffer. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] holds the ELD payload length in dwords. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
7155 
/*
 * Write the connector's ELD to the G4x audio hardware: clear the valid
 * bit and address pointer, copy the ELD dwords, then set the valid bit.
 * Skipped entirely when the hardware already matches.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit based on which audio device is present. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the old ELD and reset the write address. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Empty ELD: leave the valid bit cleared. */
	if (!eld[0])
		return;

	/* Copy the ELD in dwords, bounded by the hw buffer size. */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
7196 
/*
 * Write the connector's ELD on Haswell: enable the audio codec output
 * for this pipe, mark the ELD valid, select DP vs HDMI clocking in
 * AUD_CONFIG, then copy the ELD dwords into the per-pipe hardware
 * buffer (skipped when the hardware already matches).
 */
static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc,
			      struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t eldv;
	uint32_t i;
	int len;
	int pipe = to_intel_crtc(crtc)->pipe;
	int tmp;

	/* Per-pipe audio register bank; ELD valid bits for all pipes live
	 * together in HSW_AUD_PIN_ELD_CP_VLD, 4 bits per pipe. */
	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;


	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);

	/* Wait for 1 vertical blank */
	intel_wait_for_vblank(dev, pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
	/* clear N_programing_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
	intel_crtc->eld_vld = true;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the old ELD while we rewrite the buffer. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;		/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);

}
7285 
/*
 * Write the connector's ELD on Ironlake-class hardware.  Selects the
 * IBX, VLV or CPT audio register bank, works out which digital port the
 * audio is routed to (writing blindly to all ports if unknown), selects
 * DP vs HDMI clocking, then copies the ELD dwords into the hardware
 * buffer (skipped when the hardware already matches).
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc,
			       struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;
	int pipe = to_intel_crtc(crtc)->pipe;

	/* Pick the per-pipe audio register bank for this PCH/platform. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else if (IS_VALLEYVIEW(connector->dev)) {
		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
		aud_config = VLV_AUD_CFG(pipe);
		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	/* Determine the port the audio is routed to: from the encoder on
	 * VLV, from the DIP port-select field elsewhere. */
	if (IS_VALLEYVIEW(connector->dev))  {
		struct intel_encoder *intel_encoder;
		struct intel_digital_port *intel_dig_port;

		intel_encoder = intel_attached_encoder(connector);
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		i = intel_dig_port->port;
	} else {
		i = I915_READ(aud_cntl_st);
		i = (i >> 29) & DIP_PORT_SEL_MASK;
		/* DIP_Port_Select, 0x1 = PortB */
	}

	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else {
		I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
	}

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the old ELD while we rewrite the buffer. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
7378 
/*
 * Write the ELD (EDID-Like Data) for the audio-capable connector driven
 * by @encoder, if any.  Patches eld[6] with the A/V sync delay before
 * handing off to the platform write_eld hook.
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No audio-capable connector on this encoder: nothing to do. */
	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	/* ELD byte 6 carries the A/V sync delay in units of 2 ms. */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc, mode);
}
7402 
/*
 * Show or hide the cursor on 845G/865G.  @base is the cursor image
 * address; 0 means hide.  On these chipsets the base address may only
 * be changed while the cursor is disabled, so the base is written
 * before enabling.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: nothing to program. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
7432 
/*
 * Show or hide the cursor on 9xx-style hardware.  @base is the cursor
 * image address; 0 means hide.  CURCNTR is only rewritten when the
 * visibility changes; the CURBASE write latches the new state at the
 * next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR(pipe));
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));
}
7460 
/*
 * Show or hide the cursor on IVB+ hardware (no pipe-select bits; a
 * per-pipe register bank instead).  On HSW/BDW the cursor additionally
 * goes through the pipe CSC and trickle feed must stay enabled.
 * @base is the cursor image address; 0 means hide.  The CURBASE_IVB
 * write latches the new state at the next vblank.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			cntl |= CURSOR_PIPE_CSC_ENABLE;
			cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	POSTING_READ(CURCNTR_IVB(pipe));
	I915_WRITE(CURBASE_IVB(pipe), base);
	POSTING_READ(CURBASE_IVB(pipe));
}
7491 
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute cursor visibility and position for @crtc and program the
 * hardware.  @on requests the cursor shown; it is still forced off
 * (base = 0) whenever the cursor rectangle lies entirely outside the
 * pipe source area.  Negative coordinates are encoded with the CURPOS
 * sign bits.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base = 0, pos = 0;
	bool visible;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Entirely off the right/bottom edge: hide. */
	if (x >= intel_crtc->config.pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config.pipe_src_h)
		base = 0;

	/* Negative coordinates use a sign bit plus magnitude encoding;
	 * hide when the cursor is entirely off the left/top edge. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Hidden and already hidden: no register writes needed. */
	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
7547 
7548 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7549 				 struct drm_file *file,
7550 				 uint32_t handle,
7551 				 uint32_t width, uint32_t height)
7552 {
7553 	struct drm_device *dev = crtc->dev;
7554 	struct drm_i915_private *dev_priv = dev->dev_private;
7555 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7556 	struct drm_i915_gem_object *obj;
7557 	uint32_t addr;
7558 	int ret;
7559 
7560 	/* if we want to turn off the cursor ignore width and height */
7561 	if (!handle) {
7562 		DRM_DEBUG_KMS("cursor off\n");
7563 		addr = 0;
7564 		obj = NULL;
7565 		mutex_lock(&dev->struct_mutex);
7566 		goto finish;
7567 	}
7568 
7569 	/* Currently we only support 64x64 cursors */
7570 	if (width != 64 || height != 64) {
7571 		DRM_ERROR("we currently only support 64x64 cursors\n");
7572 		return -EINVAL;
7573 	}
7574 
7575 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
7576 	if (&obj->base == NULL)
7577 		return -ENOENT;
7578 
7579 	if (obj->base.size < width * height * 4) {
7580 		DRM_ERROR("buffer is to small\n");
7581 		ret = -ENOMEM;
7582 		goto fail;
7583 	}
7584 
7585 	/* we only need to pin inside GTT if cursor is non-phy */
7586 	mutex_lock(&dev->struct_mutex);
7587 	if (!dev_priv->info->cursor_needs_physical) {
7588 		unsigned alignment;
7589 
7590 		if (obj->tiling_mode) {
7591 			DRM_ERROR("cursor cannot be tiled\n");
7592 			ret = -EINVAL;
7593 			goto fail_locked;
7594 		}
7595 
7596 		/* Note that the w/a also requires 2 PTE of padding following
7597 		 * the bo. We currently fill all unused PTE with the shadow
7598 		 * page and so we should always have valid PTE following the
7599 		 * cursor preventing the VT-d warning.
7600 		 */
7601 		alignment = 0;
7602 		if (need_vtd_wa(dev))
7603 			alignment = 64*1024;
7604 
7605 		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
7606 		if (ret) {
7607 			DRM_ERROR("failed to move cursor bo into the GTT\n");
7608 			goto fail_locked;
7609 		}
7610 
7611 		ret = i915_gem_object_put_fence(obj);
7612 		if (ret) {
7613 			DRM_ERROR("failed to release fence for cursor");
7614 			goto fail_unpin;
7615 		}
7616 
7617 		addr = i915_gem_obj_ggtt_offset(obj);
7618 	} else {
7619 		int align = IS_I830(dev) ? 16 * 1024 : 256;
7620 		ret = i915_gem_attach_phys_object(dev, obj,
7621 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7622 						  align);
7623 		if (ret) {
7624 			DRM_ERROR("failed to attach phys object\n");
7625 			goto fail_locked;
7626 		}
7627 		addr = obj->phys_obj->handle->busaddr;
7628 	}
7629 
7630 	if (IS_GEN2(dev))
7631 		I915_WRITE(CURSIZE, (height << 12) | width);
7632 
7633  finish:
7634 	if (intel_crtc->cursor_bo) {
7635 		if (dev_priv->info->cursor_needs_physical) {
7636 			if (intel_crtc->cursor_bo != obj)
7637 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7638 		} else
7639 			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7640 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7641 	}
7642 
7643 	mutex_unlock(&dev->struct_mutex);
7644 
7645 	intel_crtc->cursor_addr = addr;
7646 	intel_crtc->cursor_bo = obj;
7647 	intel_crtc->cursor_width = width;
7648 	intel_crtc->cursor_height = height;
7649 
7650 	if (intel_crtc->active)
7651 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7652 
7653 	return 0;
7654 fail_unpin:
7655 	i915_gem_object_unpin_from_display_plane(obj);
7656 fail_locked:
7657 	mutex_unlock(&dev->struct_mutex);
7658 fail:
7659 	drm_gem_object_unreference_unlocked(&obj->base);
7660 	return ret;
7661 }
7662 
7663 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7664 {
7665 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7666 
7667 	intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
7668 	intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
7669 
7670 	if (intel_crtc->active)
7671 		intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7672 
7673 	return 0;
7674 }
7675 
7676 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7677 				 u16 *blue, uint32_t start, uint32_t size)
7678 {
7679 	int end = (start + size > 256) ? 256 : start + size, i;
7680 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7681 
7682 	for (i = start; i < end; i++) {
7683 		intel_crtc->lut_r[i] = red[i] >> 8;
7684 		intel_crtc->lut_g[i] = green[i] >> 8;
7685 		intel_crtc->lut_b[i] = blue[i] >> 8;
7686 	}
7687 
7688 	intel_crtc_load_lut(crtc);
7689 }
7690 
/* VESA 640x480x72Hz mode to set on the pipe */
/* Default mode used by intel_get_load_detect_pipe() when the caller
 * supplies none. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
7696 
/*
 * Allocate an intel_framebuffer wrapping @obj.  Takes ownership of the
 * caller's GEM reference: on any failure the reference is dropped and
 * an ERR_PTR() is returned instead of a framebuffer.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	/* framebuffer_init needs struct_mutex held. */
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference_unlocked(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}
7727 
7728 static u32
7729 intel_framebuffer_pitch_for_width(int width, int bpp)
7730 {
7731 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7732 	return ALIGN(pitch, 64);
7733 }
7734 
7735 static u32
7736 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7737 {
7738 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7739 	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7740 }
7741 
7742 static struct drm_framebuffer *
7743 intel_framebuffer_create_for_mode(struct drm_device *dev,
7744 				  struct drm_display_mode *mode,
7745 				  int depth, int bpp)
7746 {
7747 	struct drm_i915_gem_object *obj;
7748 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
7749 
7750 	obj = i915_gem_alloc_object(dev,
7751 				    intel_framebuffer_size_for_mode(mode, bpp));
7752 	if (obj == NULL)
7753 		return ERR_PTR(-ENOMEM);
7754 
7755 	mode_cmd.width = mode->hdisplay;
7756 	mode_cmd.height = mode->vdisplay;
7757 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
7758 								bpp);
7759 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
7760 
7761 	return intel_framebuffer_create(dev, &mode_cmd, obj);
7762 }
7763 
/*
 * Check whether the fbdev framebuffer is large enough (in pitch and
 * total size) to scan out @mode, and return it for reuse if so.
 * Returns NULL when it doesn't fit or when fbdev support is compiled
 * out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (dev_priv->fbdev == NULL)
		return NULL;

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL)
		return NULL;

	/* The fbdev pitch must cover a full scanline of @mode... */
	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ...and the bo must cover all of its scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
7793 
/*
 * Acquire a CRTC for load-based connector detection.  If the connector
 * already has a CRTC it is reused (only ensuring DPMS on); otherwise an
 * unused CRTC and a scratch framebuffer are set up with @mode (or the
 * default VESA 640x480 mode).  Undo state is saved in @old for
 * intel_release_load_detect_pipe().  Returns true on success, with
 * crtc->mutex held.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	int i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		mutex_lock(&crtc->mutex);

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Stage the connector/encoder/crtc links for intel_set_mode(). */
	mutex_lock(&crtc->mutex);
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		mutex_unlock(&crtc->mutex);
		return false;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		mutex_unlock(&crtc->mutex);
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;
}
7901 
/*
 * intel_release_load_detect_pipe - undo the effects of a load-detect probe
 * @connector: connector that was probed
 * @old: state saved by intel_get_load_detect_pipe()
 *
 * Releases whatever intel_get_load_detect_pipe() set up and drops the
 * crtc mutex it acquired.  If the pipe was only borrowed temporarily the
 * crtc is disabled again and the temporary framebuffer (if any) is
 * released; otherwise the connector is merely restored to its previous
 * dpms state.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Detach the borrowed crtc again and shut it down. */
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			/* Drop the framebuffer created purely for detection. */
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		/* Pairs with the mutex_lock() taken in intel_get_load_detect_pipe(). */
		mutex_unlock(&crtc->mutex);
		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	mutex_unlock(&crtc->mutex);
}
7934 
7935 static int i9xx_pll_refclk(struct drm_device *dev,
7936 			   const struct intel_crtc_config *pipe_config)
7937 {
7938 	struct drm_i915_private *dev_priv = dev->dev_private;
7939 	u32 dpll = pipe_config->dpll_hw_state.dpll;
7940 
7941 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7942 		return dev_priv->vbt.lvds_ssc_freq;
7943 	else if (HAS_PCH_SPLIT(dev))
7944 		return 120000;
7945 	else if (!IS_GEN2(dev))
7946 		return 96000;
7947 	else
7948 		return 48000;
7949 }
7950 
/* Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP divider state captured in @pipe_config back into
 * m/n/p divisor values and stores the resulting port clock (including the
 * pixel multiplier) in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP divider register the DPLL is actually using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview's N field is decoded via ffs() (bit-position encoding). */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 post divider is decoded via ffs() on gen3+. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs. LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Bail without touching port_clock on unknown modes. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* Gen2: LVDS state lives in the LVDS register (absent on i830). */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}
8039 
8040 int intel_dotclock_calculate(int link_freq,
8041 			     const struct intel_link_m_n *m_n)
8042 {
8043 	/*
8044 	 * The calculation for the data clock is:
8045 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
8046 	 * But we want to avoid losing precison if possible, so:
8047 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
8048 	 *
8049 	 * and the link clock is simpler:
8050 	 * link_clock = (m * link_clock) / n
8051 	 */
8052 
8053 	if (!m_n->link_n)
8054 		return 0;
8055 
8056 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
8057 }
8058 
/*
 * ironlake_pch_clock_get - read back clocks for a PCH-driven pipe
 *
 * port_clock is recovered from the DPLL state via i9xx_crtc_clock_get(),
 * while the dot clock is derived from the FDI link M/N values and the FDI
 * link frequency.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}
8077 
/** Returns the currently programmed mode of the given pipe.
 *
 * Reads the transcoder timing registers and the DPLL/FP divider state and
 * reconstructs a freshly kzalloc'd drm_display_mode from them.  The caller
 * owns the returned mode; returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_config pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Timing fields are split low/high halfword; the +1 undoes the
	 * hardware's value-minus-one encoding. */
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
8125 
/*
 * intel_increase_pllclock - leave the LVDS downclocked mode
 *
 * If LVDS downclocking is available and the reduced (FPA1) divisors are
 * currently selected, switch back to the full-rate divisors and wait a
 * vblank for the change to latch.  No-op on PCH-split hardware.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		/* Re-read to verify the divisor select actually stuck. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}
8156 
/*
 * intel_decrease_pllclock - enter the LVDS downclocked mode
 *
 * Counterpart to intel_increase_pllclock(): selects the reduced (FPA1)
 * divisors when a downclocked LVDS mode is available.  No-op on PCH-split
 * hardware.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Re-read to verify the divisor select actually stuck. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}
8192 
/*
 * intel_mark_busy - note that the GPU has become busy
 *
 * Notifies the Haswell package C8 bookkeeping and updates the GPU
 * frequency accounting.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	hsw_package_c8_gpu_busy(dev_priv);
	i915_update_gfx_val(dev_priv);
}
8200 
/*
 * intel_mark_idle - note that the GPU has gone idle
 *
 * Notifies the Haswell package C8 bookkeeping and, if powersaving is
 * enabled, downclocks the LVDS PLL on every crtc with a framebuffer and
 * kicks the gen6+ RPS idle handling.
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	hsw_package_c8_gpu_idle(dev_priv);

	if (!i915_powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip crtcs without a framebuffer attached. */
		if (!crtc->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (dev_priv->info->gen >= 6)
		gen6_rps_idle(dev->dev_private);
}
8221 
/*
 * intel_mark_fb_busy - a framebuffer object is being used
 * @obj: GEM object backing the framebuffer
 * @ring: ring writing to @obj, or NULL
 *
 * Upclocks the LVDS PLL on every crtc currently scanning out @obj; when a
 * ring is given and FBC is enabled, also marks the ring's FBC state dirty.
 */
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
			struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_crtc *crtc;

	if (!i915_powersave)
		return;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
			continue;

		/* Only crtcs whose framebuffer is backed by @obj matter. */
		if (to_intel_framebuffer(crtc->fb)->obj != obj)
			continue;

		intel_increase_pllclock(crtc);
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}
8243 
/*
 * intel_crtc_destroy - tear down an intel_crtc
 *
 * Cancels any pending page-flip work, removes the cursor, unhooks the
 * crtc from the DRM core and frees the structure.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach the unpin work under the event lock, then cancel it
	 * outside the lock. */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
8266 
/*
 * intel_unpin_work_fn - deferred cleanup after a completed page flip
 *
 * Runs from the driver workqueue: unpins the old framebuffer object,
 * drops the references taken when the flip was queued, rechecks FBC and
 * frees the work item.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	/* Drop the references taken in intel_crtc_page_flip(). */
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}
8286 
/*
 * do_intel_finish_page_flip - complete a pending page flip on @crtc
 *
 * If a flip is queued and has reached INTEL_FLIP_COMPLETE, detach it from
 * the crtc, deliver the userspace vblank event (if any), release the
 * vblank reference and queue the unpin work for deferred cleanup.  The
 * smp_rmb() pairs here match the smp_wmb() pair in
 * intel_mark_page_flip_active().
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Wake anyone waiting for pending flips on this device. */
	wake_up_all(&dev_priv->pending_flip_queue);

	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
8327 
8328 void intel_finish_page_flip(struct drm_device *dev, int pipe)
8329 {
8330 	drm_i915_private_t *dev_priv = dev->dev_private;
8331 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8332 
8333 	do_intel_finish_page_flip(dev, crtc);
8334 }
8335 
8336 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8337 {
8338 	drm_i915_private_t *dev_priv = dev->dev_private;
8339 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8340 
8341 	do_intel_finish_page_flip(dev, crtc);
8342 }
8343 
/*
 * intel_prepare_page_flip - note that a queued flip has reached the hardware
 * @plane: plane the flip-pending interrupt fired for
 *
 * Advances unpin_work->pending (if a flip is queued and still non-zero)
 * so that a subsequent flip-done interrupt is allowed to complete it in
 * do_intel_finish_page_flip().
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}
8359 
/*
 * Publish the freshly queued unpin_work as pending.  The write barriers
 * pair with the smp_rmb() calls in do_intel_finish_page_flip().
 */
inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}
8368 
/*
 * intel_gen2_queue_flip - emit an MI_DISPLAY_FLIP for gen2 hardware
 *
 * Pins @obj for scanout (with fence) and queues the flip on the render
 * ring, preceded by an MI_WAIT_FOR_EVENT so any previous flip on the same
 * plane finishes first.  Returns 0 on success or a negative errno; the
 * object is unpinned again on the error paths.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8413 
/*
 * intel_gen3_queue_flip - emit an MI_DISPLAY_FLIP for gen3 hardware
 *
 * Same flow as intel_gen2_queue_flip(), but uses the MI_DISPLAY_FLIP_I915
 * opcode and a trailing MI_NOOP instead of the aux base address dword.
 * Returns 0 on success or a negative errno.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Wait for any previous flip on the same plane to complete first. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8455 
/*
 * intel_gen4_queue_flip - emit an MI_DISPLAY_FLIP for i965-class hardware
 *
 * i965+ latches the tiled/linear offsets from the display registers, so
 * the flip only needs to reprogram the base address (with the tiling mode
 * in the low bits).  Returns 0 on success or a negative errno.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring,
			(i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8504 
/*
 * intel_gen6_queue_flip - emit an MI_DISPLAY_FLIP for gen6 hardware
 *
 * Same flow as intel_gen4_queue_flip(), but the tiling mode travels in
 * the pitch dword rather than the base-address dword.  Returns 0 on
 * success or a negative errno.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8549 
/*
 * intel_gen7_queue_flip - emit an MI_DISPLAY_FLIP for gen7 (IVB+) hardware
 *
 * Prefers obj->ring for the flip, falling back to the blitter ring on
 * Valleyview or when obj was not last used on the render ring.  Selects a
 * per-plane flip bit and, on the render ring, first reprograms DERRMR so
 * the flip-done event is unmasked (see the workaround comment below).
 * The packet is cacheline-aligned as required by the IVB bspec.
 * Returns 0 on success or a negative errno.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring;
	uint32_t plane_bit = 0;
	int len, ret;

	ring = obj->ring;
	if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
		ring = &dev_priv->ring[BCS];

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	/* 4 dwords for the flip itself, plus 6 for the DERRMR LRI+SRM. */
	len = 4;
	if (ring->id == RCS)
		len += 6;

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		goto err_unpin;

	ret = intel_ring_begin(ring, len);
	if (ret)
		goto err_unpin;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
				MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
8643 
/*
 * intel_default_queue_flip - fallback .queue_flip hook
 *
 * Page flipping is not supported on this path; always fails with -ENODEV.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    uint32_t flags)
{
	return -ENODEV;
}
8652 
/*
 * intel_crtc_page_flip - queue an asynchronous page flip
 * @crtc: crtc to flip
 * @fb: new framebuffer to scan out
 * @event: optional vblank event to deliver on completion
 * @page_flip_flags: flags passed through to the hardware queue_flip hook
 *
 * Validates that the flip is expressible as an MI display flip (same
 * pixel format, and on gen4+ identical offsets/pitch), allocates the
 * unpin_work tracking state and hands command emission to the
 * per-generation display.queue_flip hook.  Only one flip may be pending
 * per crtc at a time (-EBUSY otherwise).
 *
 * NOTE(review): crtc->fb is dereferenced below without a NULL check;
 * this assumes a flip is never requested on a crtc with no current
 * framebuffer -- confirm against the ioctl caller.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	int ret;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* A flip is already pending on this crtc. */
		lockmgr(&dev->event_lock, LK_RELEASE);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Throttle: don't let unpin work pile up in the workqueue. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj, NULL);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	/* Undo everything done since taking struct_mutex. */
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
8755 
/* CRTC helper vtable: only the atomic base-address update and the palette
 * (LUT) load hooks are populated. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};
8760 
8761 /**
8762  * intel_modeset_update_staged_output_state
8763  *
8764  * Updates the staged output configuration state, e.g. after we've read out the
8765  * current hw state.
8766  */
8767 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
8768 {
8769 	struct intel_encoder *encoder;
8770 	struct intel_connector *connector;
8771 
8772 	list_for_each_entry(connector, &dev->mode_config.connector_list,
8773 			    base.head) {
8774 		connector->new_encoder =
8775 			to_intel_encoder(connector->base.encoder);
8776 	}
8777 
8778 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8779 			    base.head) {
8780 		encoder->new_crtc =
8781 			to_intel_crtc(encoder->base.crtc);
8782 	}
8783 }
8784 
8785 /**
8786  * intel_modeset_commit_output_state
8787  *
8788  * This function copies the stage display pipe configuration to the real one.
8789  */
8790 static void intel_modeset_commit_output_state(struct drm_device *dev)
8791 {
8792 	struct intel_encoder *encoder;
8793 	struct intel_connector *connector;
8794 
8795 	list_for_each_entry(connector, &dev->mode_config.connector_list,
8796 			    base.head) {
8797 		connector->base.encoder = &connector->new_encoder->base;
8798 	}
8799 
8800 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8801 			    base.head) {
8802 		encoder->base.crtc = &encoder->new_crtc->base;
8803 	}
8804 }
8805 
/*
 * connected_sink_compute_bpp - clamp pipe bpp to what the sink supports
 *
 * Reduces pipe_config->pipe_bpp according to the connector's EDID bpc:
 * never above 3 * reported bpc, and capped at 24 bpp for sinks that
 * report no bpc at all (pre-EDID-1.4 screens).
 */
static void
connected_sink_compute_bpp(struct intel_connector * connector,
			   struct intel_crtc_config *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		drm_get_connector_name(&connector->base));

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}
8831 
/*
 * compute_baseline_pipe_bpp - derive the initial pipe bpp from @fb
 *
 * Picks a pipe bpp based on the framebuffer pixel format, stores it in
 * pipe_config->pipe_bpp, then clamps pipe_bpp against the EDID limits of
 * every connector staged onto @crtc.  Returns the pre-clamp bpp value, or
 * -EINVAL for formats that are unsupported or illegal on this hardware
 * generation.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fall through */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fall through */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Only consider connectors staged onto this crtc. */
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}
8891 
8892 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8893 {
8894 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8895 			"type: 0x%x flags: 0x%x\n",
8896 		mode->crtc_clock,
8897 		mode->crtc_hdisplay, mode->crtc_hsync_start,
8898 		mode->crtc_hsync_end, mode->crtc_htotal,
8899 		mode->crtc_vdisplay, mode->crtc_vsync_start,
8900 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8901 }
8902 
/*
 * Dump the full contents of @pipe_config for @crtc to the KMS debug log.
 * @context is a caller-supplied tag identifying which state is being
 * dumped (e.g. "[modeset]", "[hw state]", "[sw state]").
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	/* FDI/PCH link parameters and M/N dividers. */
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	/* DP link M/N dividers. */
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);
	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->requested_mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}
8943 
8944 static bool check_encoder_cloning(struct drm_crtc *crtc)
8945 {
8946 	int num_encoders = 0;
8947 	bool uncloneable_encoders = false;
8948 	struct intel_encoder *encoder;
8949 
8950 	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8951 			    base.head) {
8952 		if (&encoder->new_crtc->base != crtc)
8953 			continue;
8954 
8955 		num_encoders++;
8956 		if (!encoder->cloneable)
8957 			uncloneable_encoders = true;
8958 	}
8959 
8960 	return !(num_encoders > 1 && uncloneable_encoders);
8961 }
8962 
/*
 * Build a candidate pipe configuration for driving @mode on @crtc with
 * @fb as the scanout source.  Every encoder staged onto the crtc and the
 * crtc itself get a chance to adjust (or reject) the configuration.
 *
 * Returns a kzalloc'ed intel_crtc_config which the caller must kfree(),
 * or an ERR_PTR on failure (invalid cloning setup, allocation failure,
 * or encoder/crtc rejection of the mode).
 */
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_crtc_config *pipe_config;
	int plane_bpp, ret = -EINVAL;
	bool retry = true;	/* allow exactly one RETRY round, see below */

	if (!check_encoder_cloning(crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return ERR_PTR(-ENOMEM);

	drm_mode_copy(&pipe_config->adjusted_mode, mode);
	drm_mode_copy(&pipe_config->requested_mode, mode);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
	pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
	pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {

		if (&encoder->new_crtc->base != crtc)
			continue;

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* A RETRY from intel_crtc_compute_config() is honoured at most once;
	 * a second one would mean the computation does not converge. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering is needed whenever the pipe bpp ended up below the
	 * source plane bpp. */
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	kfree(pipe_config);
	return ERR_PTR(ret);
}
9080 
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
 *
 * On return:
 *   *disable_pipes - pipes that end up with no encoder and must be shut off
 *   *prepare_pipes - pipes whose output routing changes (plus @crtc itself
 *                    if enabled); the caller disables and re-enables these
 *   *modeset_pipes - subset of *prepare_pipes needing full mode programming
 */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Skip connectors whose routing is unchanged. */
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		/* Skip encoders whose routing is unchanged. */
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for any pipes that will be fully disabled ... */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool used = false;

		/* Don't try to disable disabled crtcs. */
		if (!intel_crtc->base.enabled)
			continue;

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == intel_crtc)
				used = true;
		}

		if (!used)
			*disable_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on live display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (crtc->enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/*
	 * For simplicity do a full modeset on any pipe where the output routing
	 * changed. We could be more clever, but that would require us to be
	 * more careful with calling the relevant encoder->mode_set functions.
	 */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);

	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obeys this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
	*modeset_pipes &= 1 << intel_crtc->pipe;
	*prepare_pipes &= 1 << intel_crtc->pipe;

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
}
9178 
9179 static bool intel_crtc_in_use(struct drm_crtc *crtc)
9180 {
9181 	struct drm_encoder *encoder;
9182 	struct drm_device *dev = crtc->dev;
9183 
9184 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
9185 		if (encoder->crtc == crtc)
9186 			return true;
9187 
9188 	return false;
9189 }
9190 
/*
 * Commit the staged output routing to the live state and refresh derived
 * software state: encoder->connectors_active, crtc->base.enabled, and the
 * connector dpms state for all pipes in @prepare_pipes.
 */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	/* Clear connectors_active on encoders driving the touched pipes;
	 * it is re-set below for connectors that stay routed and on. */
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

	intel_modeset_commit_output_state(dev);

	/* Update computed state. */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
	}

	/* Connectors on a freshly-set pipe are forced to DPMS_ON. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
							 dpms_property,
							 DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}
9238 
/*
 * Return true when two clock values are "close enough" to be considered
 * equal: either identical, or differing by less than 5% of their sum.
 * A zero clock only matches another zero clock.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	return ((delta + sum) * 100) / sum < 105;
}
9256 
/* Iterate over the intel_crtcs of @dev whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
9262 
/*
 * Compare the software-tracked pipe config against one read back from the
 * hardware.  Returns true when they agree; on the first mismatch a
 * DRM_ERROR naming the offending field is emitted and false is returned.
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_config *current_config,
			  struct intel_crtc_config *pipe_config)
{
/* Compare one field, reporting mismatches in hex. */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare one field, reporting mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare only the bits of a flags field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") "	   \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* Compare clocks with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* True if either config carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(has_dp_encoder);
	PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
	PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
	PIPE_CONF_CHECK_I(dp_m_n.link_m);
	PIPE_CONF_CHECK_I(dp_m_n.link_n);
	PIPE_CONF_CHECK_I(dp_m_n.tu);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);

	PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	/* Sync polarity can't be read back reliably on quirky hardware. */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(pipe_src_w);
	PIPE_CONF_CHECK_I(pipe_src_h);

	PIPE_CONF_CHECK_I(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_INFO(dev)->gen < 4)
		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	if (!HAS_DDI(dev)) {
		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
	}

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}
9396 
/*
 * WARN-check every connector: its hw state (via
 * intel_connector_check_state()) and that its staged encoder matches the
 * currently-committed one.
 */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}
9412 
/*
 * WARN-check every encoder: staged vs. committed crtc, the enabled/active
 * state computed from the attached connectors, and the active state and
 * pipe reported back by the encoder's ->get_hw_state hook.
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Derive enabled/active from the connectors attached to us. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		/* Now cross-check against the hardware. */
		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}
9470 
/*
 * WARN-check every crtc: the sw enabled/active tracking against the
 * encoders routed to it, and the sw pipe config against the one read back
 * from the hardware via ->get_pipe_config / ->get_config.
 */
static void
check_crtc_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_config pipe_config;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		/* Derive enabled/active from the encoders routed to us. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		/* Read the hw pipe config back for comparison below. */
		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe A quirk */
		if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
			active = crtc->active;

		/* Let active encoders fill in their part of the hw config. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
			WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, &crtc->config,
					       "[sw state]");
		}
	}
}
9538 
/*
 * WARN-check every shared DPLL: refcount/active counts against the crtcs
 * using it, sw on/active tracking, and the hw state read back via the
 * pll's ->get_hw_state hook.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		WARN(pll->active > pll->refcount,
		     "more active pll users than references: %i vs %i\n",
		     pll->active, pll->refcount);
		WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Count the crtcs referencing / actively using this pll. */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
				    base.head) {
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		WARN(pll->refcount != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     pll->refcount, enabled_crtcs);

		WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
9588 
/*
 * Cross-check the committed modeset software state against the hardware
 * state read back through the various ->get_hw_state/->get_config hooks.
 * Any disagreement triggers a WARN in the individual checkers.
 */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}
9597 
/*
 * Sanity-check an encoder-reported @dotclock against the one derived from
 * the FDI link (stored in pipe_config->adjusted_mode.crtc_clock).  The
 * comparison uses intel_fuzzy_clock_check(), so small rounding
 * differences between the two are tolerated.
 */
void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->adjusted_mode.crtc_clock, dotclock);
}
9609 
/*
 * Core modeset implementation: computes the new pipe config (when
 * modeset_pipes is non-zero), disables all affected pipes, commits the
 * staged output routing, programs the DPLLs/encoders via
 * intel_crtc_mode_set() and finally re-enables the prepared pipes.
 *
 * Returns 0 on success or a negative errno; on failure crtc->mode is
 * restored for a still-enabled crtc.
 */
static int __intel_set_mode(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    int x, int y, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc_config *pipe_config = NULL;
	struct intel_crtc *intel_crtc;
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
	int ret = 0;

	/* NOTE(review): M_WAITOK allocations normally don't fail; the NULL
	 * check is kept for safety. */
	saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK);
	if (!saved_mode)
		return -ENOMEM;

	intel_modeset_affected_pipes(crtc, &modeset_pipes,
				     &prepare_pipes, &disable_pipes);

	*saved_mode = crtc->mode;

	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with multiple crtcs
	 * changing their mode at the same time. */
	if (modeset_pipes) {
		pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
		if (IS_ERR(pipe_config)) {
			ret = PTR_ERR(pipe_config);
			pipe_config = NULL;

			goto out;
		}
		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       "[modeset]");
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes,
						modeset_pipes, pipe_config);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		to_intel_crtc(crtc)->config = *pipe_config;

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		ret = intel_crtc_mode_set(&intel_crtc->base,
					  x, y, fb);
		if (ret)
			goto done;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
		dev_priv->display.crtc_enable(&intel_crtc->base);

	/* FIXME: add subpixel order */
done:
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

out:
	kfree(pipe_config);
	kfree(saved_mode);
	return ret;
}
9720 
9721 static int intel_set_mode(struct drm_crtc *crtc,
9722 			  struct drm_display_mode *mode,
9723 			  int x, int y, struct drm_framebuffer *fb)
9724 {
9725 	int ret;
9726 
9727 	ret = __intel_set_mode(crtc, mode, x, y, fb);
9728 
9729 	if (ret == 0)
9730 		intel_modeset_check_state(crtc->dev);
9731 
9732 	return ret;
9733 }
9734 
/* Re-apply the crtc's currently-set mode, framebuffer and panning offsets. */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
}
9739 
9740 #undef for_each_intel_crtc_masked
9741 
9742 static void intel_set_config_free(struct intel_set_config *config)
9743 {
9744 	if (!config)
9745 		return;
9746 
9747 	kfree(config->save_connector_encoders);
9748 	kfree(config->save_encoder_crtcs);
9749 	kfree(config);
9750 }
9751 
9752 static int intel_set_config_save_state(struct drm_device *dev,
9753 				       struct intel_set_config *config)
9754 {
9755 	struct drm_encoder *encoder;
9756 	struct drm_connector *connector;
9757 	int count;
9758 
9759 	config->save_encoder_crtcs =
9760 		kcalloc(dev->mode_config.num_encoder,
9761 			sizeof(struct drm_crtc *), GFP_KERNEL);
9762 	if (!config->save_encoder_crtcs)
9763 		return -ENOMEM;
9764 
9765 	config->save_connector_encoders =
9766 		kcalloc(dev->mode_config.num_connector,
9767 			sizeof(struct drm_encoder *), GFP_KERNEL);
9768 	if (!config->save_connector_encoders)
9769 		return -ENOMEM;
9770 
9771 	/* Copy data. Note that driver private data is not affected.
9772 	 * Should anything bad happen only the expected state is
9773 	 * restored, not the drivers personal bookkeeping.
9774 	 */
9775 	count = 0;
9776 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
9777 		config->save_encoder_crtcs[count++] = encoder->crtc;
9778 	}
9779 
9780 	count = 0;
9781 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
9782 		config->save_connector_encoders[count++] = connector->encoder;
9783 	}
9784 
9785 	return 0;
9786 }
9787 
9788 static void intel_set_config_restore_state(struct drm_device *dev,
9789 					   struct intel_set_config *config)
9790 {
9791 	struct intel_encoder *encoder;
9792 	struct intel_connector *connector;
9793 	int count;
9794 
9795 	count = 0;
9796 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9797 		encoder->new_crtc =
9798 			to_intel_crtc(config->save_encoder_crtcs[count++]);
9799 	}
9800 
9801 	count = 0;
9802 	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9803 		connector->new_encoder =
9804 			to_intel_encoder(config->save_connector_encoders[count++]);
9805 	}
9806 }
9807 
9808 static bool
9809 is_crtc_connector_off(struct drm_mode_set *set)
9810 {
9811 	int i;
9812 
9813 	if (set->num_connectors == 0)
9814 		return false;
9815 
9816 	if (WARN_ON(set->connectors == NULL))
9817 		return false;
9818 
9819 	for (i = 0; i < set->num_connectors; i++)
9820 		if (set->connectors[i]->encoder &&
9821 		    set->connectors[i]->encoder->crtc == set->crtc &&
9822 		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
9823 			return true;
9824 
9825 	return false;
9826 }
9827 
/*
 * Classify the requested configuration change on set->crtc as either a full
 * modeset (config->mode_changed) or a mere framebuffer/base update
 * (config->fb_changed).  Only ever sets the flags, never clears them.
 */
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{

	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
		/* A connector on this crtc is being turned off: full modeset. */
		config->mode_changed = true;
	} else if (set->crtc->fb != set->fb) {
		/* If we have no fb then treat it as a full mode set */
		if (set->crtc->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			/* With fastboot the pipe can be active before any fb
			 * is attached; a flip is then sufficient. */
			if (intel_crtc->active && i915_fastboot) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
				config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			/* Removing the fb disables the crtc: full modeset. */
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->fb->pixel_format) {
			/* Pixel format changes cannot be done with a flip. */
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
		}
	}

	/* A pan/scan offset change only requires a new base address. */
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
			set->crtc->base.id, config->mode_changed, config->fb_changed);
}
9873 
/*
 * Stage the output routing requested in @set into the ->new_encoder /
 * ->new_crtc pointers of all connectors and encoders without touching the
 * hardware.  Sets config->mode_changed when the staged routing differs from
 * the current one.  Returns 0 on success or -EINVAL for impossible routings.
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct drm_crtc *new_crtc;
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	int ro;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Otherwise traverse passed in connector list and get encoders
		 * for them. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base) {
				connector->new_encoder = connector->encoder;
				break;
			}
		}

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if ((!set->fb || ro == set->num_connectors) &&
		    connector->base.encoder &&
		    connector->base.encoder->crtc == set->crtc) {
			connector->new_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				connector->base.base.id,
				drm_get_connector_name(&connector->base));
		}


		/* Any encoder change forces a full modeset. */
		if (&connector->new_encoder->base != connector->base.encoder) {
			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	/* Update crtc of enabled connectors. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder)
			continue;

		new_crtc = connector->new_encoder->base.crtc;

		/* Connectors named in the request move to the requested crtc. */
		for (ro = 0; ro < set->num_connectors; ro++) {
			if (set->connectors[ro] == &connector->base)
				new_crtc = set->crtc;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
					 new_crtc)) {
			return -EINVAL;
		}
		connector->encoder->new_crtc = to_intel_crtc(new_crtc);

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			connector->base.base.id,
			drm_get_connector_name(&connector->base),
			new_crtc->base.id);
	}

	/* Check for any encoders that needs to be disabled. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		int num_connectors = 0;
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->new_encoder == encoder) {
				WARN_ON(!connector->new_encoder->new_crtc);
				num_connectors++;
			}
		}

		/* Unused encoders are disabled; sharing one encoder between
		 * multiple connectors is not supported. */
		if (num_connectors == 0)
			encoder->new_crtc = NULL;
		else if (num_connectors > 1)
			return -EINVAL;

		/* Only now check for crtc changes so we don't miss encoders
		 * that will be disabled. */
		if (&encoder->new_crtc->base != encoder->base.crtc) {
			DRM_DEBUG_KMS("crtc changed, full mode switch\n");
			config->mode_changed = true;
		}
	}
	/* Now we've also updated encoder->new_crtc for all encoders. */

	return 0;
}
9976 
/*
 * drm_crtc_funcs.set_config hook: classify the requested change, stage the
 * new output routing, apply it (full modeset or fb flip), and attempt to
 * roll back to the previous configuration on failure.
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;

	/* Snapshot current routing so a failed modeset can be rolled back. */
	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	/* Remember the current mode/fb/offsets for the rollback path. */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
	} else if (config->fb_changed) {
		intel_crtc_wait_for_pending_flips(set->crtc);

		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);
		/*
		 * In the fastboot case this may be our only check of the
		 * state after boot.  It would be better to only do it on
		 * the first update, but we don't have a nice way of doing that
		 * (and really, set_config isn't used much for high freq page
		 * flipping, so increasing its cost here shouldn't be a big
		 * deal).
		 */
		if (i915_fastboot && ret == 0)
			intel_modeset_check_state(set->crtc->dev);
	}

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
fail:
		intel_set_config_restore_state(dev, config);

		/* Try to restore the config */
		if (config->mode_changed &&
		    intel_set_mode(save_set.crtc, save_set.mode,
				   save_set.x, save_set.y, save_set.fb))
			DRM_ERROR("failed to restore config after modeset failure\n");
	}

out_config:
	intel_set_config_free(config);
	return ret;
}
10064 
/* CRTC vtable shared by all Intel pipes. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
10073 
/* Initialize the CPU-side (DDI) PLLs on platforms that have them. */
static void intel_cpu_pll_init(struct drm_device *dev)
{
	if (!HAS_DDI(dev))
		return;

	intel_ddi_pll_init(dev);
}
10079 
10080 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
10081 				      struct intel_shared_dpll *pll,
10082 				      struct intel_dpll_hw_state *hw_state)
10083 {
10084 	uint32_t val;
10085 
10086 	val = I915_READ(PCH_DPLL(pll->id));
10087 	hw_state->dpll = val;
10088 	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
10089 	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
10090 
10091 	return val & DPLL_VCO_ENABLE;
10092 }
10093 
/* Program the FP0/FP1 divider registers; the DPLL control register itself
 * is written later in ibx_pch_dpll_enable(). */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
}
10100 
/*
 * Enable a PCH DPLL.  The double write with delays in between follows the
 * hardware's required programming sequence — do not reorder.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
10122 
/* Disable a PCH DPLL after asserting none of its transcoders is active. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
10139 
/* Human-readable names for the two PCH DPLLs, indexed by pll->id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
10144 
10145 static void ibx_pch_dpll_init(struct drm_device *dev)
10146 {
10147 	struct drm_i915_private *dev_priv = dev->dev_private;
10148 	int i;
10149 
10150 	dev_priv->num_shared_dpll = 2;
10151 
10152 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10153 		dev_priv->shared_dplls[i].id = i;
10154 		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
10155 		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
10156 		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
10157 		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
10158 		dev_priv->shared_dplls[i].get_hw_state =
10159 			ibx_pch_dpll_get_hw_state;
10160 	}
10161 }
10162 
10163 static void intel_shared_dpll_init(struct drm_device *dev)
10164 {
10165 	struct drm_i915_private *dev_priv = dev->dev_private;
10166 
10167 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
10168 		ibx_pch_dpll_init(dev);
10169 	else
10170 		dev_priv->num_shared_dpll = 0;
10171 
10172 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
10173 }
10174 
/*
 * Allocate and register the CRTC for @pipe, initialize its gamma LUT and
 * record the pipe/plane -> crtc mappings.  Silently returns on allocation
 * failure.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to plane B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* Each plane and pipe may be claimed by exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
10212 
10213 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
10214 {
10215 	struct drm_encoder *encoder = connector->base.encoder;
10216 
10217 	WARN_ON(!mutex_is_locked(&connector->base.dev->mode_config.mutex));
10218 
10219 	if (!encoder)
10220 		return INVALID_PIPE;
10221 
10222 	return to_intel_crtc(encoder->crtc)->pipe;
10223 }
10224 
10225 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
10226 				struct drm_file *file)
10227 {
10228 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10229 	struct drm_mode_object *drmmode_obj;
10230 	struct intel_crtc *crtc;
10231 
10232 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
10233 		return -ENODEV;
10234 
10235 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
10236 			DRM_MODE_OBJECT_CRTC);
10237 
10238 	if (!drmmode_obj) {
10239 		DRM_ERROR("no such CRTC id\n");
10240 		return -ENOENT;
10241 	}
10242 
10243 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
10244 	pipe_from_crtc_id->pipe = crtc->pipe;
10245 
10246 	return 0;
10247 }
10248 
10249 static int intel_encoder_clones(struct intel_encoder *encoder)
10250 {
10251 	struct drm_device *dev = encoder->base.dev;
10252 	struct intel_encoder *source_encoder;
10253 	int index_mask = 0;
10254 	int entry = 0;
10255 
10256 	list_for_each_entry(source_encoder,
10257 			    &dev->mode_config.encoder_list, base.head) {
10258 
10259 		if (encoder == source_encoder)
10260 			index_mask |= (1 << entry);
10261 
10262 		/* Intel hw has only one MUX where enocoders could be cloned. */
10263 		if (encoder->cloneable && source_encoder->cloneable)
10264 			index_mask |= (1 << entry);
10265 
10266 		entry++;
10267 	}
10268 
10269 	return index_mask;
10270 }
10271 
10272 static bool has_edp_a(struct drm_device *dev)
10273 {
10274 	struct drm_i915_private *dev_priv = dev->dev_private;
10275 
10276 	if (!IS_MOBILE(dev))
10277 		return false;
10278 
10279 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
10280 		return false;
10281 
10282 	if (IS_GEN5(dev) &&
10283 	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
10284 		return false;
10285 
10286 	return true;
10287 }
10288 
10289 const char *intel_output_name(int output)
10290 {
10291 	static const char *names[] = {
10292 		[INTEL_OUTPUT_UNUSED] = "Unused",
10293 		[INTEL_OUTPUT_ANALOG] = "Analog",
10294 		[INTEL_OUTPUT_DVO] = "DVO",
10295 		[INTEL_OUTPUT_SDVO] = "SDVO",
10296 		[INTEL_OUTPUT_LVDS] = "LVDS",
10297 		[INTEL_OUTPUT_TVOUT] = "TV",
10298 		[INTEL_OUTPUT_HDMI] = "HDMI",
10299 		[INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort",
10300 		[INTEL_OUTPUT_EDP] = "eDP",
10301 		[INTEL_OUTPUT_DSI] = "DSI",
10302 		[INTEL_OUTPUT_UNKNOWN] = "Unknown",
10303 	};
10304 
10305 	if (output < 0 || output >= ARRAY_SIZE(names) || !names[output])
10306 		return "Invalid";
10307 
10308 	return names[output];
10309 }
10310 
/*
 * Probe and register all display outputs for this platform.  The probe
 * order is load-bearing: shared pins (e.g. SDVO/HDMI muxing) must be
 * tried in this sequence.  Finally the clone masks are computed and panel
 * connectors are moved to the head of the connector list.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (!IS_ULT(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		/* Port D may be wired to an eDP panel instead of DP. */
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
			if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
#if 0
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);
#endif
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Compute possible_crtcs/possible_clones now that all encoders exist. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
10438 
/* Tear down an intel_framebuffer: unregister it, drop the framebuffer
 * reference count on the backing object and release the GEM reference. */
void intel_framebuffer_fini(struct intel_framebuffer *fb)
{
	drm_framebuffer_cleanup(&fb->base);
	/* Underflow here means fini was called more often than init. */
	WARN_ON(!fb->obj->framebuffer_references--);
	drm_gem_object_unreference_unlocked(&fb->obj->base);
}
10445 
/* drm_framebuffer_funcs.destroy hook: tear down and free the wrapper. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *ifb = to_intel_framebuffer(fb);

	intel_framebuffer_fini(ifb);
	kfree(ifb);
}
10453 
10454 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
10455 						struct drm_file *file,
10456 						unsigned int *handle)
10457 {
10458 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
10459 	struct drm_i915_gem_object *obj = intel_fb->obj;
10460 
10461 	return drm_gem_handle_create(file, &obj->base, handle);
10462 }
10463 
/* Framebuffer vtable for userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
10468 
10469 int intel_framebuffer_init(struct drm_device *dev,
10470 			   struct intel_framebuffer *intel_fb,
10471 			   struct drm_mode_fb_cmd2 *mode_cmd,
10472 			   struct drm_i915_gem_object *obj)
10473 {
10474 	int aligned_height, tile_height;
10475 	int pitch_limit;
10476 	int ret;
10477 
10478 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
10479 
10480 	if (obj->tiling_mode == I915_TILING_Y) {
10481 		DRM_DEBUG("hardware does not support tiling Y\n");
10482 		return -EINVAL;
10483 	}
10484 
10485 	if (mode_cmd->pitches[0] & 63) {
10486 		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
10487 			  mode_cmd->pitches[0]);
10488 		return -EINVAL;
10489 	}
10490 
10491 	if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
10492 		pitch_limit = 32*1024;
10493 	} else if (INTEL_INFO(dev)->gen >= 4) {
10494 		if (obj->tiling_mode)
10495 			pitch_limit = 16*1024;
10496 		else
10497 			pitch_limit = 32*1024;
10498 	} else if (INTEL_INFO(dev)->gen >= 3) {
10499 		if (obj->tiling_mode)
10500 			pitch_limit = 8*1024;
10501 		else
10502 			pitch_limit = 16*1024;
10503 	} else
10504 		/* XXX DSPC is limited to 4k tiled */
10505 		pitch_limit = 8*1024;
10506 
10507 	if (mode_cmd->pitches[0] > pitch_limit) {
10508 		DRM_DEBUG("%s pitch (%d) must be at less than %d\n",
10509 			  obj->tiling_mode ? "tiled" : "linear",
10510 			  mode_cmd->pitches[0], pitch_limit);
10511 		return -EINVAL;
10512 	}
10513 
10514 	if (obj->tiling_mode != I915_TILING_NONE &&
10515 	    mode_cmd->pitches[0] != obj->stride) {
10516 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
10517 			  mode_cmd->pitches[0], obj->stride);
10518 		return -EINVAL;
10519 	}
10520 
10521 	/* Reject formats not supported by any plane early. */
10522 	switch (mode_cmd->pixel_format) {
10523 	case DRM_FORMAT_C8:
10524 	case DRM_FORMAT_RGB565:
10525 	case DRM_FORMAT_XRGB8888:
10526 	case DRM_FORMAT_ARGB8888:
10527 		break;
10528 	case DRM_FORMAT_XRGB1555:
10529 	case DRM_FORMAT_ARGB1555:
10530 		if (INTEL_INFO(dev)->gen > 3) {
10531 			DRM_DEBUG("unsupported pixel format: %s\n",
10532 				  drm_get_format_name(mode_cmd->pixel_format));
10533 			return -EINVAL;
10534 		}
10535 		break;
10536 	case DRM_FORMAT_XBGR8888:
10537 	case DRM_FORMAT_ABGR8888:
10538 	case DRM_FORMAT_XRGB2101010:
10539 	case DRM_FORMAT_ARGB2101010:
10540 	case DRM_FORMAT_XBGR2101010:
10541 	case DRM_FORMAT_ABGR2101010:
10542 		if (INTEL_INFO(dev)->gen < 4) {
10543 			DRM_DEBUG("unsupported pixel format: %s\n",
10544 				  drm_get_format_name(mode_cmd->pixel_format));
10545 			return -EINVAL;
10546 		}
10547 		break;
10548 	case DRM_FORMAT_YUYV:
10549 	case DRM_FORMAT_UYVY:
10550 	case DRM_FORMAT_YVYU:
10551 	case DRM_FORMAT_VYUY:
10552 		if (INTEL_INFO(dev)->gen < 5) {
10553 			DRM_DEBUG("unsupported pixel format: %s\n",
10554 				  drm_get_format_name(mode_cmd->pixel_format));
10555 			return -EINVAL;
10556 		}
10557 		break;
10558 	default:
10559 		DRM_DEBUG("unsupported pixel format: %s\n",
10560 			  drm_get_format_name(mode_cmd->pixel_format));
10561 		return -EINVAL;
10562 	}
10563 
10564 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
10565 	if (mode_cmd->offsets[0] != 0)
10566 		return -EINVAL;
10567 
10568 	tile_height = IS_GEN2(dev) ? 16 : 8;
10569 	aligned_height = ALIGN(mode_cmd->height,
10570 			       obj->tiling_mode ? tile_height : 1);
10571 	/* FIXME drm helper for size checks (especially planar formats)? */
10572 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
10573 		return -EINVAL;
10574 
10575 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
10576 	intel_fb->obj = obj;
10577 	intel_fb->obj->framebuffer_references++;
10578 
10579 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
10580 	if (ret) {
10581 		DRM_ERROR("framebuffer init failed %d\n", ret);
10582 		return ret;
10583 	}
10584 
10585 	return 0;
10586 }
10587 
/* .fb_create hook: resolve the userspace GEM handle in @mode_cmd and wrap
 * the object in a new intel_framebuffer. */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/* NOTE(review): this NULL check only works if `base` sits at offset 0
	 * of struct drm_i915_gem_object, so a failed lookup makes &obj->base
	 * itself NULL — confirm against the struct layout. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}
10602 
#ifndef CONFIG_DRM_I915_FBDEV
/* No-op stub when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
10608 
/* Device-wide mode config hooks (userspace fb creation, hotplug repoll). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
};
10613 
10614 /* Set up chip specific display functions */
10615 static void intel_init_display(struct drm_device *dev)
10616 {
10617 	struct drm_i915_private *dev_priv = dev->dev_private;
10618 
10619 	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
10620 		dev_priv->display.find_dpll = g4x_find_best_dpll;
10621 	else if (IS_VALLEYVIEW(dev))
10622 		dev_priv->display.find_dpll = vlv_find_best_dpll;
10623 	else if (IS_PINEVIEW(dev))
10624 		dev_priv->display.find_dpll = pnv_find_best_dpll;
10625 	else
10626 		dev_priv->display.find_dpll = i9xx_find_best_dpll;
10627 
10628 	if (HAS_DDI(dev)) {
10629 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
10630 		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
10631 		dev_priv->display.crtc_enable = haswell_crtc_enable;
10632 		dev_priv->display.crtc_disable = haswell_crtc_disable;
10633 		dev_priv->display.off = haswell_crtc_off;
10634 		dev_priv->display.update_plane = ironlake_update_plane;
10635 	} else if (HAS_PCH_SPLIT(dev)) {
10636 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
10637 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
10638 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
10639 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
10640 		dev_priv->display.off = ironlake_crtc_off;
10641 		dev_priv->display.update_plane = ironlake_update_plane;
10642 	} else if (IS_VALLEYVIEW(dev)) {
10643 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10644 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10645 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
10646 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10647 		dev_priv->display.off = i9xx_crtc_off;
10648 		dev_priv->display.update_plane = i9xx_update_plane;
10649 	} else {
10650 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10651 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10652 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
10653 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
10654 		dev_priv->display.off = i9xx_crtc_off;
10655 		dev_priv->display.update_plane = i9xx_update_plane;
10656 	}
10657 
10658 	/* Returns the core display clock speed */
10659 	if (IS_VALLEYVIEW(dev))
10660 		dev_priv->display.get_display_clock_speed =
10661 			valleyview_get_display_clock_speed;
10662 	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
10663 		dev_priv->display.get_display_clock_speed =
10664 			i945_get_display_clock_speed;
10665 	else if (IS_I915G(dev))
10666 		dev_priv->display.get_display_clock_speed =
10667 			i915_get_display_clock_speed;
10668 	else if (IS_I945GM(dev) || IS_845G(dev))
10669 		dev_priv->display.get_display_clock_speed =
10670 			i9xx_misc_get_display_clock_speed;
10671 	else if (IS_PINEVIEW(dev))
10672 		dev_priv->display.get_display_clock_speed =
10673 			pnv_get_display_clock_speed;
10674 	else if (IS_I915GM(dev))
10675 		dev_priv->display.get_display_clock_speed =
10676 			i915gm_get_display_clock_speed;
10677 	else if (IS_I865G(dev))
10678 		dev_priv->display.get_display_clock_speed =
10679 			i865_get_display_clock_speed;
10680 	else if (IS_I85X(dev))
10681 		dev_priv->display.get_display_clock_speed =
10682 			i855_get_display_clock_speed;
10683 	else /* 852, 830 */
10684 		dev_priv->display.get_display_clock_speed =
10685 			i830_get_display_clock_speed;
10686 
10687 	if (HAS_PCH_SPLIT(dev)) {
10688 		if (IS_GEN5(dev)) {
10689 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
10690 			dev_priv->display.write_eld = ironlake_write_eld;
10691 		} else if (IS_GEN6(dev)) {
10692 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
10693 			dev_priv->display.write_eld = ironlake_write_eld;
10694 		} else if (IS_IVYBRIDGE(dev)) {
10695 			/* FIXME: detect B0+ stepping and use auto training */
10696 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
10697 			dev_priv->display.write_eld = ironlake_write_eld;
10698 			dev_priv->display.modeset_global_resources =
10699 				ivb_modeset_global_resources;
10700 		} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
10701 			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
10702 			dev_priv->display.write_eld = haswell_write_eld;
10703 			dev_priv->display.modeset_global_resources =
10704 				haswell_modeset_global_resources;
10705 		}
10706 	} else if (IS_G4X(dev)) {
10707 		dev_priv->display.write_eld = g4x_write_eld;
10708 	} else if (IS_VALLEYVIEW(dev)) {
10709 		dev_priv->display.modeset_global_resources =
10710 			valleyview_modeset_global_resources;
10711 		dev_priv->display.write_eld = ironlake_write_eld;
10712 	}
10713 
10714 	/* Default just returns -ENODEV to indicate unsupported */
10715 	dev_priv->display.queue_flip = intel_default_queue_flip;
10716 
10717 	switch (INTEL_INFO(dev)->gen) {
10718 	case 2:
10719 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
10720 		break;
10721 
10722 	case 3:
10723 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
10724 		break;
10725 
10726 	case 4:
10727 	case 5:
10728 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
10729 		break;
10730 
10731 	case 6:
10732 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
10733 		break;
10734 	case 7:
10735 	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
10736 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
10737 		break;
10738 	}
10739 
10740 	intel_panel_init_backlight_funcs(dev);
10741 }
10742 
10743 /*
10744  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
10745  * resume, or other times.  This quirk makes sure that's the case for
10746  * affected systems.
10747  */
10748 static void quirk_pipea_force(struct drm_device *dev)
10749 {
10750 	struct drm_i915_private *dev_priv = dev->dev_private;
10751 
10752 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
10753 	DRM_INFO("applying pipe a force quirk\n");
10754 }
10755 
10756 /*
10757  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
10758  */
10759 static void quirk_ssc_force_disable(struct drm_device *dev)
10760 {
10761 	struct drm_i915_private *dev_priv = dev->dev_private;
10762 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
10763 	DRM_INFO("applying lvds SSC disable quirk\n");
10764 }
10765 
10766 /*
10767  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
10768  * brightness value
10769  */
10770 static void quirk_invert_brightness(struct drm_device *dev)
10771 {
10772 	struct drm_i915_private *dev_priv = dev->dev_private;
10773 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
10774 	DRM_INFO("applying inverted panel brightness quirk\n");
10775 }
10776 
/* One PCI-id keyed quirk entry; matched against the device by
 * intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device id */
	int subsystem_vendor;	/* PCI subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied when all ids match */
};
10783 
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on a DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* terminated by an empty entry */
};
10789 
/* DMI match callback: log which system tripped the reversed-backlight
 * quirk.  A non-zero return counts the entry as matched for
 * dmi_check_system(). */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
10795 
/* DMI-keyed quirk table for systems whose PCI subsystem ids are not
 * meaningful enough to match on. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
10811 
/* PCI-id keyed quirk table: { device, subsystem_vendor, subsystem_device,
 * hook }.  PCI_ANY_ID wildcards the subsystem fields. */
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
10846 
10847 static void intel_init_quirks(struct drm_device *dev)
10848 {
10849 	struct device *d = dev->dev;
10850 	int i;
10851 
10852 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10853 		struct intel_quirk *q = &intel_quirks[i];
10854 
10855 		if (pci_get_device(d) == q->device &&
10856 		    (pci_get_subvendor(d) == q->subsystem_vendor ||
10857 		     q->subsystem_vendor == PCI_ANY_ID) &&
10858 		    (pci_get_subdevice(d) == q->subsystem_device ||
10859 		     q->subsystem_device == PCI_ANY_ID))
10860 			q->hook(dev);
10861 	}
10862 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
10863 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
10864 			intel_dmi_quirks[i].hook(dev);
10865 	}
10866 }
10867 
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* Linux-only legacy VGA arbitration; not wired up in this port. */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set the screen-off bit (bit 5) in sequencer register SR01 via the
	 * VGA index/data port pair; note outb() here is (port, value). */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Let the sequencer write settle before touching the VGA plane. */
	udelay(300);

	/* Disable the VGA display plane itself and flush the write. */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
10889 
/*
 * (Re)program display hardware state that is lost across suspend or a
 * GPU reset: DDI buffers, clock gating, DPIO, and GT power-saving.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	intel_init_clock_gating(dev);

	intel_reset_dpio(dev);

	/* GT powersave setup is done under struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	intel_enable_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}
10902 
/* Modeset-side suspend hook; currently just defers to intel_suspend_hw(). */
void intel_modeset_suspend_hw(struct drm_device *dev)
{
	intel_suspend_hw(dev);
}
10907 
/*
 * One-time modeset initialization: set up the drm mode_config limits and
 * hooks, apply platform quirks, create CRTCs and sprite planes, and
 * probe the display outputs.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, j, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* No display pipes at all: nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with the hw generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* One CRTC per pipe, plus each pipe's sprite planes. */
	for_each_pipe(i) {
		intel_crtc_init(dev, i);
		for (j = 0; j < dev_priv->num_plane; j++) {
			ret = intel_plane_init(dev, i, j);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(i), sprite_name(i, j), ret);
		}
	}

	intel_init_dpio(dev);
	intel_reset_dpio(dev);

	intel_cpu_pll_init(dev);
	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);
}
10971 
/*
 * Sever the connector -> encoder -> crtc links and mark everything off.
 * Used while sanitizing inconsistent hw state read back from the BIOS.
 * NOTE(review): assumes connector->encoder is non-NULL.
 */
static void
intel_connector_break_all_links(struct intel_connector *connector)
{
	connector->base.dpms = DRM_MODE_DPMS_OFF;
	connector->base.encoder = NULL;
	connector->encoder->connectors_active = false;
	connector->encoder->base.crtc = NULL;
}
10980 
/*
 * Force pipe A on (for the QUIRK_PIPEA_FORCE workaround) by running one
 * load-detect cycle on the analog (CRT) output, which programs a full,
 * valid configuration rather than just flipping the pipe enable bit.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No analog connector registered: nothing to run load-detect on. */
	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
		intel_release_load_detect_pipe(crt, &load_detect_temp);


}
11007 
11008 static bool
11009 intel_check_plane_mapping(struct intel_crtc *crtc)
11010 {
11011 	struct drm_device *dev = crtc->base.dev;
11012 	struct drm_i915_private *dev_priv = dev->dev_private;
11013 	u32 reg, val;
11014 
11015 	if (INTEL_INFO(dev)->num_pipes == 1)
11016 		return true;
11017 
11018 	reg = DSPCNTR(!crtc->plane);
11019 	val = I915_READ(reg);
11020 
11021 	if ((val & DISPLAY_PLANE_ENABLE) &&
11022 	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
11023 		return false;
11024 
11025 	return true;
11026 }
11027 
/*
 * Bring one CRTC's hardware and software state back into agreement after
 * reading out whatever configuration the BIOS left programmed.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config.cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		crtc->plane = !plane;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			intel_connector_break_all_links(connector);
		}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 *  actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}
}
11108 
/*
 * Fix up an encoder whose read-out state is inconsistent: active
 * connectors but no active pipe, typically fallout from the register
 * restore on resume.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      drm_get_encoder_name(&encoder->base));
			encoder->disable(encoder);
		}

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder != encoder)
				continue;

			intel_connector_break_all_links(connector);
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
11151 
/*
 * Turn the VGA plane back off if something (e.g. the BIOS) re-enabled it
 * behind our back.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
		return;

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}
11173 
/*
 * Read the current hardware display state (pipe configs, shared DPLLs,
 * encoders and connectors) back into the software tracking structures.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* CRTCs: ask the platform hook whether each pipe is running. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		memset(&crtc->config, 0, sizeof(crtc->config));

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 &crtc->config);

		crtc->base.enabled = crtc->active;
		crtc->primary_enabled = crtc->active;

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* FIXME: Smash this into the new shared dpll infrastructure. */
	if (HAS_DDI(dev))
		intel_ddi_setup_hw_pll_state(dev);

	/* Shared DPLLs: recompute refcounts from the active CRTCs. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
		pll->active = 0;
		list_for_each_entry(crtc, &dev->mode_config.crtc_list,
				    base.head) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				pll->active++;
		}
		pll->refcount = pll->active;

		DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
			      pll->name, pll->refcount, pll->on);
	}

	/* Encoders: link each active encoder to the pipe it drives. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, &crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base),
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: set dpms state and (re)establish encoder links. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base),
			      connector->base.encoder ? "enabled" : "disabled");
	}
}
11254 
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		if (crtc->active && i915_fastboot) {
			intel_crtc_mode_from_pipe_config(crtc, &crtc->config);

			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		intel_sanitize_encoder(encoder);
	}

	/* Then fix up the crtcs themselves. */
	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
	}

	/* Turn off shared DPLLs that are powered up but unused. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->fb);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}
11331 
/*
 * Modeset initialization that has to wait until GEM is up: hardware
 * state init, overlay setup, and the initial hw state takeover.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/* Take over whatever state the BIOS/firmware left enabled. */
	mutex_lock(&dev->mode_config.mutex);
	drm_mode_config_reset(dev);
	intel_modeset_setup_hw_state(dev, false);
	mutex_unlock(&dev->mode_config.mutex);
}
11343 
/* Tear down all modeset state; roughly the inverse of intel_modeset_init(). */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_connector *connector;

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of rps, connectors, ...) would
	 * experience fancy races otherwise.
	 */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		intel_panel_destroy_backlight(connector);
		drm_sysfs_connector_remove(connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);
}
11396 
11397 /*
11398  * Return which encoder is currently attached for connector.
11399  */
11400 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
11401 {
11402 	return &intel_attached_encoder(connector)->base;
11403 }
11404 
11405 void intel_connector_attach_encoder(struct intel_connector *connector,
11406 				    struct intel_encoder *encoder)
11407 {
11408 	connector->encoder = encoder;
11409 	drm_mode_connector_attach_encoder(&connector->base,
11410 					  &encoder->base);
11411 }
11412 
11413 /*
11414  * set vga decode state - true == enable VGA decode
11415  */
11416 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
11417 {
11418 	struct drm_i915_private *dev_priv = dev->dev_private;
11419 	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
11420 	u16 gmch_ctrl;
11421 
11422 	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
11423 	if (state)
11424 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
11425 	else
11426 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
11427 	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
11428 	return 0;
11429 }
11430 
11431 #if 0
/* Snapshot of display-controller register state captured for an error
 * dump: one entry per pipe/plane/cursor, plus up to four transcoders
 * (eDP is accounted for separately on DDI platforms). */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below valid only if set */
		u32 source;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers below valid only if set */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
11474 
/*
 * Snapshot the display-controller registers for an error state dump.
 * Allocates with GFP_ATOMIC and uses the software-only power-well checks
 * (_sw variants), so no power wells are toggled while capturing.
 * Returns NULL when there are no pipes or the allocation fails.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	/* Skip pipes whose power domain is off. */
	for_each_pipe(i) {
		error->pipe[i].power_domain_on =
			intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		/* Cursor registers moved on IVB and later. */
		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
			error->cursor[i].control = I915_READ(CURCNTR(i));
			error->cursor[i].position = I915_READ(CURPOS(i));
			error->cursor[i].base = I915_READ(CURBASE(i));
		} else {
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
		}

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			intel_display_power_enabled_sw(dev,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
11556 
/* Shorthand: format into the error-state buffer via i915_error_printf(). */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
11558 
/*
 * Pretty-print a display error state previously captured by
 * intel_display_capture_error_state() into the error-state buffer m.
 * A NULL error is tolerated and prints nothing.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
11613 #endif
11614