1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  *
26  * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $
27  */
28 
29 #include <ddb/ddb.h>
30 #include <sys/limits.h>
31 
32 #include <dev/drm/drmP.h>
33 #include <dev/drm/drm.h>
34 #include "i915_drm.h"
35 #include "i915_drv.h"
36 #include "intel_drv.h"
37 #include <dev/drm/drm_edid.h>
38 #include <dev/drm/drm_dp_helper.h>
39 #include <dev/drm/drm_crtc_helper.h>
40 
41 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
42 
43 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
44 static void intel_update_watermarks(struct drm_device *dev);
45 static void intel_increase_pllclock(struct drm_crtc *crtc);
46 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 
48 typedef struct {
49 	/* given values */
50 	int n;
51 	int m1, m2;
52 	int p1, p2;
53 	/* derived values */
54 	int	dot;
55 	int	vco;
56 	int	m;
57 	int	p;
58 } intel_clock_t;
59 
60 typedef struct {
61 	int	min, max;
62 } intel_range_t;
63 
64 typedef struct {
65 	int	dot_limit;
66 	int	p2_slow, p2_fast;
67 } intel_p2_t;
68 
69 #define INTEL_P2_NUM		      2
70 typedef struct intel_limit intel_limit_t;
71 struct intel_limit {
72 	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
73 	intel_p2_t	    p2;
74 	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
75 			int, int, intel_clock_t *, intel_clock_t *);
76 };
77 
78 /* FDI */
79 #define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
80 
81 static bool
82 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
83 		    int target, int refclk, intel_clock_t *match_clock,
84 		    intel_clock_t *best_clock);
85 static bool
86 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
87 			int target, int refclk, intel_clock_t *match_clock,
88 			intel_clock_t *best_clock);
89 
90 static bool
91 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
92 		      int target, int refclk, intel_clock_t *match_clock,
93 		      intel_clock_t *best_clock);
94 static bool
95 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
96 			   int target, int refclk, intel_clock_t *match_clock,
97 			   intel_clock_t *best_clock);
98 
99 static inline u32 /* units of 100MHz */
100 intel_fdi_link_freq(struct drm_device *dev)
101 {
102 	if (IS_GEN5(dev)) {
103 		struct drm_i915_private *dev_priv = dev->dev_private;
104 		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
105 	} else
106 		return 27;
107 }
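
/*
 * Rough sanity check of the units (illustrative numbers only): on gen 5
 * the feedback-clock field read from FDI_PLL_BIOS_0 is biased by 2 and
 * expressed in 100MHz units, so a field value of 25 means
 * (25 + 2) * 100MHz = 2.7GHz -- the IRONLAKE_FDI_FREQ of 2700000 kHz
 * defined above.  Other PCH platforms assume that fixed 2.7GHz link,
 * hence the bare 27.
 */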
108 
109 static const intel_limit_t intel_limits_i8xx_dvo = {
110 	.dot = { .min = 25000, .max = 350000 },
111 	.vco = { .min = 930000, .max = 1400000 },
112 	.n = { .min = 3, .max = 16 },
113 	.m = { .min = 96, .max = 140 },
114 	.m1 = { .min = 18, .max = 26 },
115 	.m2 = { .min = 6, .max = 16 },
116 	.p = { .min = 4, .max = 128 },
117 	.p1 = { .min = 2, .max = 33 },
118 	.p2 = { .dot_limit = 165000,
119 		.p2_slow = 4, .p2_fast = 2 },
120 	.find_pll = intel_find_best_PLL,
121 };
122 
123 static const intel_limit_t intel_limits_i8xx_lvds = {
124 	.dot = { .min = 25000, .max = 350000 },
125 	.vco = { .min = 930000, .max = 1400000 },
126 	.n = { .min = 3, .max = 16 },
127 	.m = { .min = 96, .max = 140 },
128 	.m1 = { .min = 18, .max = 26 },
129 	.m2 = { .min = 6, .max = 16 },
130 	.p = { .min = 4, .max = 128 },
131 	.p1 = { .min = 1, .max = 6 },
132 	.p2 = { .dot_limit = 165000,
133 		.p2_slow = 14, .p2_fast = 7 },
134 	.find_pll = intel_find_best_PLL,
135 };
136 
137 static const intel_limit_t intel_limits_i9xx_sdvo = {
138 	.dot = { .min = 20000, .max = 400000 },
139 	.vco = { .min = 1400000, .max = 2800000 },
140 	.n = { .min = 1, .max = 6 },
141 	.m = { .min = 70, .max = 120 },
142 	.m1 = { .min = 10, .max = 22 },
143 	.m2 = { .min = 5, .max = 9 },
144 	.p = { .min = 5, .max = 80 },
145 	.p1 = { .min = 1, .max = 8 },
146 	.p2 = { .dot_limit = 200000,
147 		.p2_slow = 10, .p2_fast = 5 },
148 	.find_pll = intel_find_best_PLL,
149 };
150 
151 static const intel_limit_t intel_limits_i9xx_lvds = {
152 	.dot = { .min = 20000, .max = 400000 },
153 	.vco = { .min = 1400000, .max = 2800000 },
154 	.n = { .min = 1, .max = 6 },
155 	.m = { .min = 70, .max = 120 },
156 	.m1 = { .min = 10, .max = 22 },
157 	.m2 = { .min = 5, .max = 9 },
158 	.p = { .min = 7, .max = 98 },
159 	.p1 = { .min = 1, .max = 8 },
160 	.p2 = { .dot_limit = 112000,
161 		.p2_slow = 14, .p2_fast = 7 },
162 	.find_pll = intel_find_best_PLL,
163 };
164 
165 
166 static const intel_limit_t intel_limits_g4x_sdvo = {
167 	.dot = { .min = 25000, .max = 270000 },
168 	.vco = { .min = 1750000, .max = 3500000},
169 	.n = { .min = 1, .max = 4 },
170 	.m = { .min = 104, .max = 138 },
171 	.m1 = { .min = 17, .max = 23 },
172 	.m2 = { .min = 5, .max = 11 },
173 	.p = { .min = 10, .max = 30 },
174 	.p1 = { .min = 1, .max = 3},
175 	.p2 = { .dot_limit = 270000,
176 		.p2_slow = 10,
177 		.p2_fast = 10
178 	},
179 	.find_pll = intel_g4x_find_best_PLL,
180 };
181 
182 static const intel_limit_t intel_limits_g4x_hdmi = {
183 	.dot = { .min = 22000, .max = 400000 },
184 	.vco = { .min = 1750000, .max = 3500000},
185 	.n = { .min = 1, .max = 4 },
186 	.m = { .min = 104, .max = 138 },
187 	.m1 = { .min = 16, .max = 23 },
188 	.m2 = { .min = 5, .max = 11 },
189 	.p = { .min = 5, .max = 80 },
190 	.p1 = { .min = 1, .max = 8},
191 	.p2 = { .dot_limit = 165000,
192 		.p2_slow = 10, .p2_fast = 5 },
193 	.find_pll = intel_g4x_find_best_PLL,
194 };
195 
196 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
197 	.dot = { .min = 20000, .max = 115000 },
198 	.vco = { .min = 1750000, .max = 3500000 },
199 	.n = { .min = 1, .max = 3 },
200 	.m = { .min = 104, .max = 138 },
201 	.m1 = { .min = 17, .max = 23 },
202 	.m2 = { .min = 5, .max = 11 },
203 	.p = { .min = 28, .max = 112 },
204 	.p1 = { .min = 2, .max = 8 },
205 	.p2 = { .dot_limit = 0,
206 		.p2_slow = 14, .p2_fast = 14
207 	},
208 	.find_pll = intel_g4x_find_best_PLL,
209 };
210 
211 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
212 	.dot = { .min = 80000, .max = 224000 },
213 	.vco = { .min = 1750000, .max = 3500000 },
214 	.n = { .min = 1, .max = 3 },
215 	.m = { .min = 104, .max = 138 },
216 	.m1 = { .min = 17, .max = 23 },
217 	.m2 = { .min = 5, .max = 11 },
218 	.p = { .min = 14, .max = 42 },
219 	.p1 = { .min = 2, .max = 6 },
220 	.p2 = { .dot_limit = 0,
221 		.p2_slow = 7, .p2_fast = 7
222 	},
223 	.find_pll = intel_g4x_find_best_PLL,
224 };
225 
226 static const intel_limit_t intel_limits_g4x_display_port = {
227 	.dot = { .min = 161670, .max = 227000 },
228 	.vco = { .min = 1750000, .max = 3500000},
229 	.n = { .min = 1, .max = 2 },
230 	.m = { .min = 97, .max = 108 },
231 	.m1 = { .min = 0x10, .max = 0x12 },
232 	.m2 = { .min = 0x05, .max = 0x06 },
233 	.p = { .min = 10, .max = 20 },
234 	.p1 = { .min = 1, .max = 2},
235 	.p2 = { .dot_limit = 0,
236 		.p2_slow = 10, .p2_fast = 10 },
237 	.find_pll = intel_find_pll_g4x_dp,
238 };
239 
240 static const intel_limit_t intel_limits_pineview_sdvo = {
241 	.dot = { .min = 20000, .max = 400000},
242 	.vco = { .min = 1700000, .max = 3500000 },
243 	/* Pineview's Ncounter is a ring counter */
244 	.n = { .min = 3, .max = 6 },
245 	.m = { .min = 2, .max = 256 },
246 	/* Pineview only has one combined m divider, which we treat as m2. */
247 	.m1 = { .min = 0, .max = 0 },
248 	.m2 = { .min = 0, .max = 254 },
249 	.p = { .min = 5, .max = 80 },
250 	.p1 = { .min = 1, .max = 8 },
251 	.p2 = { .dot_limit = 200000,
252 		.p2_slow = 10, .p2_fast = 5 },
253 	.find_pll = intel_find_best_PLL,
254 };
255 
256 static const intel_limit_t intel_limits_pineview_lvds = {
257 	.dot = { .min = 20000, .max = 400000 },
258 	.vco = { .min = 1700000, .max = 3500000 },
259 	.n = { .min = 3, .max = 6 },
260 	.m = { .min = 2, .max = 256 },
261 	.m1 = { .min = 0, .max = 0 },
262 	.m2 = { .min = 0, .max = 254 },
263 	.p = { .min = 7, .max = 112 },
264 	.p1 = { .min = 1, .max = 8 },
265 	.p2 = { .dot_limit = 112000,
266 		.p2_slow = 14, .p2_fast = 14 },
267 	.find_pll = intel_find_best_PLL,
268 };
269 
270 /* Ironlake / Sandybridge
271  *
272  * We calculate clock using (register_value + 2) for N/M1/M2, so here
273  * the range value for them is (actual_value - 2).
274  */
275 static const intel_limit_t intel_limits_ironlake_dac = {
276 	.dot = { .min = 25000, .max = 350000 },
277 	.vco = { .min = 1760000, .max = 3510000 },
278 	.n = { .min = 1, .max = 5 },
279 	.m = { .min = 79, .max = 127 },
280 	.m1 = { .min = 12, .max = 22 },
281 	.m2 = { .min = 5, .max = 9 },
282 	.p = { .min = 5, .max = 80 },
283 	.p1 = { .min = 1, .max = 8 },
284 	.p2 = { .dot_limit = 225000,
285 		.p2_slow = 10, .p2_fast = 5 },
286 	.find_pll = intel_g4x_find_best_PLL,
287 };
288 
289 static const intel_limit_t intel_limits_ironlake_single_lvds = {
290 	.dot = { .min = 25000, .max = 350000 },
291 	.vco = { .min = 1760000, .max = 3510000 },
292 	.n = { .min = 1, .max = 3 },
293 	.m = { .min = 79, .max = 118 },
294 	.m1 = { .min = 12, .max = 22 },
295 	.m2 = { .min = 5, .max = 9 },
296 	.p = { .min = 28, .max = 112 },
297 	.p1 = { .min = 2, .max = 8 },
298 	.p2 = { .dot_limit = 225000,
299 		.p2_slow = 14, .p2_fast = 14 },
300 	.find_pll = intel_g4x_find_best_PLL,
301 };
302 
303 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
304 	.dot = { .min = 25000, .max = 350000 },
305 	.vco = { .min = 1760000, .max = 3510000 },
306 	.n = { .min = 1, .max = 3 },
307 	.m = { .min = 79, .max = 127 },
308 	.m1 = { .min = 12, .max = 22 },
309 	.m2 = { .min = 5, .max = 9 },
310 	.p = { .min = 14, .max = 56 },
311 	.p1 = { .min = 2, .max = 8 },
312 	.p2 = { .dot_limit = 225000,
313 		.p2_slow = 7, .p2_fast = 7 },
314 	.find_pll = intel_g4x_find_best_PLL,
315 };
316 
317 /* LVDS 100MHz refclk limits. */
318 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
319 	.dot = { .min = 25000, .max = 350000 },
320 	.vco = { .min = 1760000, .max = 3510000 },
321 	.n = { .min = 1, .max = 2 },
322 	.m = { .min = 79, .max = 126 },
323 	.m1 = { .min = 12, .max = 22 },
324 	.m2 = { .min = 5, .max = 9 },
325 	.p = { .min = 28, .max = 112 },
326 	.p1 = { .min = 2, .max = 8 },
327 	.p2 = { .dot_limit = 225000,
328 		.p2_slow = 14, .p2_fast = 14 },
329 	.find_pll = intel_g4x_find_best_PLL,
330 };
331 
332 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
333 	.dot = { .min = 25000, .max = 350000 },
334 	.vco = { .min = 1760000, .max = 3510000 },
335 	.n = { .min = 1, .max = 3 },
336 	.m = { .min = 79, .max = 126 },
337 	.m1 = { .min = 12, .max = 22 },
338 	.m2 = { .min = 5, .max = 9 },
339 	.p = { .min = 14, .max = 42 },
340 	.p1 = { .min = 2, .max = 6 },
341 	.p2 = { .dot_limit = 225000,
342 		.p2_slow = 7, .p2_fast = 7 },
343 	.find_pll = intel_g4x_find_best_PLL,
344 };
345 
346 static const intel_limit_t intel_limits_ironlake_display_port = {
347 	.dot = { .min = 25000, .max = 350000 },
348 	.vco = { .min = 1760000, .max = 3510000},
349 	.n = { .min = 1, .max = 2 },
350 	.m = { .min = 81, .max = 90 },
351 	.m1 = { .min = 12, .max = 22 },
352 	.m2 = { .min = 5, .max = 9 },
353 	.p = { .min = 10, .max = 20 },
354 	.p1 = { .min = 1, .max = 2},
355 	.p2 = { .dot_limit = 0,
356 		.p2_slow = 10, .p2_fast = 10 },
357 	.find_pll = intel_find_pll_ironlake_dp,
358 };
359 
360 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
361 						int refclk)
362 {
363 	struct drm_device *dev = crtc->dev;
364 	struct drm_i915_private *dev_priv = dev->dev_private;
365 	const intel_limit_t *limit;
366 
367 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
368 		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
369 		    LVDS_CLKB_POWER_UP) {
370 			/* LVDS dual channel */
371 			if (refclk == 100000)
372 				limit = &intel_limits_ironlake_dual_lvds_100m;
373 			else
374 				limit = &intel_limits_ironlake_dual_lvds;
375 		} else {
376 			if (refclk == 100000)
377 				limit = &intel_limits_ironlake_single_lvds_100m;
378 			else
379 				limit = &intel_limits_ironlake_single_lvds;
380 		}
381 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
382 			HAS_eDP)
383 		limit = &intel_limits_ironlake_display_port;
384 	else
385 		limit = &intel_limits_ironlake_dac;
386 
387 	return limit;
388 }
389 
390 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
391 {
392 	struct drm_device *dev = crtc->dev;
393 	struct drm_i915_private *dev_priv = dev->dev_private;
394 	const intel_limit_t *limit;
395 
396 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
397 		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
398 		    LVDS_CLKB_POWER_UP)
399 			/* LVDS with dual channel */
400 			limit = &intel_limits_g4x_dual_channel_lvds;
401 		else
402 			/* LVDS with single channel */
403 			limit = &intel_limits_g4x_single_channel_lvds;
404 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
405 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
406 		limit = &intel_limits_g4x_hdmi;
407 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
408 		limit = &intel_limits_g4x_sdvo;
409 	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
410 		limit = &intel_limits_g4x_display_port;
411 	} else /* fall back to this limit for all other output types */
412 		limit = &intel_limits_i9xx_sdvo;
413 
414 	return limit;
415 }
416 
417 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
418 {
419 	struct drm_device *dev = crtc->dev;
420 	const intel_limit_t *limit;
421 
422 	if (HAS_PCH_SPLIT(dev))
423 		limit = intel_ironlake_limit(crtc, refclk);
424 	else if (IS_G4X(dev)) {
425 		limit = intel_g4x_limit(crtc);
426 	} else if (IS_PINEVIEW(dev)) {
427 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 			limit = &intel_limits_pineview_lvds;
429 		else
430 			limit = &intel_limits_pineview_sdvo;
431 	} else if (!IS_GEN2(dev)) {
432 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
433 			limit = &intel_limits_i9xx_lvds;
434 		else
435 			limit = &intel_limits_i9xx_sdvo;
436 	} else {
437 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
438 			limit = &intel_limits_i8xx_lvds;
439 		else
440 			limit = &intel_limits_i8xx_dvo;
441 	}
442 	return limit;
443 }
444 
445 /* m1 is reserved as 0 in Pineview, n is a ring counter */
446 static void pineview_clock(int refclk, intel_clock_t *clock)
447 {
448 	clock->m = clock->m2 + 2;
449 	clock->p = clock->p1 * clock->p2;
450 	clock->vco = refclk * clock->m / clock->n;
451 	clock->dot = clock->vco / clock->p;
452 }
453 
454 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
455 {
456 	if (IS_PINEVIEW(dev)) {
457 		pineview_clock(refclk, clock);
458 		return;
459 	}
460 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
461 	clock->p = clock->p1 * clock->p2;
462 	clock->vco = refclk * clock->m / (clock->n + 2);
463 	clock->dot = clock->vco / clock->p;
464 }
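
/*
 * Worked example of the non-Pineview formulas above (illustrative values
 * only): with refclk = 96000 kHz, m1 = 12, m2 = 5, n = 1, p1 = 2, p2 = 10:
 *
 *   m   = 5 * (12 + 2) + (5 + 2) = 77
 *   p   = 2 * 10                 = 20
 *   vco = 96000 * 77 / (1 + 2)   = 2464000 kHz (2.464GHz)
 *   dot = 2464000 / 20           = 123200 kHz  (123.2MHz)
 *
 * which falls inside the intel_limits_i9xx_sdvo vco and dot ranges.
 */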
465 
466 /**
467  * Returns whether any output on the specified pipe is of the specified type
468  */
469 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
470 {
471 	struct drm_device *dev = crtc->dev;
472 	struct drm_mode_config *mode_config = &dev->mode_config;
473 	struct intel_encoder *encoder;
474 
475 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
476 		if (encoder->base.crtc == crtc && encoder->type == type)
477 			return true;
478 
479 	return false;
480 }
481 
482 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
483 /**
484  * Returns whether the given set of divisors is valid for a given refclk with
485  * the given connectors.
486  */
487 
488 static bool intel_PLL_is_valid(struct drm_device *dev,
489 			       const intel_limit_t *limit,
490 			       const intel_clock_t *clock)
491 {
492 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
493 		INTELPllInvalid("p1 out of range\n");
494 	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
495 		INTELPllInvalid("p out of range\n");
496 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
497 		INTELPllInvalid("m2 out of range\n");
498 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
499 		INTELPllInvalid("m1 out of range\n");
500 	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
501 		INTELPllInvalid("m1 <= m2\n");
502 	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
503 		INTELPllInvalid("m out of range\n");
504 	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
505 		INTELPllInvalid("n out of range\n");
506 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
507 		INTELPllInvalid("vco out of range\n");
508 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
509 	 * connector, etc., rather than just a single range.
510 	 */
511 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
512 		INTELPllInvalid("dot out of range\n");
513 
514 	return true;
515 }
516 
517 static bool
518 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
519 		    int target, int refclk, intel_clock_t *match_clock,
520 		    intel_clock_t *best_clock)
521 
522 {
523 	struct drm_device *dev = crtc->dev;
524 	struct drm_i915_private *dev_priv = dev->dev_private;
525 	intel_clock_t clock;
526 	int err = target;
527 
528 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
529 	    (I915_READ(LVDS)) != 0) {
530 		/*
531 		 * For LVDS, if the panel is on, just rely on its current
532 		 * settings for dual-channel.  We haven't figured out how to
533 		 * reliably set up different single/dual channel state, if we
534 		 * even can.
535 		 */
536 		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
537 		    LVDS_CLKB_POWER_UP)
538 			clock.p2 = limit->p2.p2_fast;
539 		else
540 			clock.p2 = limit->p2.p2_slow;
541 	} else {
542 		if (target < limit->p2.dot_limit)
543 			clock.p2 = limit->p2.p2_slow;
544 		else
545 			clock.p2 = limit->p2.p2_fast;
546 	}
547 
548 	memset(best_clock, 0, sizeof(*best_clock));
549 
550 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
551 	     clock.m1++) {
552 		for (clock.m2 = limit->m2.min;
553 		     clock.m2 <= limit->m2.max; clock.m2++) {
554 			/* m1 is always 0 in Pineview */
555 			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
556 				break;
557 			for (clock.n = limit->n.min;
558 			     clock.n <= limit->n.max; clock.n++) {
559 				for (clock.p1 = limit->p1.min;
560 					clock.p1 <= limit->p1.max; clock.p1++) {
561 					int this_err;
562 
563 					intel_clock(dev, refclk, &clock);
564 					if (!intel_PLL_is_valid(dev, limit,
565 								&clock))
566 						continue;
567 					if (match_clock &&
568 					    clock.p != match_clock->p)
569 						continue;
570 
571 					this_err = abs(clock.dot - target);
572 					if (this_err < err) {
573 						*best_clock = clock;
574 						err = this_err;
575 					}
576 				}
577 			}
578 		}
579 	}
580 
581 	return (err != target);
582 }
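
/*
 * Note that the search above is an exhaustive sweep over (m1, m2, n, p1),
 * with p2 fixed up front by the LVDS channel mode or the dot_limit
 * comparison.  Since err starts out equal to target, the return value
 * (err != target) just reports whether any divisor set both passed
 * intel_PLL_is_valid() and improved on the initial error; *best_clock
 * stays zeroed otherwise.
 */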
583 
584 static bool
585 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
586 			int target, int refclk, intel_clock_t *match_clock,
587 			intel_clock_t *best_clock)
588 {
589 	struct drm_device *dev = crtc->dev;
590 	struct drm_i915_private *dev_priv = dev->dev_private;
591 	intel_clock_t clock;
592 	int max_n;
593 	bool found;
594 	/* target/256 + target/512 = target * 3/512, roughly target * 0.00586 */
595 	int err_most = (target >> 8) + (target >> 9);
596 	found = false;
597 
598 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
599 		int lvds_reg;
600 
601 		if (HAS_PCH_SPLIT(dev))
602 			lvds_reg = PCH_LVDS;
603 		else
604 			lvds_reg = LVDS;
605 		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
606 		    LVDS_CLKB_POWER_UP)
607 			clock.p2 = limit->p2.p2_fast;
608 		else
609 			clock.p2 = limit->p2.p2_slow;
610 	} else {
611 		if (target < limit->p2.dot_limit)
612 			clock.p2 = limit->p2.p2_slow;
613 		else
614 			clock.p2 = limit->p2.p2_fast;
615 	}
616 
617 	memset(best_clock, 0, sizeof(*best_clock));
618 	max_n = limit->n.max;
619 	/* based on hardware requirement, prefer smaller n for better precision */
620 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
621 		/* based on hardware requirement, prefer larger m1 and m2 */
622 		for (clock.m1 = limit->m1.max;
623 		     clock.m1 >= limit->m1.min; clock.m1--) {
624 			for (clock.m2 = limit->m2.max;
625 			     clock.m2 >= limit->m2.min; clock.m2--) {
626 				for (clock.p1 = limit->p1.max;
627 				     clock.p1 >= limit->p1.min; clock.p1--) {
628 					int this_err;
629 
630 					intel_clock(dev, refclk, &clock);
631 					if (!intel_PLL_is_valid(dev, limit,
632 								&clock))
633 						continue;
634 					if (match_clock &&
635 					    clock.p != match_clock->p)
636 						continue;
637 
638 					this_err = abs(clock.dot - target);
639 					if (this_err < err_most) {
640 						*best_clock = clock;
641 						err_most = this_err;
642 						max_n = clock.n;
643 						found = true;
644 					}
645 				}
646 			}
647 		}
648 	}
649 	return found;
650 }
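
/*
 * The loop ordering above encodes the stated hardware preferences: n
 * ascends (smaller n is better) while m1, m2 and p1 descend.  Setting
 * max_n = clock.n on each accepted candidate keeps the outer loop from
 * ever advancing to a larger n once a valid candidate exists, so later
 * iterations can only refine the error at the current n.
 */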
651 
652 static bool
653 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
654 			   int target, int refclk, intel_clock_t *match_clock,
655 			   intel_clock_t *best_clock)
656 {
657 	struct drm_device *dev = crtc->dev;
658 	intel_clock_t clock;
659 
660 	if (target < 200000) {
661 		clock.n = 1;
662 		clock.p1 = 2;
663 		clock.p2 = 10;
664 		clock.m1 = 12;
665 		clock.m2 = 9;
666 	} else {
667 		clock.n = 2;
668 		clock.p1 = 1;
669 		clock.p2 = 10;
670 		clock.m1 = 14;
671 		clock.m2 = 8;
672 	}
673 	intel_clock(dev, refclk, &clock);
674 	memcpy(best_clock, &clock, sizeof(intel_clock_t));
675 	return true;
676 }
677 
678 /* DisplayPort has only two link frequencies, 162MHz and 270MHz */
679 static bool
680 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
681 		      int target, int refclk, intel_clock_t *match_clock,
682 		      intel_clock_t *best_clock)
683 {
684 	intel_clock_t clock;
685 	if (target < 200000) {
686 		clock.p1 = 2;
687 		clock.p2 = 10;
688 		clock.n = 2;
689 		clock.m1 = 23;
690 		clock.m2 = 8;
691 	} else {
692 		clock.p1 = 1;
693 		clock.p2 = 10;
694 		clock.n = 1;
695 		clock.m1 = 14;
696 		clock.m2 = 2;
697 	}
698 	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
699 	clock.p = (clock.p1 * clock.p2);
700 	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
701 	clock.vco = 0;
702 	memcpy(best_clock, &clock, sizeof(intel_clock_t));
703 	return true;
704 }
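
/*
 * Quick check of the fixed divisors above against the two DP link rates,
 * using the 96000 kHz refclk hardcoded in the dot computation
 * (illustrative arithmetic only):
 *
 *   low:  m = 5 * (23 + 2) + (8 + 2) = 135, p = 2 * 10 = 20
 *         dot = 96000 * 135 / (2 + 2) / 20 = 162000 kHz (162MHz)
 *   high: m = 5 * (14 + 2) + (2 + 2) = 84,  p = 1 * 10 = 10
 *         dot = 96000 * 84 / (1 + 2) / 10  = 268800 kHz (~270MHz)
 */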
705 
706 /**
707  * intel_wait_for_vblank - wait for vblank on a given pipe
708  * @dev: drm device
709  * @pipe: pipe to wait for
710  *
711  * Wait for vblank to occur on a given pipe.  Needed for various bits of
712  * mode setting code.
713  */
714 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
715 {
716 	struct drm_i915_private *dev_priv = dev->dev_private;
717 	int pipestat_reg = PIPESTAT(pipe);
718 
719 	/* Clear existing vblank status. Note this will clear any other
720 	 * sticky status fields as well.
721 	 *
722 	 * This races with i915_driver_irq_handler() with the result
723 	 * that either function could miss a vblank event.  Here it is not
724 	 * fatal, as we will either wait upon the next vblank interrupt or
725 	 * timeout.  Generally speaking intel_wait_for_vblank() is only
726 	 * called during modeset at which time the GPU should be idle and
727 	 * should *not* be performing page flips and thus not waiting on
728 	 * vblanks...
729 	 * Currently, the result of us stealing a vblank from the irq
730 	 * handler is that a single frame will be skipped during swapbuffers.
731 	 */
732 	I915_WRITE(pipestat_reg,
733 		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
734 
735 	/* Wait for vblank interrupt bit to set */
736 	if (_intel_wait_for(dev,
737 	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
738 	    50, 1, "915vbl"))
739 		DRM_DEBUG_KMS("vblank wait timed out\n");
740 }
741 
742 /**
743  * intel_wait_for_pipe_off - wait for pipe to turn off
744  * @dev: drm device
745  * @pipe: pipe to wait for
746  *
747  * After disabling a pipe, we can't wait for vblank in the usual way,
748  * spinning on the vblank interrupt status bit, since we won't actually
749  * see an interrupt when the pipe is disabled.
750  *
751  * On Gen4 and above:
752  *   wait for the pipe register state bit to turn off
753  *
754  * Otherwise:
755  *   wait for the display line value to settle (it usually
756  *   ends up stopping at the start of the next frame).
757  *
758  */
759 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
760 {
761 	struct drm_i915_private *dev_priv = dev->dev_private;
762 
763 	if (INTEL_INFO(dev)->gen >= 4) {
764 		int reg = PIPECONF(pipe);
765 
766 		/* Wait for the Pipe State to go off */
767 		if (_intel_wait_for(dev,
768 		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
769 		    1, "915pip"))
770 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
771 	} else {
772 		u32 last_line;
773 		int reg = PIPEDSL(pipe);
774 		unsigned long timeout = jiffies + msecs_to_jiffies(100);
775 
776 		/* Wait for the display line to settle */
777 		do {
778 			last_line = I915_READ(reg) & DSL_LINEMASK;
779 			DELAY(5000);
780 		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
781 			 time_after(timeout, jiffies));
782 		if (time_after(jiffies, timeout))
783 			DRM_DEBUG_KMS("pipe_off wait timed out\n");
784 	}
785 }
786 
787 static const char *state_string(bool enabled)
788 {
789 	return enabled ? "on" : "off";
790 }
791 
792 /* Only for pre-ILK configs */
793 static void assert_pll(struct drm_i915_private *dev_priv,
794 		       enum i915_pipe pipe, bool state)
795 {
796 	int reg;
797 	u32 val;
798 	bool cur_state;
799 
800 	reg = DPLL(pipe);
801 	val = I915_READ(reg);
802 	cur_state = !!(val & DPLL_VCO_ENABLE);
803 	if (cur_state != state)
804 		kprintf("PLL state assertion failure (expected %s, current %s)\n",
805 		    state_string(state), state_string(cur_state));
806 }
807 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
808 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
809 
810 /* For ILK+ */
811 static void assert_pch_pll(struct drm_i915_private *dev_priv,
812 			   enum i915_pipe pipe, bool state)
813 {
814 	int reg;
815 	u32 val;
816 	bool cur_state;
817 
818 	if (HAS_PCH_CPT(dev_priv->dev)) {
819 		u32 pch_dpll;
820 
821 		pch_dpll = I915_READ(PCH_DPLL_SEL);
822 
823 		/* Make sure the selected PLL is enabled to the transcoder */
824 		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
825 		    ("transcoder %d PLL not enabled\n", pipe));
826 
827 		/* Convert the transcoder pipe number to a pll pipe number */
828 		pipe = (pch_dpll >> (4 * pipe)) & 1;
829 	}
830 
831 	reg = PCH_DPLL(pipe);
832 	val = I915_READ(reg);
833 	cur_state = !!(val & DPLL_VCO_ENABLE);
834 	if (cur_state != state)
835 		kprintf("PCH PLL state assertion failure (expected %s, current %s)\n",
836 		    state_string(state), state_string(cur_state));
837 }
838 #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
839 #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
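
/*
 * On CPT, PCH_DPLL_SEL packs one nibble per transcoder: bit 3 of the
 * nibble is that transcoder's DPLL enable and bit 0 selects which PLL
 * feeds it (0 = DPLL A, 1 = DPLL B), which is what the shifts in
 * assert_pch_pll() decode.  For a hypothetical PCH_DPLL_SEL value of
 * 0x90, transcoder B's nibble is 0x9: enabled and driven by DPLL B.
 */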
840 
841 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
842 			  enum i915_pipe pipe, bool state)
843 {
844 	int reg;
845 	u32 val;
846 	bool cur_state;
847 
848 	reg = FDI_TX_CTL(pipe);
849 	val = I915_READ(reg);
850 	cur_state = !!(val & FDI_TX_ENABLE);
851 	if (cur_state != state)
852 		kprintf("FDI TX state assertion failure (expected %s, current %s)\n",
853 		    state_string(state), state_string(cur_state));
854 }
855 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
856 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
857 
858 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
859 			  enum i915_pipe pipe, bool state)
860 {
861 	int reg;
862 	u32 val;
863 	bool cur_state;
864 
865 	reg = FDI_RX_CTL(pipe);
866 	val = I915_READ(reg);
867 	cur_state = !!(val & FDI_RX_ENABLE);
868 	if (cur_state != state)
869 		kprintf("FDI RX state assertion failure (expected %s, current %s)\n",
870 		    state_string(state), state_string(cur_state));
871 }
872 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
873 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
874 
875 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
876 				      enum i915_pipe pipe)
877 {
878 	int reg;
879 	u32 val;
880 
881 	/* ILK FDI PLL is always enabled */
882 	if (dev_priv->info->gen == 5)
883 		return;
884 
885 	reg = FDI_TX_CTL(pipe);
886 	val = I915_READ(reg);
887 	if (!(val & FDI_TX_PLL_ENABLE))
888 		kprintf("FDI TX PLL assertion failure, should be active but is disabled\n");
889 }
890 
891 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
892 				      enum i915_pipe pipe)
893 {
894 	int reg;
895 	u32 val;
896 
897 	reg = FDI_RX_CTL(pipe);
898 	val = I915_READ(reg);
899 	if (!(val & FDI_RX_PLL_ENABLE))
900 		kprintf("FDI RX PLL assertion failure, should be active but is disabled\n");
901 }
902 
903 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
904 				  enum i915_pipe pipe)
905 {
906 	int pp_reg, lvds_reg;
907 	u32 val;
908 	enum i915_pipe panel_pipe = PIPE_A;
909 	bool locked = true;
910 
911 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
912 		pp_reg = PCH_PP_CONTROL;
913 		lvds_reg = PCH_LVDS;
914 	} else {
915 		pp_reg = PP_CONTROL;
916 		lvds_reg = LVDS;
917 	}
918 
919 	val = I915_READ(pp_reg);
920 	if (!(val & PANEL_POWER_ON) ||
921 	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
922 		locked = false;
923 
924 	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
925 		panel_pipe = PIPE_B;
926 
927 	if (panel_pipe == pipe && locked)
928 		kprintf("panel assertion failure, pipe %c regs locked\n",
929 	     pipe_name(pipe));
930 }
931 
932 void assert_pipe(struct drm_i915_private *dev_priv,
933 		 enum i915_pipe pipe, bool state)
934 {
935 	int reg;
936 	u32 val;
937 	bool cur_state;
938 
939 	/* if the pipe A quirk applies, the pipe must always be on */
940 	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
941 		state = true;
942 
943 	reg = PIPECONF(pipe);
944 	val = I915_READ(reg);
945 	cur_state = !!(val & PIPECONF_ENABLE);
946 	if (cur_state != state)
947 		kprintf("pipe %c assertion failure (expected %s, current %s)\n",
948 		    pipe_name(pipe), state_string(state), state_string(cur_state));
949 }
950 
951 static void assert_plane(struct drm_i915_private *dev_priv,
952 			 enum plane plane, bool state)
953 {
954 	int reg;
955 	u32 val;
956 	bool cur_state;
957 
958 	reg = DSPCNTR(plane);
959 	val = I915_READ(reg);
960 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
961 	if (cur_state != state)
962 		kprintf("plane %c assertion failure, (expected %s, current %s)\n",
963 		       plane_name(plane), state_string(state), state_string(cur_state));
964 }
965 
966 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
967 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
968 
969 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
970 				   enum i915_pipe pipe)
971 {
972 	int reg, i;
973 	u32 val;
974 	int cur_pipe;
975 
976 	/* Planes are fixed to pipes on ILK+ */
977 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
978 		reg = DSPCNTR(pipe);
979 		val = I915_READ(reg);
980 		if ((val & DISPLAY_PLANE_ENABLE) != 0)
981 			kprintf("plane %c assertion failure, should be disabled but not\n",
982 			       plane_name(pipe));
983 		return;
984 	}
985 
986 	/* Need to check both planes against the pipe */
987 	for (i = 0; i < 2; i++) {
988 		reg = DSPCNTR(i);
989 		val = I915_READ(reg);
990 		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
991 			DISPPLANE_SEL_PIPE_SHIFT;
992 		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
993 			kprintf("plane %c assertion failure, should be off on pipe %c but is still active\n",
994 		     plane_name(i), pipe_name(pipe));
995 	}
996 }
997 
998 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
999 {
1000 	u32 val;
1001 	bool enabled;
1002 
1003 	val = I915_READ(PCH_DREF_CONTROL);
1004 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1005 			    DREF_SUPERSPREAD_SOURCE_MASK));
1006 	if (!enabled)
1007 		kprintf("PCH refclk assertion failure, should be active but is disabled\n");
1008 }
1009 
1010 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1011 				       enum i915_pipe pipe)
1012 {
1013 	int reg;
1014 	u32 val;
1015 	bool enabled;
1016 
1017 	reg = TRANSCONF(pipe);
1018 	val = I915_READ(reg);
1019 	enabled = !!(val & TRANS_ENABLE);
1020 	if (enabled)
1021 		kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n",
1022 	     pipe_name(pipe));
1023 }
1024 
1025 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1026 			      enum i915_pipe pipe, u32 val)
1027 {
1028 	if ((val & PORT_ENABLE) == 0)
1029 		return false;
1030 
1031 	if (HAS_PCH_CPT(dev_priv->dev)) {
1032 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1033 			return false;
1034 	} else {
1035 		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1036 			return false;
1037 	}
1038 	return true;
1039 }
1040 
1041 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1042 			      enum i915_pipe pipe, u32 val)
1043 {
1044 	if ((val & LVDS_PORT_EN) == 0)
1045 		return false;
1046 
1047 	if (HAS_PCH_CPT(dev_priv->dev)) {
1048 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1049 			return false;
1050 	} else {
1051 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1052 			return false;
1053 	}
1054 	return true;
1055 }
1056 
1057 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1058 			      enum i915_pipe pipe, u32 val)
1059 {
1060 	if ((val & ADPA_DAC_ENABLE) == 0)
1061 		return false;
1062 	if (HAS_PCH_CPT(dev_priv->dev)) {
1063 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1064 			return false;
1065 	} else {
1066 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1067 			return false;
1068 	}
1069 	return true;
1070 }
1071 
1072 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1073 			    enum i915_pipe pipe, u32 port_sel, u32 val)
1074 {
1075 	if ((val & DP_PORT_EN) == 0)
1076 		return false;
1077 
1078 	if (HAS_PCH_CPT(dev_priv->dev)) {
1079 		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1080 		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1081 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1082 			return false;
1083 	} else {
1084 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1085 			return false;
1086 	}
1087 	return true;
1088 }
1089 
1090 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1091 				   enum i915_pipe pipe, int reg, u32 port_sel)
1092 {
1093 	u32 val = I915_READ(reg);
1094 	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
1095 		kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1096 	     reg, pipe_name(pipe));
1097 }
1098 
1099 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1100 				     enum i915_pipe pipe, int reg)
1101 {
1102 	u32 val = I915_READ(reg);
1103 	if (hdmi_pipe_enabled(dev_priv, pipe, val))
1104 		kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1105 	     reg, pipe_name(pipe));
1106 }
1107 
1108 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1109 				      enum i915_pipe pipe)
1110 {
1111 	int reg;
1112 	u32 val;
1113 
1114 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1115 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1116 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1117 
1118 	reg = PCH_ADPA;
1119 	val = I915_READ(reg);
1120 	if (adpa_pipe_enabled(dev_priv, pipe, val))
1121 		kprintf("PCH VGA enabled on transcoder %c, should be disabled\n",
1122 	     pipe_name(pipe));
1123 
1124 	reg = PCH_LVDS;
1125 	val = I915_READ(reg);
1126 	if (lvds_pipe_enabled(dev_priv, pipe, val))
1127 		kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n",
1128 	     pipe_name(pipe));
1129 
1130 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1131 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1132 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1133 }
1134 
1135 /**
1136  * intel_enable_pll - enable a PLL
1137  * @dev_priv: i915 private structure
1138  * @pipe: pipe PLL to enable
1139  *
1140  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1141  * make sure the PLL reg is writable first though, since the panel write
1142  * protect mechanism may be enabled.
1143  *
1144  * Note!  This is for pre-ILK only.
1145  */
1146 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1147 {
1148 	int reg;
1149 	u32 val;
1150 
1151 	/* No really, not for ILK+ */
1152 	KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));
1153 
1154 	/* PLL is protected by panel, make sure we can write it */
1155 	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1156 		assert_panel_unlocked(dev_priv, pipe);
1157 
1158 	reg = DPLL(pipe);
1159 	val = I915_READ(reg);
1160 	val |= DPLL_VCO_ENABLE;
1161 
1162 	/* We do this three times for luck */
1163 	I915_WRITE(reg, val);
1164 	POSTING_READ(reg);
1165 	DELAY(150); /* wait for warmup */
1166 	I915_WRITE(reg, val);
1167 	POSTING_READ(reg);
1168 	DELAY(150); /* wait for warmup */
1169 	I915_WRITE(reg, val);
1170 	POSTING_READ(reg);
1171 	DELAY(150); /* wait for warmup */
1172 }
1173 
1174 /**
1175  * intel_disable_pll - disable a PLL
1176  * @dev_priv: i915 private structure
1177  * @pipe: pipe PLL to disable
1178  *
1179  * Disable the PLL for @pipe, making sure the pipe is off first.
1180  *
1181  * Note!  This is for pre-ILK only.
1182  */
1183 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1184 {
1185 	int reg;
1186 	u32 val;
1187 
1188 	/* Don't disable pipe A or its PLL if the pipe A quirk needs them on */
1189 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1190 		return;
1191 
1192 	/* Make sure the pipe isn't still relying on us */
1193 	assert_pipe_disabled(dev_priv, pipe);
1194 
1195 	reg = DPLL(pipe);
1196 	val = I915_READ(reg);
1197 	val &= ~DPLL_VCO_ENABLE;
1198 	I915_WRITE(reg, val);
1199 	POSTING_READ(reg);
1200 }
1201 
1202 /**
1203  * intel_enable_pch_pll - enable PCH PLL
1204  * @dev_priv: i915 private structure
1205  * @pipe: pipe PLL to enable
1206  *
1207  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1208  * drives the transcoder clock.
1209  */
1210 static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
1211 				 enum i915_pipe pipe)
1212 {
1213 	int reg;
1214 	u32 val;
1215 
1216 	if (pipe > 1)
1217 		return;
1218 
1219 	/* PCH only available on ILK+ */
1220 	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1221 
1222 	/* PCH refclock must be enabled first */
1223 	assert_pch_refclk_enabled(dev_priv);
1224 
1225 	reg = PCH_DPLL(pipe);
1226 	val = I915_READ(reg);
1227 	val |= DPLL_VCO_ENABLE;
1228 	I915_WRITE(reg, val);
1229 	POSTING_READ(reg);
1230 	DELAY(200);
1231 }
1232 
1233 static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
1234 				  enum i915_pipe pipe)
1235 {
1236 	int reg;
1237 	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
1238 		pll_sel = TRANSC_DPLL_ENABLE;
1239 
1240 	if (pipe > 1)
1241 		return;
1242 
1243 	/* PCH only available on ILK+ */
1244 	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1245 
1246 	/* Make sure transcoder isn't still depending on us */
1247 	assert_transcoder_disabled(dev_priv, pipe);
1248 
1249 	if (pipe == 0)
1250 		pll_sel |= TRANSC_DPLLA_SEL;
1251 	else if (pipe == 1)
1252 		pll_sel |= TRANSC_DPLLB_SEL;
1253 
1254 
1255 	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
1256 		return;
1257 
1258 	reg = PCH_DPLL(pipe);
1259 	val = I915_READ(reg);
1260 	val &= ~DPLL_VCO_ENABLE;
1261 	I915_WRITE(reg, val);
1262 	POSTING_READ(reg);
1263 	DELAY(200);
1264 }
1265 
1266 static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1267 				    enum i915_pipe pipe)
1268 {
1269 	int reg;
1270 	u32 val, pipeconf_val;
1271 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1272 
1273 	/* PCH only available on ILK+ */
1274 	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
1275 
1276 	/* Make sure PCH DPLL is enabled */
1277 	assert_pch_pll_enabled(dev_priv, pipe);
1278 
1279 	/* FDI must be feeding us bits for PCH ports */
1280 	assert_fdi_tx_enabled(dev_priv, pipe);
1281 	assert_fdi_rx_enabled(dev_priv, pipe);
1282 
1283 
1284 	reg = TRANSCONF(pipe);
1285 	val = I915_READ(reg);
1286 	pipeconf_val = I915_READ(PIPECONF(pipe));
1287 
1288 	if (HAS_PCH_IBX(dev_priv->dev)) {
1289 		/*
1290 		 * make the BPC in transcoder be consistent with
1291 		 * that in pipeconf reg.
1292 		 */
1293 		val &= ~PIPE_BPC_MASK;
1294 		val |= pipeconf_val & PIPE_BPC_MASK;
1295 	}
1296 
1297 	val &= ~TRANS_INTERLACE_MASK;
1298 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1299 		if (HAS_PCH_IBX(dev_priv->dev) &&
1300 		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1301 			val |= TRANS_LEGACY_INTERLACED_ILK;
1302 		else
1303 			val |= TRANS_INTERLACED;
1304 	} else
1305 		val |= TRANS_PROGRESSIVE;
1306 
1307 	I915_WRITE(reg, val | TRANS_ENABLE);
1308 	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
1309 	    100, 1, "915trc"))
1310 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
1311 }
1312 
1313 static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1314 				     enum i915_pipe pipe)
1315 {
1316 	int reg;
1317 	u32 val;
1318 
1319 	/* FDI relies on the transcoder */
1320 	assert_fdi_tx_disabled(dev_priv, pipe);
1321 	assert_fdi_rx_disabled(dev_priv, pipe);
1322 
1323 	/* Ports must be off as well */
1324 	assert_pch_ports_disabled(dev_priv, pipe);
1325 
1326 	reg = TRANSCONF(pipe);
1327 	val = I915_READ(reg);
1328 	val &= ~TRANS_ENABLE;
1329 	I915_WRITE(reg, val);
1330 	/* wait for PCH transcoder off, transcoder state */
1331 	if (_intel_wait_for(dev_priv->dev,
1332 	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
1333 	    1, "915trd"))
1334 		DRM_ERROR("failed to disable transcoder %d\n", pipe);
1335 }
1336 
1337 /**
1338  * intel_enable_pipe - enable a pipe, asserting requirements
1339  * @dev_priv: i915 private structure
1340  * @pipe: pipe to enable
1341  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1342  *
1343  * Enable @pipe, making sure that various hardware specific requirements
1344  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1345  *
1346  * @pipe should be %PIPE_A or %PIPE_B.
1347  *
1348  * Will wait until the pipe is actually running (i.e. first vblank) before
1349  * returning.
1350  */
1351 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
1352 			      bool pch_port)
1353 {
1354 	int reg;
1355 	u32 val;
1356 
1357 	/*
1358 	 * A pipe without a PLL won't actually be able to drive bits from
1359 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1360 	 * need the check.
1361 	 */
1362 	if (!HAS_PCH_SPLIT(dev_priv->dev))
1363 		assert_pll_enabled(dev_priv, pipe);
1364 	else {
1365 		if (pch_port) {
1366 			/* if driving the PCH, we need FDI enabled */
1367 			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1368 			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1369 		}
1370 		/* FIXME: assert CPU port conditions for SNB+ */
1371 	}
1372 
1373 	reg = PIPECONF(pipe);
1374 	val = I915_READ(reg);
1375 	if (val & PIPECONF_ENABLE)
1376 		return;
1377 
1378 	I915_WRITE(reg, val | PIPECONF_ENABLE);
1379 	intel_wait_for_vblank(dev_priv->dev, pipe);
1380 }
1381 
1382 /**
1383  * intel_disable_pipe - disable a pipe, asserting requirements
1384  * @dev_priv: i915 private structure
1385  * @pipe: pipe to disable
1386  *
1387  * Disable @pipe, making sure that various hardware specific requirements
1388  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1389  *
1390  * @pipe should be %PIPE_A or %PIPE_B.
1391  *
1392  * Will wait until the pipe has shut down before returning.
1393  */
1394 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1395 			       enum i915_pipe pipe)
1396 {
1397 	int reg;
1398 	u32 val;
1399 
1400 	/*
1401 	 * Make sure planes won't keep trying to pump pixels to us,
1402 	 * or we might hang the display.
1403 	 */
1404 	assert_planes_disabled(dev_priv, pipe);
1405 
1406 	/* Don't disable pipe A or its PLL if the pipe A quirk needs them on */
1407 	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1408 		return;
1409 
1410 	reg = PIPECONF(pipe);
1411 	val = I915_READ(reg);
1412 	if ((val & PIPECONF_ENABLE) == 0)
1413 		return;
1414 
1415 	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1416 	intel_wait_for_pipe_off(dev_priv->dev, pipe);
1417 }
1418 
1419 /*
1420  * Plane regs are double buffered, going from enabled->disabled needs a
1421  * trigger in order to latch.  The display address reg provides this.
1422  */
1423 static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1424 				      enum plane plane)
1425 {
1426 	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1427 	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1428 }
1429 
1430 /**
1431  * intel_enable_plane - enable a display plane on a given pipe
1432  * @dev_priv: i915 private structure
1433  * @plane: plane to enable
1434  * @pipe: pipe being fed
1435  *
1436  * Enable @plane on @pipe, making sure that @pipe is running first.
1437  */
1438 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1439 			       enum plane plane, enum i915_pipe pipe)
1440 {
1441 	int reg;
1442 	u32 val;
1443 
1444 	/* If the pipe isn't enabled, we can't pump pixels and may hang */
1445 	assert_pipe_enabled(dev_priv, pipe);
1446 
1447 	reg = DSPCNTR(plane);
1448 	val = I915_READ(reg);
1449 	if (val & DISPLAY_PLANE_ENABLE)
1450 		return;
1451 
1452 	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1453 	intel_flush_display_plane(dev_priv, plane);
1454 	intel_wait_for_vblank(dev_priv->dev, pipe);
1455 }
1456 
1457 /**
1458  * intel_disable_plane - disable a display plane
1459  * @dev_priv: i915 private structure
1460  * @plane: plane to disable
1461  * @pipe: pipe consuming the data
1462  *
1463  * Disable @plane; should be an independent operation.
1464  */
1465 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1466 				enum plane plane, enum i915_pipe pipe)
1467 {
1468 	int reg;
1469 	u32 val;
1470 
1471 	reg = DSPCNTR(plane);
1472 	val = I915_READ(reg);
1473 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
1474 		return;
1475 
1476 	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1477 	intel_flush_display_plane(dev_priv, plane);
1478 	intel_wait_for_vblank(dev_priv->dev, pipe);
1479 }
1480 
1481 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1482 			   enum i915_pipe pipe, int reg, u32 port_sel)
1483 {
1484 	u32 val = I915_READ(reg);
1485 	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1486 		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1487 		I915_WRITE(reg, val & ~DP_PORT_EN);
1488 	}
1489 }
1490 
1491 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1492 			     enum i915_pipe pipe, int reg)
1493 {
1494 	u32 val = I915_READ(reg);
1495 	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1496 		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1497 			      reg, pipe);
1498 		I915_WRITE(reg, val & ~PORT_ENABLE);
1499 	}
1500 }
1501 
1502 /* Disable any ports connected to this transcoder */
1503 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1504 				    enum i915_pipe pipe)
1505 {
1506 	u32 reg, val;
1507 
1508 	val = I915_READ(PCH_PP_CONTROL);
1509 	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1510 
1511 	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1512 	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1513 	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1514 
1515 	reg = PCH_ADPA;
1516 	val = I915_READ(reg);
1517 	if (adpa_pipe_enabled(dev_priv, pipe, val))
1518 		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1519 
1520 	reg = PCH_LVDS;
1521 	val = I915_READ(reg);
1522 	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1523 		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1524 		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1525 		POSTING_READ(reg);
1526 		DELAY(100);
1527 	}
1528 
1529 	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1530 	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1531 	disable_pch_hdmi(dev_priv, pipe, HDMID);
1532 }
1533 
1534 static void i8xx_disable_fbc(struct drm_device *dev)
1535 {
1536 	struct drm_i915_private *dev_priv = dev->dev_private;
1537 	u32 fbc_ctl;
1538 
1539 	/* Disable compression */
1540 	fbc_ctl = I915_READ(FBC_CONTROL);
1541 	if ((fbc_ctl & FBC_CTL_EN) == 0)
1542 		return;
1543 
1544 	fbc_ctl &= ~FBC_CTL_EN;
1545 	I915_WRITE(FBC_CONTROL, fbc_ctl);
1546 
1547 	/* Wait for compressing bit to clear */
1548 	if (_intel_wait_for(dev,
1549 	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
1550 	    1, "915fbd")) {
1551 		DRM_DEBUG_KMS("FBC idle timed out\n");
1552 		return;
1553 	}
1554 
1555 	DRM_DEBUG_KMS("disabled FBC\n");
1556 }
1557 
1558 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1559 {
1560 	struct drm_device *dev = crtc->dev;
1561 	struct drm_i915_private *dev_priv = dev->dev_private;
1562 	struct drm_framebuffer *fb = crtc->fb;
1563 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1564 	struct drm_i915_gem_object *obj = intel_fb->obj;
1565 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1566 	int cfb_pitch;
1567 	int plane, i;
1568 	u32 fbc_ctl, fbc_ctl2;
1569 
1570 	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1571 	if (fb->pitches[0] < cfb_pitch)
1572 		cfb_pitch = fb->pitches[0];
1573 
1574 	/* FBC_CTL wants 64B units */
1575 	cfb_pitch = (cfb_pitch / 64) - 1;
1576 	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1577 
1578 	/* Clear old tags */
1579 	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1580 		I915_WRITE(FBC_TAG + (i * 4), 0);
1581 
1582 	/* Set it up... */
1583 	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1584 	fbc_ctl2 |= plane;
1585 	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1586 	I915_WRITE(FBC_FENCE_OFF, crtc->y);
1587 
1588 	/* enable it... */
1589 	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1590 	if (IS_I945GM(dev))
1591 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1592 	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1593 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1594 	fbc_ctl |= obj->fence_reg;
1595 	I915_WRITE(FBC_CONTROL, fbc_ctl);
1596 
1597 	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
1598 		      cfb_pitch, crtc->y, intel_crtc->plane);
1599 }
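
/*
 * Stride encoding example (illustrative numbers): a 1024-pixel-wide
 * 32bpp framebuffer has a 4096-byte pitch, so after the clamp against
 * the compressed buffer's pitch (cfb_size / FBC_LL_SIZE) the 64B-unit
 * encoding above yields (4096 / 64) - 1 = 63 in FBC_CTL's stride field.
 */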
1600 
1601 static bool i8xx_fbc_enabled(struct drm_device *dev)
1602 {
1603 	struct drm_i915_private *dev_priv = dev->dev_private;
1604 
1605 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1606 }
1607 
1608 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1609 {
1610 	struct drm_device *dev = crtc->dev;
1611 	struct drm_i915_private *dev_priv = dev->dev_private;
1612 	struct drm_framebuffer *fb = crtc->fb;
1613 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1614 	struct drm_i915_gem_object *obj = intel_fb->obj;
1615 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1616 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1617 	unsigned long stall_watermark = 200;
1618 	u32 dpfc_ctl;
1619 
1620 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1621 	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1622 	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1623 
1624 	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1625 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1626 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1627 	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1628 
1629 	/* enable it... */
1630 	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1631 
1632 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1633 }
1634 
1635 static void g4x_disable_fbc(struct drm_device *dev)
1636 {
1637 	struct drm_i915_private *dev_priv = dev->dev_private;
1638 	u32 dpfc_ctl;
1639 
1640 	/* Disable compression */
1641 	dpfc_ctl = I915_READ(DPFC_CONTROL);
1642 	if (dpfc_ctl & DPFC_CTL_EN) {
1643 		dpfc_ctl &= ~DPFC_CTL_EN;
1644 		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1645 
1646 		DRM_DEBUG_KMS("disabled FBC\n");
1647 	}
1648 }
1649 
1650 static bool g4x_fbc_enabled(struct drm_device *dev)
1651 {
1652 	struct drm_i915_private *dev_priv = dev->dev_private;
1653 
1654 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1655 }
1656 
1657 static void sandybridge_blit_fbc_update(struct drm_device *dev)
1658 {
1659 	struct drm_i915_private *dev_priv = dev->dev_private;
1660 	u32 blt_ecoskpd;
1661 
1662 	/* Make sure blitter notifies FBC of writes */
1663 	gen6_gt_force_wake_get(dev_priv);
1664 	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1665 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1666 		GEN6_BLITTER_LOCK_SHIFT;
1667 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1668 	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1669 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1670 	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1671 			 GEN6_BLITTER_LOCK_SHIFT);
1672 	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1673 	POSTING_READ(GEN6_BLITTER_ECOSKPD);
1674 	gen6_gt_force_wake_put(dev_priv);
1675 }
1676 
1677 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1678 {
1679 	struct drm_device *dev = crtc->dev;
1680 	struct drm_i915_private *dev_priv = dev->dev_private;
1681 	struct drm_framebuffer *fb = crtc->fb;
1682 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1683 	struct drm_i915_gem_object *obj = intel_fb->obj;
1684 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1685 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1686 	unsigned long stall_watermark = 200;
1687 	u32 dpfc_ctl;
1688 
1689 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1690 	dpfc_ctl &= DPFC_RESERVED;
1691 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1692 	/* Set persistent mode for front-buffer rendering, ala X. */
1693 	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1694 	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1695 	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1696 
1697 	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1698 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1699 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1700 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1701 	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1702 	/* enable it... */
1703 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1704 
1705 	if (IS_GEN6(dev)) {
1706 		I915_WRITE(SNB_DPFC_CTL_SA,
1707 			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1708 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1709 		sandybridge_blit_fbc_update(dev);
1710 	}
1711 
1712 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1713 }
1714 
1715 static void ironlake_disable_fbc(struct drm_device *dev)
1716 {
1717 	struct drm_i915_private *dev_priv = dev->dev_private;
1718 	u32 dpfc_ctl;
1719 
1720 	/* Disable compression */
1721 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1722 	if (dpfc_ctl & DPFC_CTL_EN) {
1723 		dpfc_ctl &= ~DPFC_CTL_EN;
1724 		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1725 
1726 		DRM_DEBUG_KMS("disabled FBC\n");
1727 	}
1728 }
1729 
1730 static bool ironlake_fbc_enabled(struct drm_device *dev)
1731 {
1732 	struct drm_i915_private *dev_priv = dev->dev_private;
1733 
1734 	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1735 }
1736 
1737 bool intel_fbc_enabled(struct drm_device *dev)
1738 {
1739 	struct drm_i915_private *dev_priv = dev->dev_private;
1740 
1741 	if (!dev_priv->display.fbc_enabled)
1742 		return false;
1743 
1744 	return dev_priv->display.fbc_enabled(dev);
1745 }
1746 
1747 static void intel_fbc_work_fn(void *arg, int pending)
1748 {
1749 	struct intel_fbc_work *work = arg;
1750 	struct drm_device *dev = work->crtc->dev;
1751 	struct drm_i915_private *dev_priv = dev->dev_private;
1752 
1753 	DRM_LOCK(dev);
1754 	if (work == dev_priv->fbc_work) {
1755 		/* Double check that we haven't switched fb without cancelling
1756 		 * the prior work.
1757 		 */
1758 		if (work->crtc->fb == work->fb) {
1759 			dev_priv->display.enable_fbc(work->crtc,
1760 						     work->interval);
1761 
1762 			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1763 			dev_priv->cfb_fb = work->crtc->fb->base.id;
1764 			dev_priv->cfb_y = work->crtc->y;
1765 		}
1766 
1767 		dev_priv->fbc_work = NULL;
1768 	}
1769 	DRM_UNLOCK(dev);
1770 
1771 	drm_free(work, DRM_MEM_KMS);
1772 }
1773 
1774 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1775 {
1776 	u_int pending;
1777 
1778 	if (dev_priv->fbc_work == NULL)
1779 		return;
1780 
1781 	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1782 
1783 	/* Synchronisation is provided by struct_mutex and checking of
1784 	 * dev_priv->fbc_work, so we can perform the cancellation
1785 	 * entirely asynchronously.
1786 	 */
1787 	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
1788 	    &pending) == 0)
1789 		/* task was canceled before it ran; clean up */
1790 		drm_free(dev_priv->fbc_work, DRM_MEM_KMS);
1791 
1792 	/* Mark the work as no longer wanted so that if it does
1793 	 * wake-up (because the work was already running and waiting
1794 	 * for our mutex), it will discover that it is no longer
1795 	 * necessary to run.
1796 	 */
1797 	dev_priv->fbc_work = NULL;
1798 }
1799 
1800 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1801 {
1802 	struct intel_fbc_work *work;
1803 	struct drm_device *dev = crtc->dev;
1804 	struct drm_i915_private *dev_priv = dev->dev_private;
1805 
1806 	if (!dev_priv->display.enable_fbc)
1807 		return;
1808 
1809 	intel_cancel_fbc_work(dev_priv);
1810 
1811 	work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
1812 	work->crtc = crtc;
1813 	work->fb = crtc->fb;
1814 	work->interval = interval;
1815 	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
1816 	    work);
1817 
1818 	dev_priv->fbc_work = work;
1819 
1820 	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1821 
1822 	/* Delay the actual enabling to let pageflipping cease and the
1823 	 * display to settle before starting the compression. Note that
1824 	 * this delay also serves a second purpose: it allows for a
1825 	 * vblank to pass after disabling the FBC before we attempt
1826 	 * to modify the control registers.
1827 	 *
1828 	 * A more complicated solution would involve tracking vblanks
1829 	 * following the termination of the page-flipping sequence
1830 	 * and indeed performing the enable as a co-routine and not
1831 	 * waiting synchronously upon the vblank.
1832 	 */
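	/* At 60Hz a vblank arrives every ~16.7ms, so the 50ms delay below
	 * spans roughly three vblank intervals -- comfortably more than
	 * the single vblank required above.
	 */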
1833 	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
1834 	    msecs_to_jiffies(50));
1835 }
1836 
1837 void intel_disable_fbc(struct drm_device *dev)
1838 {
1839 	struct drm_i915_private *dev_priv = dev->dev_private;
1840 
1841 	intel_cancel_fbc_work(dev_priv);
1842 
1843 	if (!dev_priv->display.disable_fbc)
1844 		return;
1845 
1846 	dev_priv->display.disable_fbc(dev);
1847 	dev_priv->cfb_plane = -1;
1848 }
1849 
1850 /**
1851  * intel_update_fbc - enable/disable FBC as needed
1852  * @dev: the drm_device
1853  *
1854  * Set up the framebuffer compression hardware at mode set time.  We
1855  * enable it if possible:
1856  *   - plane A only (on pre-965)
1857  *   - no pixel multiply/line duplication
1858  *   - no alpha buffer discard
1859  *   - no dual wide
1860  *   - framebuffer <= 2048 in width, 1536 in height
1861  *
1862  * We can't assume that any compression will take place (worst case),
1863  * so the compressed buffer has to be the same size as the uncompressed
1864  * one.  It also must reside (along with the line length buffer) in
1865  * stolen memory.
1866  *
1867  * We need to enable/disable FBC on a global basis.
1868  */
1869 static void intel_update_fbc(struct drm_device *dev)
1870 {
1871 	struct drm_i915_private *dev_priv = dev->dev_private;
1872 	struct drm_crtc *crtc = NULL, *tmp_crtc;
1873 	struct intel_crtc *intel_crtc;
1874 	struct drm_framebuffer *fb;
1875 	struct intel_framebuffer *intel_fb;
1876 	struct drm_i915_gem_object *obj;
1877 	int enable_fbc;
1878 
1879 	DRM_DEBUG_KMS("\n");
1880 
1881 	if (!i915_powersave)
1882 		return;
1883 
1884 	if (!I915_HAS_FBC(dev))
1885 		return;
1886 
1887 	/*
1888 	 * If FBC is already on, we just have to verify that we can
1889 	 * keep it that way...
1890 	 * Need to disable if:
1891 	 *   - more than one pipe is active
1892 	 *   - changing FBC params (stride, fence, mode)
1893 	 *   - new fb is too large to fit in compressed buffer
1894 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1895 	 */
1896 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1897 		if (tmp_crtc->enabled && tmp_crtc->fb) {
1898 			if (crtc) {
1899 				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1900 				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1901 				goto out_disable;
1902 			}
1903 			crtc = tmp_crtc;
1904 		}
1905 	}
1906 
1907 	if (!crtc || crtc->fb == NULL) {
1908 		DRM_DEBUG_KMS("no output, disabling\n");
1909 		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1910 		goto out_disable;
1911 	}
1912 
1913 	intel_crtc = to_intel_crtc(crtc);
1914 	fb = crtc->fb;
1915 	intel_fb = to_intel_framebuffer(fb);
1916 	obj = intel_fb->obj;
1917 
1918 	enable_fbc = i915_enable_fbc;
1919 	if (enable_fbc < 0) {
1920 		DRM_DEBUG_KMS("fbc set to per-chip default\n");
1921 		enable_fbc = 1;
1922 		if (INTEL_INFO(dev)->gen <= 6)
1923 			enable_fbc = 0;
1924 	}
1925 	if (!enable_fbc) {
1926 		DRM_DEBUG_KMS("fbc disabled per module param\n");
1927 		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1928 		goto out_disable;
1929 	}
1930 	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1931 		DRM_DEBUG_KMS("framebuffer too large, disabling "
1932 			      "compression\n");
1933 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1934 		goto out_disable;
1935 	}
1936 	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1937 	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1938 		DRM_DEBUG_KMS("mode incompatible with compression, "
1939 			      "disabling\n");
1940 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1941 		goto out_disable;
1942 	}
1943 	if ((crtc->mode.hdisplay > 2048) ||
1944 	    (crtc->mode.vdisplay > 1536)) {
1945 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1946 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1947 		goto out_disable;
1948 	}
1949 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1950 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1951 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1952 		goto out_disable;
1953 	}
1954 	if (obj->tiling_mode != I915_TILING_X ||
1955 	    obj->fence_reg == I915_FENCE_REG_NONE) {
1956 		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1957 		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1958 		goto out_disable;
1959 	}
1960 
1961 #ifdef DDB
1962 	/* If the kernel debugger is active, always disable compression */
1963 	if (db_active)
1964 		goto out_disable;
1965 #endif
1966 
1967 	/* If the scanout has not changed, don't modify the FBC settings.
1968 	 * Note that we make the fundamental assumption that the fb->obj
1969 	 * cannot be unpinned (and have its GTT offset and fence revoked)
1970 	 * without first being decoupled from the scanout and FBC disabled.
1971 	 */
1972 	if (dev_priv->cfb_plane == intel_crtc->plane &&
1973 	    dev_priv->cfb_fb == fb->base.id &&
1974 	    dev_priv->cfb_y == crtc->y)
1975 		return;
1976 
1977 	if (intel_fbc_enabled(dev)) {
1978 		/* We update FBC along two paths, after changing fb/crtc
1979 		 * configuration (modeswitching) and after page-flipping
1980 		 * finishes. For the latter, we know that not only did
1981 		 * we disable the FBC at the start of the page-flip
1982 		 * sequence, but also more than one vblank has passed.
1983 		 *
1984 		 * For the former case of modeswitching, it is possible
1985 		 * to switch between two FBC valid configurations
1986 		 * instantaneously so we do need to disable the FBC
1987 		 * before we can modify its control registers. We also
1988 		 * have to wait for the next vblank for that to take
1989 		 * effect. However, since we delay enabling FBC we can
1990 		 * assume that a vblank has passed since disabling and
1991 		 * that we can safely alter the registers in the deferred
1992 		 * callback.
1993 		 *
1994 		 * In the scenario that we go from a valid to invalid
1995 		 * and then back to valid FBC configuration we have
1996 		 * no strict enforcement that a vblank occurred since
1997 		 * disabling the FBC. However, along all current pipe
1998 		 * disabling paths we do need to wait for a vblank at
1999 		 * some point. And we wait before enabling FBC anyway.
2000 		 */
2001 		DRM_DEBUG_KMS("disabling active FBC for update\n");
2002 		intel_disable_fbc(dev);
2003 	}
2004 
2005 	intel_enable_fbc(crtc, 500);
2006 	return;
2007 
2008 out_disable:
2009 	/* Multiple disables should be harmless */
2010 	if (intel_fbc_enabled(dev)) {
2011 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2012 		intel_disable_fbc(dev);
2013 	}
2014 }
2015 
2016 int
2017 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2018 			   struct drm_i915_gem_object *obj,
2019 			   struct intel_ring_buffer *pipelined)
2020 {
2021 	struct drm_i915_private *dev_priv = dev->dev_private;
2022 	u32 alignment;
2023 	int ret;
2024 
2025 	alignment = 0; /* shut gcc */
2026 	switch (obj->tiling_mode) {
2027 	case I915_TILING_NONE:
2028 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2029 			alignment = 128 * 1024;
2030 		else if (INTEL_INFO(dev)->gen >= 4)
2031 			alignment = 4 * 1024;
2032 		else
2033 			alignment = 64 * 1024;
2034 		break;
2035 	case I915_TILING_X:
2036 		/* pin() will align the object as required by fence */
2037 		alignment = 0;
2038 		break;
2039 	case I915_TILING_Y:
2040 		/* FIXME: Is this true? */
2041 		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2042 		return -EINVAL;
2043 	default:
2044 		KASSERT(0, ("Wrong tiling for fb obj"));
2045 	}
2046 
2047 	dev_priv->mm.interruptible = false;
2048 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2049 	if (ret)
2050 		goto err_interruptible;
2051 
2052 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2053 	 * fence, whereas 965+ only requires a fence if using
2054 	 * framebuffer compression.  For simplicity, we always install
2055 	 * a fence as the cost is not that onerous.
2056 	 */
2057 	if (obj->tiling_mode != I915_TILING_NONE) {
2058 		ret = i915_gem_object_get_fence(obj, pipelined);
2059 		if (ret)
2060 			goto err_unpin;
2061 
2062 		i915_gem_object_pin_fence(obj);
2063 	}
2064 
2065 	dev_priv->mm.interruptible = true;
2066 	return 0;
2067 
2068 err_unpin:
2069 	i915_gem_object_unpin(obj);
2070 err_interruptible:
2071 	dev_priv->mm.interruptible = true;
2072 	return ret;
2073 }
2074 
2075 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2076 {
2077 	i915_gem_object_unpin_fence(obj);
2078 	i915_gem_object_unpin(obj);
2079 }
2080 
2081 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2082 			     int x, int y)
2083 {
2084 	struct drm_device *dev = crtc->dev;
2085 	struct drm_i915_private *dev_priv = dev->dev_private;
2086 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2087 	struct intel_framebuffer *intel_fb;
2088 	struct drm_i915_gem_object *obj;
2089 	int plane = intel_crtc->plane;
2090 	unsigned long Start, Offset;
2091 	u32 dspcntr;
2092 	u32 reg;
2093 
2094 	switch (plane) {
2095 	case 0:
2096 	case 1:
2097 		break;
2098 	default:
2099 		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2100 		return -EINVAL;
2101 	}
2102 
2103 	intel_fb = to_intel_framebuffer(fb);
2104 	obj = intel_fb->obj;
2105 
2106 	reg = DSPCNTR(plane);
2107 	dspcntr = I915_READ(reg);
2108 	/* Mask out pixel format bits in case we change it */
2109 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2110 	switch (fb->bits_per_pixel) {
2111 	case 8:
2112 		dspcntr |= DISPPLANE_8BPP;
2113 		break;
2114 	case 16:
2115 		if (fb->depth == 15)
2116 			dspcntr |= DISPPLANE_15_16BPP;
2117 		else
2118 			dspcntr |= DISPPLANE_16BPP;
2119 		break;
2120 	case 24:
2121 	case 32:
2122 		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2123 		break;
2124 	default:
2125 		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2126 		return -EINVAL;
2127 	}
2128 	if (INTEL_INFO(dev)->gen >= 4) {
2129 		if (obj->tiling_mode != I915_TILING_NONE)
2130 			dspcntr |= DISPPLANE_TILED;
2131 		else
2132 			dspcntr &= ~DISPPLANE_TILED;
2133 	}
2134 
2135 	I915_WRITE(reg, dspcntr);
2136 
2137 	Start = obj->gtt_offset;
2138 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
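	/* e.g. (illustrative values): a 32bpp framebuffer with
	 * pitches[0] == 7680 panned to (x, y) == (8, 2) yields
	 * Offset == 2 * 7680 + 8 * 4 == 15392 bytes past Start.
	 */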
2139 
2140 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2141 		      Start, Offset, x, y, fb->pitches[0]);
2142 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2143 	if (INTEL_INFO(dev)->gen >= 4) {
2144 		I915_WRITE(DSPSURF(plane), Start);
2145 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2146 		I915_WRITE(DSPADDR(plane), Offset);
2147 	} else
2148 		I915_WRITE(DSPADDR(plane), Start + Offset);
2149 	POSTING_READ(reg);
2150 
2151 	return (0);
2152 }
2153 
2154 static int ironlake_update_plane(struct drm_crtc *crtc,
2155 				 struct drm_framebuffer *fb, int x, int y)
2156 {
2157 	struct drm_device *dev = crtc->dev;
2158 	struct drm_i915_private *dev_priv = dev->dev_private;
2159 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2160 	struct intel_framebuffer *intel_fb;
2161 	struct drm_i915_gem_object *obj;
2162 	int plane = intel_crtc->plane;
2163 	unsigned long Start, Offset;
2164 	u32 dspcntr;
2165 	u32 reg;
2166 
2167 	switch (plane) {
2168 	case 0:
2169 	case 1:
2170 	case 2:
2171 		break;
2172 	default:
2173 		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2174 		return -EINVAL;
2175 	}
2176 
2177 	intel_fb = to_intel_framebuffer(fb);
2178 	obj = intel_fb->obj;
2179 
2180 	reg = DSPCNTR(plane);
2181 	dspcntr = I915_READ(reg);
2182 	/* Mask out pixel format bits in case we change it */
2183 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2184 	switch (fb->bits_per_pixel) {
2185 	case 8:
2186 		dspcntr |= DISPPLANE_8BPP;
2187 		break;
2188 	case 16:
2189 		if (fb->depth != 16) {
2190 			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
2191 			return -EINVAL;
2192 		}
2193 
2194 		dspcntr |= DISPPLANE_16BPP;
2195 		break;
2196 	case 24:
2197 	case 32:
2198 		if (fb->depth == 24)
2199 			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2200 		else if (fb->depth == 30)
2201 			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2202 		else {
2203 			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
2204 			    fb->depth);
2205 			return -EINVAL;
2206 		}
2207 		break;
2208 	default:
2209 		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2210 		return -EINVAL;
2211 	}
2212 
2213 	if (obj->tiling_mode != I915_TILING_NONE)
2214 		dspcntr |= DISPPLANE_TILED;
2215 	else
2216 		dspcntr &= ~DISPPLANE_TILED;
2217 
2218 	/* trickle feed must be disabled */
2219 	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2220 
2221 	I915_WRITE(reg, dspcntr);
2222 
2223 	Start = obj->gtt_offset;
2224 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2225 
2226 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2227 		      Start, Offset, x, y, fb->pitches[0]);
2228 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2229 	I915_WRITE(DSPSURF(plane), Start);
2230 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2231 	I915_WRITE(DSPADDR(plane), Offset);
2232 	POSTING_READ(reg);
2233 
2234 	return 0;
2235 }
2236 
2237 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2238 static int
2239 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2240 			   int x, int y, enum mode_set_atomic state)
2241 {
2242 	struct drm_device *dev = crtc->dev;
2243 	struct drm_i915_private *dev_priv = dev->dev_private;
2244 	int ret;
2245 
2246 	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2247 	if (ret)
2248 		return ret;
2249 
2250 	intel_update_fbc(dev);
2251 	intel_increase_pllclock(crtc);
2252 
2253 	return 0;
2254 }
2255 
2256 static int
2257 intel_finish_fb(struct drm_framebuffer *old_fb)
2258 {
2259 	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2260 	struct drm_device *dev = obj->base.dev;
2261 	struct drm_i915_private *dev_priv = dev->dev_private;
2262 	bool was_interruptible = dev_priv->mm.interruptible;
2263 	int ret;
2264 
2265 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2266 	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
2267 	    atomic_load_acq_int(&obj->pending_flip) != 0) {
2268 		lksleep(&obj->pending_flip, &dev->event_lock,
2269 		    0, "915flp", 0);
2270 	}
2271 	lockmgr(&dev->event_lock, LK_RELEASE);
2272 
2273 	/* Big Hammer, we also need to ensure that any pending
2274 	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2275 	 * current scanout is retired before unpinning the old
2276 	 * framebuffer.
2277 	 *
2278 	 * This should only fail upon a hung GPU, in which case we
2279 	 * can safely continue.
2280 	 */
2281 	dev_priv->mm.interruptible = false;
2282 	ret = i915_gem_object_finish_gpu(obj);
2283 	dev_priv->mm.interruptible = was_interruptible;
2284 	return ret;
2285 }
2286 
2287 static int
2288 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2289 		    struct drm_framebuffer *old_fb)
2290 {
2291 	struct drm_device *dev = crtc->dev;
2292 #if 0
2293 	struct drm_i915_master_private *master_priv;
2294 #else
2295 	drm_i915_private_t *dev_priv = dev->dev_private;
2296 #endif
2297 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2298 	int ret;
2299 
2300 	/* no fb bound */
2301 	if (!crtc->fb) {
2302 		DRM_ERROR("No FB bound\n");
2303 		return 0;
2304 	}
2305 
2306 	switch (intel_crtc->plane) {
2307 	case 0:
2308 	case 1:
2309 		break;
2310 	case 2:
2311 		if (IS_IVYBRIDGE(dev))
2312 			break;
2313 		/* fall through otherwise */
2314 	default:
2315 		DRM_ERROR("no plane for crtc\n");
2316 		return -EINVAL;
2317 	}
2318 
2319 	DRM_LOCK(dev);
2320 	ret = intel_pin_and_fence_fb_obj(dev,
2321 					 to_intel_framebuffer(crtc->fb)->obj,
2322 					 NULL);
2323 	if (ret != 0) {
2324 		DRM_UNLOCK(dev);
2325 		DRM_ERROR("pin & fence failed\n");
2326 		return ret;
2327 	}
2328 
2329 	if (old_fb)
2330 		intel_finish_fb(old_fb);
2331 
2332 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2333 					 LEAVE_ATOMIC_MODE_SET);
2334 	if (ret) {
2335 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2336 		DRM_UNLOCK(dev);
2337 		DRM_ERROR("failed to update base address\n");
2338 		return ret;
2339 	}
2340 
2341 	if (old_fb) {
2342 		intel_wait_for_vblank(dev, intel_crtc->pipe);
2343 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2344 	}
2345 
2346 	DRM_UNLOCK(dev);
2347 
2348 #if 0
2349 	if (!dev->primary->master)
2350 		return 0;
2351 
2352 	master_priv = dev->primary->master->driver_priv;
2353 	if (!master_priv->sarea_priv)
2354 		return 0;
2355 
2356 	if (intel_crtc->pipe) {
2357 		master_priv->sarea_priv->pipeB_x = x;
2358 		master_priv->sarea_priv->pipeB_y = y;
2359 	} else {
2360 		master_priv->sarea_priv->pipeA_x = x;
2361 		master_priv->sarea_priv->pipeA_y = y;
2362 	}
2363 #else
2364 
2365 	if (!dev_priv->sarea_priv)
2366 		return 0;
2367 
2368 	if (intel_crtc->pipe) {
2369 		dev_priv->sarea_priv->planeB_x = x;
2370 		dev_priv->sarea_priv->planeB_y = y;
2371 	} else {
2372 		dev_priv->sarea_priv->planeA_x = x;
2373 		dev_priv->sarea_priv->planeA_y = y;
2374 	}
2375 #endif
2376 
2377 	return 0;
2378 }
2379 
2380 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2381 {
2382 	struct drm_device *dev = crtc->dev;
2383 	struct drm_i915_private *dev_priv = dev->dev_private;
2384 	u32 dpa_ctl;
2385 
2386 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2387 	dpa_ctl = I915_READ(DP_A);
2388 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2389 
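	/* Only two PLL frequencies are selectable here; the 160MHz choice
	 * additionally requires the four-step register workaround below,
	 * the 270MHz choice does not.
	 */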
2390 	if (clock < 200000) {
2391 		u32 temp;
2392 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2393 		/* workaround for 160MHz:
2394 		   1) program 0x4600c bits 15:0 = 0x8124
2395 		   2) program 0x46010 bit 0 = 1
2396 		   3) program 0x46034 bit 24 = 1
2397 		   4) program 0x64000 bit 14 = 1
2398 		   */
2399 		temp = I915_READ(0x4600c);
2400 		temp &= 0xffff0000;
2401 		I915_WRITE(0x4600c, temp | 0x8124);
2402 
2403 		temp = I915_READ(0x46010);
2404 		I915_WRITE(0x46010, temp | 1);
2405 
2406 		temp = I915_READ(0x46034);
2407 		I915_WRITE(0x46034, temp | (1 << 24));
2408 	} else {
2409 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2410 	}
2411 	I915_WRITE(DP_A, dpa_ctl);
2412 
2413 	POSTING_READ(DP_A);
2414 	DELAY(500);
2415 }
2416 
2417 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2418 {
2419 	struct drm_device *dev = crtc->dev;
2420 	struct drm_i915_private *dev_priv = dev->dev_private;
2421 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2422 	int pipe = intel_crtc->pipe;
2423 	u32 reg, temp;
2424 
2425 	/* enable normal train */
2426 	reg = FDI_TX_CTL(pipe);
2427 	temp = I915_READ(reg);
2428 	if (IS_IVYBRIDGE(dev)) {
2429 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2430 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2431 	} else {
2432 		temp &= ~FDI_LINK_TRAIN_NONE;
2433 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2434 	}
2435 	I915_WRITE(reg, temp);
2436 
2437 	reg = FDI_RX_CTL(pipe);
2438 	temp = I915_READ(reg);
2439 	if (HAS_PCH_CPT(dev)) {
2440 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2441 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2442 	} else {
2443 		temp &= ~FDI_LINK_TRAIN_NONE;
2444 		temp |= FDI_LINK_TRAIN_NONE;
2445 	}
2446 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2447 
2448 	/* wait one idle pattern time */
2449 	POSTING_READ(reg);
2450 	DELAY(1000);
2451 
2452 	/* IVB wants error correction enabled */
2453 	if (IS_IVYBRIDGE(dev))
2454 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2455 			   FDI_FE_ERRC_ENABLE);
2456 }
2457 
2458 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2459 {
2460 	struct drm_i915_private *dev_priv = dev->dev_private;
2461 	u32 flags = I915_READ(SOUTH_CHICKEN1);
2462 
2463 	flags |= FDI_PHASE_SYNC_OVR(pipe);
2464 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2465 	flags |= FDI_PHASE_SYNC_EN(pipe);
2466 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2467 	POSTING_READ(SOUTH_CHICKEN1);
2468 }
2469 
2470 /* The FDI link training functions for ILK/Ibexpeak. */
2471 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2472 {
2473 	struct drm_device *dev = crtc->dev;
2474 	struct drm_i915_private *dev_priv = dev->dev_private;
2475 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2476 	int pipe = intel_crtc->pipe;
2477 	int plane = intel_crtc->plane;
2478 	u32 reg, temp, tries;
2479 
2480 	/* FDI needs bits from pipe & plane first */
2481 	assert_pipe_enabled(dev_priv, pipe);
2482 	assert_plane_enabled(dev_priv, plane);
2483 
2484 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2485 	   for train result */
2486 	reg = FDI_RX_IMR(pipe);
2487 	temp = I915_READ(reg);
2488 	temp &= ~FDI_RX_SYMBOL_LOCK;
2489 	temp &= ~FDI_RX_BIT_LOCK;
2490 	I915_WRITE(reg, temp);
2491 	I915_READ(reg);
2492 	DELAY(150);
2493 
2494 	/* enable CPU FDI TX and PCH FDI RX */
2495 	reg = FDI_TX_CTL(pipe);
2496 	temp = I915_READ(reg);
2497 	temp &= ~(7 << 19);
2498 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2499 	temp &= ~FDI_LINK_TRAIN_NONE;
2500 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2501 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2502 
2503 	reg = FDI_RX_CTL(pipe);
2504 	temp = I915_READ(reg);
2505 	temp &= ~FDI_LINK_TRAIN_NONE;
2506 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2507 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2508 
2509 	POSTING_READ(reg);
2510 	DELAY(150);
2511 
2512 	/* Ironlake workaround, enable clock pointer after FDI enable */
2513 	if (HAS_PCH_IBX(dev)) {
2514 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2515 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2516 			   FDI_RX_PHASE_SYNC_POINTER_EN);
2517 	}
2518 
2519 	reg = FDI_RX_IIR(pipe);
2520 	for (tries = 0; tries < 5; tries++) {
2521 		temp = I915_READ(reg);
2522 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2523 
2524 		if ((temp & FDI_RX_BIT_LOCK)) {
2525 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2526 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2527 			break;
2528 		}
2529 	}
2530 	if (tries == 5)
2531 		DRM_ERROR("FDI train 1 fail!\n");
2532 
2533 	/* Train 2 */
2534 	reg = FDI_TX_CTL(pipe);
2535 	temp = I915_READ(reg);
2536 	temp &= ~FDI_LINK_TRAIN_NONE;
2537 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2538 	I915_WRITE(reg, temp);
2539 
2540 	reg = FDI_RX_CTL(pipe);
2541 	temp = I915_READ(reg);
2542 	temp &= ~FDI_LINK_TRAIN_NONE;
2543 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2544 	I915_WRITE(reg, temp);
2545 
2546 	POSTING_READ(reg);
2547 	DELAY(150);
2548 
2549 	reg = FDI_RX_IIR(pipe);
2550 	for (tries = 0; tries < 5; tries++) {
2551 		temp = I915_READ(reg);
2552 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2553 
2554 		if (temp & FDI_RX_SYMBOL_LOCK) {
2555 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2556 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2557 			break;
2558 		}
2559 	}
2560 	if (tries == 5)
2561 		DRM_ERROR("FDI train 2 fail!\n");
2562 
2563 	DRM_DEBUG_KMS("FDI train done.\n");
2564 
2565 }
2566 
2567 static const int snb_b_fdi_train_param[] = {
2568 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2569 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2570 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2571 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2572 };
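/*
 * Per the register names, successive entries trade voltage swing
 * against pre-emphasis (400mV/0dB up to 800mV/0dB); the training loops
 * below walk the table in order, escalating until FDI_RX_IIR reports
 * bit or symbol lock.
 */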
2573 
2574 /* The FDI link training functions for SNB/Cougarpoint. */
2575 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2576 {
2577 	struct drm_device *dev = crtc->dev;
2578 	struct drm_i915_private *dev_priv = dev->dev_private;
2579 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2580 	int pipe = intel_crtc->pipe;
2581 	u32 reg, temp, i;
2582 
2583 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2584 	   for train result */
2585 	reg = FDI_RX_IMR(pipe);
2586 	temp = I915_READ(reg);
2587 	temp &= ~FDI_RX_SYMBOL_LOCK;
2588 	temp &= ~FDI_RX_BIT_LOCK;
2589 	I915_WRITE(reg, temp);
2590 
2591 	POSTING_READ(reg);
2592 	DELAY(150);
2593 
2594 	/* enable CPU FDI TX and PCH FDI RX */
2595 	reg = FDI_TX_CTL(pipe);
2596 	temp = I915_READ(reg);
2597 	temp &= ~(7 << 19);
2598 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2599 	temp &= ~FDI_LINK_TRAIN_NONE;
2600 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2601 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2602 	/* SNB-B */
2603 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2604 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2605 
2606 	reg = FDI_RX_CTL(pipe);
2607 	temp = I915_READ(reg);
2608 	if (HAS_PCH_CPT(dev)) {
2609 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2610 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2611 	} else {
2612 		temp &= ~FDI_LINK_TRAIN_NONE;
2613 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2614 	}
2615 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2616 
2617 	POSTING_READ(reg);
2618 	DELAY(150);
2619 
2620 	if (HAS_PCH_CPT(dev))
2621 		cpt_phase_pointer_enable(dev, pipe);
2622 
2623 	for (i = 0; i < 4; i++) {
2624 		reg = FDI_TX_CTL(pipe);
2625 		temp = I915_READ(reg);
2626 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2627 		temp |= snb_b_fdi_train_param[i];
2628 		I915_WRITE(reg, temp);
2629 
2630 		POSTING_READ(reg);
2631 		DELAY(500);
2632 
2633 		reg = FDI_RX_IIR(pipe);
2634 		temp = I915_READ(reg);
2635 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2636 
2637 		if (temp & FDI_RX_BIT_LOCK) {
2638 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2639 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2640 			break;
2641 		}
2642 	}
2643 	if (i == 4)
2644 		DRM_ERROR("FDI train 1 fail!\n");
2645 
2646 	/* Train 2 */
2647 	reg = FDI_TX_CTL(pipe);
2648 	temp = I915_READ(reg);
2649 	temp &= ~FDI_LINK_TRAIN_NONE;
2650 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2651 	if (IS_GEN6(dev)) {
2652 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2653 		/* SNB-B */
2654 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2655 	}
2656 	I915_WRITE(reg, temp);
2657 
2658 	reg = FDI_RX_CTL(pipe);
2659 	temp = I915_READ(reg);
2660 	if (HAS_PCH_CPT(dev)) {
2661 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2662 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2663 	} else {
2664 		temp &= ~FDI_LINK_TRAIN_NONE;
2665 		temp |= FDI_LINK_TRAIN_PATTERN_2;
2666 	}
2667 	I915_WRITE(reg, temp);
2668 
2669 	POSTING_READ(reg);
2670 	DELAY(150);
2671 
2672 	for (i = 0; i < 4; i++) {
2673 		reg = FDI_TX_CTL(pipe);
2674 		temp = I915_READ(reg);
2675 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2676 		temp |= snb_b_fdi_train_param[i];
2677 		I915_WRITE(reg, temp);
2678 
2679 		POSTING_READ(reg);
2680 		DELAY(500);
2681 
2682 		reg = FDI_RX_IIR(pipe);
2683 		temp = I915_READ(reg);
2684 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2685 
2686 		if (temp & FDI_RX_SYMBOL_LOCK) {
2687 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2688 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2689 			break;
2690 		}
2691 	}
2692 	if (i == 4)
2693 		DRM_ERROR("FDI train 2 fail!\n");
2694 
2695 	DRM_DEBUG_KMS("FDI train done.\n");
2696 }
2697 
2698 /* Manual link training for Ivy Bridge A0 parts */
2699 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2700 {
2701 	struct drm_device *dev = crtc->dev;
2702 	struct drm_i915_private *dev_priv = dev->dev_private;
2703 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2704 	int pipe = intel_crtc->pipe;
2705 	u32 reg, temp, i;
2706 
2707 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2708 	   for train result */
2709 	reg = FDI_RX_IMR(pipe);
2710 	temp = I915_READ(reg);
2711 	temp &= ~FDI_RX_SYMBOL_LOCK;
2712 	temp &= ~FDI_RX_BIT_LOCK;
2713 	I915_WRITE(reg, temp);
2714 
2715 	POSTING_READ(reg);
2716 	DELAY(150);
2717 
2718 	/* enable CPU FDI TX and PCH FDI RX */
2719 	reg = FDI_TX_CTL(pipe);
2720 	temp = I915_READ(reg);
2721 	temp &= ~(7 << 19);
2722 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2723 	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2724 	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2725 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2726 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2727 	temp |= FDI_COMPOSITE_SYNC;
2728 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2729 
2730 	reg = FDI_RX_CTL(pipe);
2731 	temp = I915_READ(reg);
2732 	temp &= ~FDI_LINK_TRAIN_AUTO;
2733 	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2734 	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2735 	temp |= FDI_COMPOSITE_SYNC;
2736 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2737 
2738 	POSTING_READ(reg);
2739 	DELAY(150);
2740 
2741 	for (i = 0; i < 4; i++) {
2742 		reg = FDI_TX_CTL(pipe);
2743 		temp = I915_READ(reg);
2744 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2745 		temp |= snb_b_fdi_train_param[i];
2746 		I915_WRITE(reg, temp);
2747 
2748 		POSTING_READ(reg);
2749 		DELAY(500);
2750 
2751 		reg = FDI_RX_IIR(pipe);
2752 		temp = I915_READ(reg);
2753 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2754 
2755 		if (temp & FDI_RX_BIT_LOCK ||
2756 		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2757 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2758 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2759 			break;
2760 		}
2761 	}
2762 	if (i == 4)
2763 		DRM_ERROR("FDI train 1 fail!\n");
2764 
2765 	/* Train 2 */
2766 	reg = FDI_TX_CTL(pipe);
2767 	temp = I915_READ(reg);
2768 	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2769 	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2770 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2771 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2772 	I915_WRITE(reg, temp);
2773 
2774 	reg = FDI_RX_CTL(pipe);
2775 	temp = I915_READ(reg);
2776 	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2777 	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2778 	I915_WRITE(reg, temp);
2779 
2780 	POSTING_READ(reg);
2781 	DELAY(150);
2782 
2783 	for (i = 0; i < 4; i++) {
2784 		reg = FDI_TX_CTL(pipe);
2785 		temp = I915_READ(reg);
2786 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2787 		temp |= snb_b_fdi_train_param[i];
2788 		I915_WRITE(reg, temp);
2789 
2790 		POSTING_READ(reg);
2791 		DELAY(500);
2792 
2793 		reg = FDI_RX_IIR(pipe);
2794 		temp = I915_READ(reg);
2795 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2796 
2797 		if (temp & FDI_RX_SYMBOL_LOCK) {
2798 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2799 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2800 			break;
2801 		}
2802 	}
2803 	if (i == 4)
2804 		DRM_ERROR("FDI train 2 fail!\n");
2805 
2806 	DRM_DEBUG_KMS("FDI train done.\n");
2807 }
2808 
2809 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2810 {
2811 	struct drm_device *dev = crtc->dev;
2812 	struct drm_i915_private *dev_priv = dev->dev_private;
2813 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2814 	int pipe = intel_crtc->pipe;
2815 	u32 reg, temp;
2816 
2817 	/* Write the TU size bits so error detection works */
2818 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
2819 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2820 
2821 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2822 	reg = FDI_RX_CTL(pipe);
2823 	temp = I915_READ(reg);
2824 	temp &= ~((0x7 << 19) | (0x7 << 16));
2825 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
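	/* PIPE_BPC occupies PIPECONF bits 7:5, so shifting the masked
	 * value left by 11 relocates it into the FDI_RX bpc field at
	 * bits 18:16, cleared just above.
	 */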
2826 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2827 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2828 
2829 	POSTING_READ(reg);
2830 	DELAY(200);
2831 
2832 	/* Switch from Rawclk to PCDclk */
2833 	temp = I915_READ(reg);
2834 	I915_WRITE(reg, temp | FDI_PCDCLK);
2835 
2836 	POSTING_READ(reg);
2837 	DELAY(200);
2838 
2839 	/* Enable CPU FDI TX PLL, always on for Ironlake */
2840 	reg = FDI_TX_CTL(pipe);
2841 	temp = I915_READ(reg);
2842 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2843 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2844 
2845 		POSTING_READ(reg);
2846 		DELAY(100);
2847 	}
2848 }
2849 
2850 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2851 {
2852 	struct drm_i915_private *dev_priv = dev->dev_private;
2853 	u32 flags = I915_READ(SOUTH_CHICKEN1);
2854 
2855 	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2856 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2857 	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2858 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2859 	POSTING_READ(SOUTH_CHICKEN1);
2860 }
2861 
2862 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2863 {
2864 	struct drm_device *dev = crtc->dev;
2865 	struct drm_i915_private *dev_priv = dev->dev_private;
2866 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2867 	int pipe = intel_crtc->pipe;
2868 	u32 reg, temp;
2869 
2870 	/* disable CPU FDI tx and PCH FDI rx */
2871 	reg = FDI_TX_CTL(pipe);
2872 	temp = I915_READ(reg);
2873 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2874 	POSTING_READ(reg);
2875 
2876 	reg = FDI_RX_CTL(pipe);
2877 	temp = I915_READ(reg);
2878 	temp &= ~(0x7 << 16);
2879 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2880 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2881 
2882 	POSTING_READ(reg);
2883 	DELAY(100);
2884 
2885 	/* Ironlake workaround, disable clock pointer after downing FDI */
2886 	if (HAS_PCH_IBX(dev)) {
2887 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2888 		I915_WRITE(FDI_RX_CHICKEN(pipe),
2889 			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2890 			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
2891 	} else if (HAS_PCH_CPT(dev)) {
2892 		cpt_phase_pointer_disable(dev, pipe);
2893 	}
2894 
2895 	/* still set train pattern 1 */
2896 	reg = FDI_TX_CTL(pipe);
2897 	temp = I915_READ(reg);
2898 	temp &= ~FDI_LINK_TRAIN_NONE;
2899 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2900 	I915_WRITE(reg, temp);
2901 
2902 	reg = FDI_RX_CTL(pipe);
2903 	temp = I915_READ(reg);
2904 	if (HAS_PCH_CPT(dev)) {
2905 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2906 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2907 	} else {
2908 		temp &= ~FDI_LINK_TRAIN_NONE;
2909 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2910 	}
2911 	/* BPC in FDI rx is consistent with that in PIPECONF */
2912 	temp &= ~(0x07 << 16);
2913 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2914 	I915_WRITE(reg, temp);
2915 
2916 	POSTING_READ(reg);
2917 	DELAY(100);
2918 }
2919 
2920 /*
2921  * When we disable a pipe, we need to clear any pending scanline wait events
2922  * to avoid hanging the ring, which we assume we are waiting on.
2923  */
2924 static void intel_clear_scanline_wait(struct drm_device *dev)
2925 {
2926 	struct drm_i915_private *dev_priv = dev->dev_private;
2927 	struct intel_ring_buffer *ring;
2928 	u32 tmp;
2929 
2930 	if (IS_GEN2(dev))
2931 		/* Can't break the hang on i8xx */
2932 		return;
2933 
2934 	ring = LP_RING(dev_priv);
2935 	tmp = I915_READ_CTL(ring);
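	/* RING_WAIT appears to be write-to-clear: if it is set, writing
	 * the control register back with the bit still set releases the
	 * stuck scanline wait.
	 */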
2936 	if (tmp & RING_WAIT)
2937 		I915_WRITE_CTL(ring, tmp);
2938 }
2939 
2940 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2941 {
2942 	struct drm_i915_gem_object *obj;
2943 	struct drm_i915_private *dev_priv;
2944 	struct drm_device *dev;
2945 
2946 	if (crtc->fb == NULL)
2947 		return;
2948 
2949 	obj = to_intel_framebuffer(crtc->fb)->obj;
2950 	dev = crtc->dev;
2951 	dev_priv = dev->dev_private;
2952 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2953 	while (atomic_load_acq_int(&obj->pending_flip) != 0)
2954 		lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
2955 	lockmgr(&dev->event_lock, LK_RELEASE);
2956 }
2957 
2958 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2959 {
2960 	struct drm_device *dev = crtc->dev;
2961 	struct drm_mode_config *mode_config = &dev->mode_config;
2962 	struct intel_encoder *encoder;
2963 
2964 	/*
2965 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2966 	 * must be driven by its own crtc; no sharing is possible.
2967 	 */
2968 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2969 		if (encoder->base.crtc != crtc)
2970 			continue;
2971 
2972 		switch (encoder->type) {
2973 		case INTEL_OUTPUT_EDP:
2974 			if (!intel_encoder_is_pch_edp(&encoder->base))
2975 				return false;
2976 			continue;
2977 		}
2978 	}
2979 
2980 	return true;
2981 }
2982 
2983 /*
2984  * Enable PCH resources required for PCH ports:
2985  *   - PCH PLLs
2986  *   - FDI training & RX/TX
2987  *   - update transcoder timings
2988  *   - DP transcoding bits
2989  *   - transcoder
2990  */
2991 static void ironlake_pch_enable(struct drm_crtc *crtc)
2992 {
2993 	struct drm_device *dev = crtc->dev;
2994 	struct drm_i915_private *dev_priv = dev->dev_private;
2995 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2996 	int pipe = intel_crtc->pipe;
2997 	u32 reg, temp, transc_sel;
2998 
2999 	/* For PCH output, training FDI link */
3000 	dev_priv->display.fdi_link_train(crtc);
3001 
3002 	intel_enable_pch_pll(dev_priv, pipe);
3003 
3004 	if (HAS_PCH_CPT(dev)) {
3005 		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
3006 			TRANSC_DPLLB_SEL;
3007 
3008 		/* Be sure PCH DPLL SEL is set */
3009 		temp = I915_READ(PCH_DPLL_SEL);
3010 		if (pipe == 0) {
3011 			temp &= ~(TRANSA_DPLLB_SEL);
3012 			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3013 		} else if (pipe == 1) {
3014 			temp &= ~(TRANSB_DPLLB_SEL);
3015 			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3016 		} else if (pipe == 2) {
3017 			temp &= ~(TRANSC_DPLLB_SEL);
3018 			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
3019 		}
3020 		I915_WRITE(PCH_DPLL_SEL, temp);
3021 	}
3022 
3023 	/* set transcoder timing, panel must allow it */
3024 	assert_panel_unlocked(dev_priv, pipe);
3025 	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3026 	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3027 	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3028 
3029 	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3030 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3031 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3032 	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3033 
3034 	intel_fdi_normal_train(crtc);
3035 
3036 	/* For PCH DP, enable TRANS_DP_CTL */
3037 	if (HAS_PCH_CPT(dev) &&
3038 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3039 	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3040 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3041 		reg = TRANS_DP_CTL(pipe);
3042 		temp = I915_READ(reg);
3043 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3044 			  TRANS_DP_SYNC_MASK |
3045 			  TRANS_DP_BPC_MASK);
3046 		temp |= (TRANS_DP_OUTPUT_ENABLE |
3047 			 TRANS_DP_ENH_FRAMING);
3048 		temp |= bpc << 9; /* same format but at 11:9 */
3049 
3050 		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3051 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3052 		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3053 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3054 
3055 		switch (intel_trans_dp_port_sel(crtc)) {
3056 		case PCH_DP_B:
3057 			temp |= TRANS_DP_PORT_SEL_B;
3058 			break;
3059 		case PCH_DP_C:
3060 			temp |= TRANS_DP_PORT_SEL_C;
3061 			break;
3062 		case PCH_DP_D:
3063 			temp |= TRANS_DP_PORT_SEL_D;
3064 			break;
3065 		default:
3066 			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
3067 			temp |= TRANS_DP_PORT_SEL_B;
3068 			break;
3069 		}
3070 
3071 		I915_WRITE(reg, temp);
3072 	}
3073 
3074 	intel_enable_transcoder(dev_priv, pipe);
3075 }
3076 
3077 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3078 {
3079 	struct drm_i915_private *dev_priv = dev->dev_private;
3080 	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3081 	u32 temp;
3082 
3083 	temp = I915_READ(dslreg);
3084 	DELAY(500);
3085 	if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
3086 		/* Without this, mode sets may fail silently on FDI */
3087 		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3088 		DELAY(250);
3089 		I915_WRITE(tc2reg, 0);
3090 		if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
3091 		    "915cp2"))
3092 			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3093 	}
3094 }
3095 
3096 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3097 {
3098 	struct drm_device *dev = crtc->dev;
3099 	struct drm_i915_private *dev_priv = dev->dev_private;
3100 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3101 	int pipe = intel_crtc->pipe;
3102 	int plane = intel_crtc->plane;
3103 	u32 temp;
3104 	bool is_pch_port;
3105 
3106 	if (intel_crtc->active)
3107 		return;
3108 
3109 	intel_crtc->active = true;
3110 	intel_update_watermarks(dev);
3111 
3112 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3113 		temp = I915_READ(PCH_LVDS);
3114 		if ((temp & LVDS_PORT_EN) == 0)
3115 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3116 	}
3117 
3118 	is_pch_port = intel_crtc_driving_pch(crtc);
3119 
3120 	if (is_pch_port)
3121 		ironlake_fdi_pll_enable(crtc);
3122 	else
3123 		ironlake_fdi_disable(crtc);
3124 
3125 	/* Enable panel fitting for LVDS */
3126 	if (dev_priv->pch_pf_size &&
3127 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3128 		/* Force use of hard-coded filter coefficients
3129 		 * as some pre-programmed values are broken,
3130 		 * e.g. x201.
3131 		 */
3132 		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3133 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3134 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3135 	}
3136 
3137 	intel_enable_pipe(dev_priv, pipe, is_pch_port);
3138 	intel_enable_plane(dev_priv, plane, pipe);
3139 
3140 	if (is_pch_port)
3141 		ironlake_pch_enable(crtc);
3142 
3143 	intel_crtc_load_lut(crtc);
3144 
3145 	DRM_LOCK(dev);
3146 	intel_update_fbc(dev);
3147 	DRM_UNLOCK(dev);
3148 
3149 	intel_crtc_update_cursor(crtc, true);
3150 }
3151 
3152 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3153 {
3154 	struct drm_device *dev = crtc->dev;
3155 	struct drm_i915_private *dev_priv = dev->dev_private;
3156 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3157 	int pipe = intel_crtc->pipe;
3158 	int plane = intel_crtc->plane;
3159 	u32 reg, temp;
3160 
3161 	if (!intel_crtc->active)
3162 		return;
3163 
3164 	intel_crtc_wait_for_pending_flips(crtc);
3165 	drm_vblank_off(dev, pipe);
3166 	intel_crtc_update_cursor(crtc, false);
3167 
3168 	intel_disable_plane(dev_priv, plane, pipe);
3169 
3170 	if (dev_priv->cfb_plane == plane)
3171 		intel_disable_fbc(dev);
3172 
3173 	intel_disable_pipe(dev_priv, pipe);
3174 
3175 	/* Disable PF */
3176 	I915_WRITE(PF_CTL(pipe), 0);
3177 	I915_WRITE(PF_WIN_SZ(pipe), 0);
3178 
3179 	ironlake_fdi_disable(crtc);
3180 
3181 	/* This is a horrible layering violation; we should be doing this in
3182 	 * the connector/encoder ->prepare instead, but we don't always have
3183 	 * enough information there about the config to know whether it will
3184 	 * actually be necessary or just cause undesired flicker.
3185 	 */
3186 	intel_disable_pch_ports(dev_priv, pipe);
3187 
3188 	intel_disable_transcoder(dev_priv, pipe);
3189 
3190 	if (HAS_PCH_CPT(dev)) {
3191 		/* disable TRANS_DP_CTL */
3192 		reg = TRANS_DP_CTL(pipe);
3193 		temp = I915_READ(reg);
3194 		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3195 		temp |= TRANS_DP_PORT_SEL_NONE;
3196 		I915_WRITE(reg, temp);
3197 
3198 		/* disable DPLL_SEL */
3199 		temp = I915_READ(PCH_DPLL_SEL);
3200 		switch (pipe) {
3201 		case 0:
3202 			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3203 			break;
3204 		case 1:
3205 			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3206 			break;
3207 		case 2:
3208 			/* C shares PLL A or B */
3209 			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3210 			break;
3211 		default:
3212 			KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
3213 			KASSERT(0, ("Wrong pipe %d", pipe)); /* wtf */
3214 		I915_WRITE(PCH_DPLL_SEL, temp);
3215 	}
3216 
3217 	/* disable PCH DPLL */
3218 	if (!intel_crtc->no_pll)
3219 		intel_disable_pch_pll(dev_priv, pipe);
3220 
3221 	/* Switch from PCDclk to Rawclk */
3222 	reg = FDI_RX_CTL(pipe);
3223 	temp = I915_READ(reg);
3224 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3225 
3226 	/* Disable CPU FDI TX PLL */
3227 	reg = FDI_TX_CTL(pipe);
3228 	temp = I915_READ(reg);
3229 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3230 
3231 	POSTING_READ(reg);
3232 	DELAY(100);
3233 
3234 	reg = FDI_RX_CTL(pipe);
3235 	temp = I915_READ(reg);
3236 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3237 
3238 	/* Wait for the clocks to turn off. */
3239 	POSTING_READ(reg);
3240 	DELAY(100);
3241 
3242 	intel_crtc->active = false;
3243 	intel_update_watermarks(dev);
3244 
3245 	DRM_LOCK(dev);
3246 	intel_update_fbc(dev);
3247 	intel_clear_scanline_wait(dev);
3248 	DRM_UNLOCK(dev);
3249 }
3250 
3251 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3252 {
3253 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3254 	int pipe = intel_crtc->pipe;
3255 	int plane = intel_crtc->plane;
3256 
3257 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3258 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3259 	 */
3260 	switch (mode) {
3261 	case DRM_MODE_DPMS_ON:
3262 	case DRM_MODE_DPMS_STANDBY:
3263 	case DRM_MODE_DPMS_SUSPEND:
3264 		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3265 		ironlake_crtc_enable(crtc);
3266 		break;
3267 
3268 	case DRM_MODE_DPMS_OFF:
3269 		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3270 		ironlake_crtc_disable(crtc);
3271 		break;
3272 	}
3273 }
3274 
3275 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3276 {
3277 	if (!enable && intel_crtc->overlay) {
3278 		struct drm_device *dev = intel_crtc->base.dev;
3279 		struct drm_i915_private *dev_priv = dev->dev_private;
3280 
3281 		DRM_LOCK(dev);
3282 		dev_priv->mm.interruptible = false;
3283 		(void) intel_overlay_switch_off(intel_crtc->overlay);
3284 		dev_priv->mm.interruptible = true;
3285 		DRM_UNLOCK(dev);
3286 	}
3287 
3288 	/* Let userspace switch the overlay on again. In most cases userspace
3289 	 * has to recompute where to put it anyway.
3290 	 */
3291 }
3292 
3293 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3294 {
3295 	struct drm_device *dev = crtc->dev;
3296 	struct drm_i915_private *dev_priv = dev->dev_private;
3297 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3298 	int pipe = intel_crtc->pipe;
3299 	int plane = intel_crtc->plane;
3300 
3301 	if (intel_crtc->active)
3302 		return;
3303 
3304 	intel_crtc->active = true;
3305 	intel_update_watermarks(dev);
3306 
3307 	intel_enable_pll(dev_priv, pipe);
3308 	intel_enable_pipe(dev_priv, pipe, false);
3309 	intel_enable_plane(dev_priv, plane, pipe);
3310 
3311 	intel_crtc_load_lut(crtc);
3312 	intel_update_fbc(dev);
3313 
3314 	/* Give the overlay scaler a chance to enable if it's on this pipe */
3315 	intel_crtc_dpms_overlay(intel_crtc, true);
3316 	intel_crtc_update_cursor(crtc, true);
3317 }
3318 
3319 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3320 {
3321 	struct drm_device *dev = crtc->dev;
3322 	struct drm_i915_private *dev_priv = dev->dev_private;
3323 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3324 	int pipe = intel_crtc->pipe;
3325 	int plane = intel_crtc->plane;
3326 
3327 	if (!intel_crtc->active)
3328 		return;
3329 
3330 	/* Give the overlay scaler a chance to disable if it's on this pipe */
3331 	intel_crtc_wait_for_pending_flips(crtc);
3332 	drm_vblank_off(dev, pipe);
3333 	intel_crtc_dpms_overlay(intel_crtc, false);
3334 	intel_crtc_update_cursor(crtc, false);
3335 
3336 	if (dev_priv->cfb_plane == plane)
3337 		intel_disable_fbc(dev);
3338 
3339 	intel_disable_plane(dev_priv, plane, pipe);
3340 	intel_disable_pipe(dev_priv, pipe);
3341 	intel_disable_pll(dev_priv, pipe);
3342 
3343 	intel_crtc->active = false;
3344 	intel_update_fbc(dev);
3345 	intel_update_watermarks(dev);
3346 	intel_clear_scanline_wait(dev);
3347 }
3348 
3349 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3350 {
3351 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3352 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3353 	 */
3354 	switch (mode) {
3355 	case DRM_MODE_DPMS_ON:
3356 	case DRM_MODE_DPMS_STANDBY:
3357 	case DRM_MODE_DPMS_SUSPEND:
3358 		i9xx_crtc_enable(crtc);
3359 		break;
3360 	case DRM_MODE_DPMS_OFF:
3361 		i9xx_crtc_disable(crtc);
3362 		break;
3363 	}
3364 }
3365 
3366 /**
3367  * Sets the power management mode of the pipe and plane.
3368  */
3369 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3370 {
3371 	struct drm_device *dev = crtc->dev;
3372 	struct drm_i915_private *dev_priv = dev->dev_private;
3373 #if 0
3374 	struct drm_i915_master_private *master_priv;
3375 #endif
3376 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3377 	int pipe = intel_crtc->pipe;
3378 	bool enabled;
3379 
3380 	if (intel_crtc->dpms_mode == mode)
3381 		return;
3382 
3383 	intel_crtc->dpms_mode = mode;
3384 
3385 	dev_priv->display.dpms(crtc, mode);
3386 
3387 #if 0
3388 	if (!dev->primary->master)
3389 		return;
3390 
3391 	master_priv = dev->primary->master->driver_priv;
3392 	if (!master_priv->sarea_priv)
3393 		return;
3394 #else
3395 	if (!dev_priv->sarea_priv)
3396 		return;
3397 #endif
3398 
3399 	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3400 
3401 	switch (pipe) {
3402 	case 0:
3403 #if 0
3404 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3405 		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3406 #else
3407 		dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
3408 		dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
3409 #endif
3410 		break;
3411 	case 1:
3412 #if 0
3413 		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3414 		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3415 #else
3416 		dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
3417 		dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
3418 #endif
3419 		break;
3420 	default:
3421 		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3422 		break;
3423 	}
3424 }
3425 
3426 static void intel_crtc_disable(struct drm_crtc *crtc)
3427 {
3428 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3429 	struct drm_device *dev = crtc->dev;
3430 
3431 	/* Flush any pending WAITs before we disable the pipe. Note that
3432 	 * we need to drop the struct_mutex in order to acquire it again
3433 	 * during the lowlevel dpms routines around a couple of the
3434 	 * operations. It does not look trivial nor desirable to move
3435 	 * that locking higher. So instead we leave a window for the
3436 	 * submission of further commands on the fb before we can actually
3437 	 * disable it. This race with userspace exists anyway, and we can
3438 	 * only rely on the pipe being disabled by userspace after it
3439 	 * receives the hotplug notification and has flushed any pending
3440 	 * batches.
3441 	 */
3442 	if (crtc->fb) {
3443 		DRM_LOCK(dev);
3444 		intel_finish_fb(crtc->fb);
3445 		DRM_UNLOCK(dev);
3446 	}
3447 
3448 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3449 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3450 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3451 
3452 	if (crtc->fb) {
3453 		DRM_LOCK(dev);
3454 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3455 		DRM_UNLOCK(dev);
3456 	}
3457 }
3458 
3459 /* Prepare for a mode set.
3460  *
3461  * Note we could be a lot smarter here.  We need to figure out which outputs
3462  * will be enabled, which disabled (in short, how the config will change)
3463  * and perform the minimum necessary steps to accomplish that, e.g. updating
3464  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3465  * panel fitting is in the proper state, etc.
3466  */
3467 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3468 {
3469 	i9xx_crtc_disable(crtc);
3470 }
3471 
3472 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3473 {
3474 	i9xx_crtc_enable(crtc);
3475 }
3476 
3477 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3478 {
3479 	ironlake_crtc_disable(crtc);
3480 }
3481 
3482 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3483 {
3484 	ironlake_crtc_enable(crtc);
3485 }
3486 
3487 void intel_encoder_prepare(struct drm_encoder *encoder)
3488 {
3489 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3490 	/* lvds has its own version of prepare see intel_lvds_prepare */
3491 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3492 }
3493 
3494 void intel_encoder_commit(struct drm_encoder *encoder)
3495 {
3496 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3497 	struct drm_device *dev = encoder->dev;
3498 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3499 	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3500 
3501 	/* lvds has its own version of commit see intel_lvds_commit */
3502 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3503 
3504 	if (HAS_PCH_CPT(dev))
3505 		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3506 }
3507 
3508 void intel_encoder_destroy(struct drm_encoder *encoder)
3509 {
3510 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3511 
3512 	drm_encoder_cleanup(encoder);
3513 	drm_free(intel_encoder, DRM_MEM_KMS);
3514 }
3515 
3516 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3517 				  const struct drm_display_mode *mode,
3518 				  struct drm_display_mode *adjusted_mode)
3519 {
3520 	struct drm_device *dev = crtc->dev;
3521 
3522 	if (HAS_PCH_SPLIT(dev)) {
3523 		/* FDI link clock is fixed at 2.7G */
3524 		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3525 			return false;
3526 	}
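	/* i.e. with IRONLAKE_FDI_FREQ == 2700000 (kHz, the same units as
	 * mode->clock), anything above 2700000 * 4 / 3 == 3600000 exceeds
	 * what the fixed-frequency FDI link can carry and is rejected.
	 */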
3527 
3528 	/* All interlaced capable intel hw wants timings in frames. Note though
3529 	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3530 	 * timings, so we need to be careful not to clobber these. */
3531 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3532 		drm_mode_set_crtcinfo(adjusted_mode, 0);
3533 
3534 	return true;
3535 }
3536 
3537 static int i945_get_display_clock_speed(struct drm_device *dev)
3538 {
3539 	return 400000;
3540 }
3541 
3542 static int i915_get_display_clock_speed(struct drm_device *dev)
3543 {
3544 	return 333000;
3545 }
3546 
3547 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3548 {
3549 	return 200000;
3550 }
3551 
3552 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3553 {
3554 	u16 gcfgc = 0;
3555 
3556 	gcfgc = pci_read_config(dev->device, GCFGC, 2);
3557 
3558 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3559 		return 133000;
3560 	else {
3561 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3562 		case GC_DISPLAY_CLOCK_333_MHZ:
3563 			return 333000;
3564 		default:
3565 		case GC_DISPLAY_CLOCK_190_200_MHZ:
3566 			return 190000;
3567 		}
3568 	}
3569 }
3570 
3571 static int i865_get_display_clock_speed(struct drm_device *dev)
3572 {
3573 	return 266000;
3574 }
3575 
3576 static int i855_get_display_clock_speed(struct drm_device *dev)
3577 {
3578 	u16 hpllcc = 0;
3579 	/* Assume that the hardware is in the high speed state.  This
3580 	 * should be the default.
3581 	 */
3582 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3583 	case GC_CLOCK_133_200:
3584 	case GC_CLOCK_100_200:
3585 		return 200000;
3586 	case GC_CLOCK_166_250:
3587 		return 250000;
3588 	case GC_CLOCK_100_133:
3589 		return 133000;
3590 	}
3591 
3592 	/* Shouldn't happen */
3593 	return 0;
3594 }
3595 
3596 static int i830_get_display_clock_speed(struct drm_device *dev)
3597 {
3598 	return 133000;
3599 }
3600 
3601 struct fdi_m_n {
3602 	u32        tu;
3603 	u32        gmch_m;
3604 	u32        gmch_n;
3605 	u32        link_m;
3606 	u32        link_n;
3607 };
3608 
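/*
 * Halve both terms of a ratio until each fits in the 24-bit GMCH/link
 * M/N register fields.  The ratio itself is preserved to within the one
 * low-order bit each halving can drop.
 */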
3609 static void
3610 fdi_reduce_ratio(u32 *num, u32 *den)
3611 {
3612 	while (*num > 0xffffff || *den > 0xffffff) {
3613 		*num >>= 1;
3614 		*den >>= 1;
3615 	}
3616 }
3617 
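/*
 * Worked example (illustrative numbers): a 24 bpp mode at a 108000 kHz
 * dot clock over 4 FDI lanes at the fixed 270000 kHz link clock gives
 * gmch_m = 24 * 108000 = 2592000 and gmch_n = 270000 * 4 * 8 = 8640000.
 * Both already fit in 24 bits, so fdi_reduce_ratio() leaves them alone;
 * link_m/link_n is simply 108000/270000.
 */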
3618 static void
3619 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3620 		     int link_clock, struct fdi_m_n *m_n)
3621 {
3622 	m_n->tu = 64; /* default size */
3623 
3624 	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3625 	m_n->gmch_m = bits_per_pixel * pixel_clock;
3626 	m_n->gmch_n = link_clock * nlanes * 8;
3627 	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3628 
3629 	m_n->link_m = pixel_clock;
3630 	m_n->link_n = link_clock;
3631 	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3632 }
3633 
3634 
3635 struct intel_watermark_params {
3636 	unsigned long fifo_size;
3637 	unsigned long max_wm;
3638 	unsigned long default_wm;
3639 	unsigned long guard_size;
3640 	unsigned long cacheline_size;
3641 };
3642 
3643 /* Pineview has different values for various configs */
3644 static const struct intel_watermark_params pineview_display_wm = {
3645 	PINEVIEW_DISPLAY_FIFO,
3646 	PINEVIEW_MAX_WM,
3647 	PINEVIEW_DFT_WM,
3648 	PINEVIEW_GUARD_WM,
3649 	PINEVIEW_FIFO_LINE_SIZE
3650 };
3651 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3652 	PINEVIEW_DISPLAY_FIFO,
3653 	PINEVIEW_MAX_WM,
3654 	PINEVIEW_DFT_HPLLOFF_WM,
3655 	PINEVIEW_GUARD_WM,
3656 	PINEVIEW_FIFO_LINE_SIZE
3657 };
3658 static const struct intel_watermark_params pineview_cursor_wm = {
3659 	PINEVIEW_CURSOR_FIFO,
3660 	PINEVIEW_CURSOR_MAX_WM,
3661 	PINEVIEW_CURSOR_DFT_WM,
3662 	PINEVIEW_CURSOR_GUARD_WM,
3663 	PINEVIEW_FIFO_LINE_SIZE,
3664 };
3665 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3666 	PINEVIEW_CURSOR_FIFO,
3667 	PINEVIEW_CURSOR_MAX_WM,
3668 	PINEVIEW_CURSOR_DFT_WM,
3669 	PINEVIEW_CURSOR_GUARD_WM,
3670 	PINEVIEW_FIFO_LINE_SIZE
3671 };
3672 static const struct intel_watermark_params g4x_wm_info = {
3673 	G4X_FIFO_SIZE,
3674 	G4X_MAX_WM,
3675 	G4X_MAX_WM,
3676 	2,
3677 	G4X_FIFO_LINE_SIZE,
3678 };
3679 static const struct intel_watermark_params g4x_cursor_wm_info = {
3680 	I965_CURSOR_FIFO,
3681 	I965_CURSOR_MAX_WM,
3682 	I965_CURSOR_DFT_WM,
3683 	2,
3684 	G4X_FIFO_LINE_SIZE,
3685 };
3686 static const struct intel_watermark_params i965_cursor_wm_info = {
3687 	I965_CURSOR_FIFO,
3688 	I965_CURSOR_MAX_WM,
3689 	I965_CURSOR_DFT_WM,
3690 	2,
3691 	I915_FIFO_LINE_SIZE,
3692 };
3693 static const struct intel_watermark_params i945_wm_info = {
3694 	I945_FIFO_SIZE,
3695 	I915_MAX_WM,
3696 	1,
3697 	2,
3698 	I915_FIFO_LINE_SIZE
3699 };
3700 static const struct intel_watermark_params i915_wm_info = {
3701 	I915_FIFO_SIZE,
3702 	I915_MAX_WM,
3703 	1,
3704 	2,
3705 	I915_FIFO_LINE_SIZE
3706 };
3707 static const struct intel_watermark_params i855_wm_info = {
3708 	I855GM_FIFO_SIZE,
3709 	I915_MAX_WM,
3710 	1,
3711 	2,
3712 	I830_FIFO_LINE_SIZE
3713 };
3714 static const struct intel_watermark_params i830_wm_info = {
3715 	I830_FIFO_SIZE,
3716 	I915_MAX_WM,
3717 	1,
3718 	2,
3719 	I830_FIFO_LINE_SIZE
3720 };
3721 
3722 static const struct intel_watermark_params ironlake_display_wm_info = {
3723 	ILK_DISPLAY_FIFO,
3724 	ILK_DISPLAY_MAXWM,
3725 	ILK_DISPLAY_DFTWM,
3726 	2,
3727 	ILK_FIFO_LINE_SIZE
3728 };
3729 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3730 	ILK_CURSOR_FIFO,
3731 	ILK_CURSOR_MAXWM,
3732 	ILK_CURSOR_DFTWM,
3733 	2,
3734 	ILK_FIFO_LINE_SIZE
3735 };
3736 static const struct intel_watermark_params ironlake_display_srwm_info = {
3737 	ILK_DISPLAY_SR_FIFO,
3738 	ILK_DISPLAY_MAX_SRWM,
3739 	ILK_DISPLAY_DFT_SRWM,
3740 	2,
3741 	ILK_FIFO_LINE_SIZE
3742 };
3743 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3744 	ILK_CURSOR_SR_FIFO,
3745 	ILK_CURSOR_MAX_SRWM,
3746 	ILK_CURSOR_DFT_SRWM,
3747 	2,
3748 	ILK_FIFO_LINE_SIZE
3749 };
3750 
3751 static const struct intel_watermark_params sandybridge_display_wm_info = {
3752 	SNB_DISPLAY_FIFO,
3753 	SNB_DISPLAY_MAXWM,
3754 	SNB_DISPLAY_DFTWM,
3755 	2,
3756 	SNB_FIFO_LINE_SIZE
3757 };
3758 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3759 	SNB_CURSOR_FIFO,
3760 	SNB_CURSOR_MAXWM,
3761 	SNB_CURSOR_DFTWM,
3762 	2,
3763 	SNB_FIFO_LINE_SIZE
3764 };
3765 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3766 	SNB_DISPLAY_SR_FIFO,
3767 	SNB_DISPLAY_MAX_SRWM,
3768 	SNB_DISPLAY_DFT_SRWM,
3769 	2,
3770 	SNB_FIFO_LINE_SIZE
3771 };
3772 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3773 	SNB_CURSOR_SR_FIFO,
3774 	SNB_CURSOR_MAX_SRWM,
3775 	SNB_CURSOR_DFT_SRWM,
3776 	2,
3777 	SNB_FIFO_LINE_SIZE
3778 };
3779 
3780 
3781 /**
3782  * intel_calculate_wm - calculate watermark level
3783  * @clock_in_khz: pixel clock
3784  * @wm: chip FIFO params
 * @fifo_size: size of the plane's FIFO, in cachelines
3785  * @pixel_size: display pixel size
3786  * @latency_ns: memory latency for the platform
3787  *
3788  * Calculate the watermark level (the level at which the display plane will
3789  * start fetching from memory again).  Each chip has a different display
3790  * FIFO size and allocation, so the caller needs to figure that out and pass
3791  * in the correct intel_watermark_params structure.
3792  *
3793  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3794  * on the pixel size.  When it reaches the watermark level, it'll start
3795  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3796  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3797  * will occur, and a display engine hang could result.
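 *
 * For example (illustrative numbers): at a 100000 kHz dot clock with
 * 4 bytes per pixel and 5000 ns of latency, (100000 / 1000) * 4 * 5000
 * / 1000 = 2000 bytes drain during the latency window; with a 64-byte
 * cacheline that is 32 FIFO entries, so the returned level is
 * fifo_size - (32 + guard_size).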
3798  */
3799 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3800 					const struct intel_watermark_params *wm,
3801 					int fifo_size,
3802 					int pixel_size,
3803 					unsigned long latency_ns)
3804 {
3805 	long entries_required, wm_size;
3806 
3807 	/*
3808 	 * Note: we need to make sure we don't overflow for various clock &
3809 	 * latency values.
3810 	 * clocks go from a few thousand to several hundred thousand.
3811 	 * latency is usually a few thousand
3812 	 */
3813 	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3814 		1000;
3815 	entries_required = howmany(entries_required, wm->cacheline_size);
3816 
3817 	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3818 
3819 	wm_size = fifo_size - (entries_required + wm->guard_size);
3820 
3821 	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3822 
3823 	/* Don't promote wm_size to unsigned... */
3824 	if (wm_size > (long)wm->max_wm)
3825 		wm_size = wm->max_wm;
3826 	if (wm_size <= 0)
3827 		wm_size = wm->default_wm;
3828 	return wm_size;
3829 }
3830 
3831 struct cxsr_latency {
3832 	int is_desktop;
3833 	int is_ddr3;
3834 	unsigned long fsb_freq;
3835 	unsigned long mem_freq;
3836 	unsigned long display_sr;
3837 	unsigned long display_hpll_disable;
3838 	unsigned long cursor_sr;
3839 	unsigned long cursor_hpll_disable;
3840 };
3841 
3842 static const struct cxsr_latency cxsr_latency_table[] = {
3843 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
3844 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
3845 	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
3846 	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
3847 	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
3848 
3849 	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
3850 	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
3851 	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
3852 	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
3853 	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
3854 
3855 	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
3856 	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
3857 	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
3858 	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
3859 	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
3860 
3861 	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
3862 	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
3863 	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
3864 	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
3865 	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
3866 
3867 	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
3868 	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
3869 	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
3870 	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
3871 	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
3872 
3873 	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
3874 	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
3875 	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
3876 	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
3877 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
3878 };
3879 
3880 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3881 							 int is_ddr3,
3882 							 int fsb,
3883 							 int mem)
3884 {
3885 	const struct cxsr_latency *latency;
3886 	int i;
3887 
3888 	if (fsb == 0 || mem == 0)
3889 		return NULL;
3890 
3891 	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
3892 		latency = &cxsr_latency_table[i];
3893 		if (is_desktop == latency->is_desktop &&
3894 		    is_ddr3 == latency->is_ddr3 &&
3895 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
3896 			return latency;
3897 	}
3898 
3899 	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3900 
3901 	return NULL;
3902 }
3903 
3904 static void pineview_disable_cxsr(struct drm_device *dev)
3905 {
3906 	struct drm_i915_private *dev_priv = dev->dev_private;
3907 
3908 	/* deactivate cxsr */
3909 	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3910 }
3911 
3912 /*
3913  * Latency for FIFO fetches is dependent on several factors:
3914  *   - memory configuration (speed, channels)
3915  *   - chipset
3916  *   - current MCH state
3917  * It can be fairly high in some situations, so here we assume a fairly
3918  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3919  * set this value too high, the FIFO will fetch frequently to stay full)
3920  * and power consumption (set it too low to save power and we might see
3921  * FIFO underruns and display "flicker").
3922  *
3923  * A value of 5us seems to be a good balance; safe for very low end
3924  * platforms but not overly aggressive on lower latency configs.
3925  */
3926 static const int latency_ns = 5000;
3927 
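/*
 * DSPARB carves one shared FIFO between the display planes: the low
 * field is where plane B's allocation begins (i.e. plane A's size), the
 * next field is where plane C's begins, and plane B gets the difference.
 * E.g. (illustrative) dsparb = (96 << DSPARB_CSTART_SHIFT) | 48 gives
 * plane A 48 entries and plane B 96 - 48 = 48 entries.
 */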
3928 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3929 {
3930 	struct drm_i915_private *dev_priv = dev->dev_private;
3931 	uint32_t dsparb = I915_READ(DSPARB);
3932 	int size;
3933 
3934 	size = dsparb & 0x7f;
3935 	if (plane)
3936 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3937 
3938 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3939 		      plane ? "B" : "A", size);
3940 
3941 	return size;
3942 }
3943 
3944 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3945 {
3946 	struct drm_i915_private *dev_priv = dev->dev_private;
3947 	uint32_t dsparb = I915_READ(DSPARB);
3948 	int size;
3949 
3950 	size = dsparb & 0x1ff;
3951 	if (plane)
3952 		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3953 	size >>= 1; /* Convert to cachelines */
3954 
3955 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3956 		      plane ? "B" : "A", size);
3957 
3958 	return size;
3959 }
3960 
3961 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3962 {
3963 	struct drm_i915_private *dev_priv = dev->dev_private;
3964 	uint32_t dsparb = I915_READ(DSPARB);
3965 	int size;
3966 
3967 	size = dsparb & 0x7f;
3968 	size >>= 2; /* Convert to cachelines */
3969 
3970 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3971 		      plane ? "B" : "A",
3972 		      size);
3973 
3974 	return size;
3975 }
3976 
3977 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3978 {
3979 	struct drm_i915_private *dev_priv = dev->dev_private;
3980 	uint32_t dsparb = I915_READ(DSPARB);
3981 	int size;
3982 
3983 	size = dsparb & 0x7f;
3984 	size >>= 1; /* Convert to cachelines */
3985 
3986 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3987 		      plane ? "B" : "A", size);
3988 
3989 	return size;
3990 }
3991 
3992 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3993 {
3994 	struct drm_crtc *crtc, *enabled = NULL;
3995 
3996 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3997 		if (crtc->enabled && crtc->fb) {
3998 			if (enabled)
3999 				return NULL;
4000 			enabled = crtc;
4001 		}
4002 	}
4003 
4004 	return enabled;
4005 }
4006 
4007 static void pineview_update_wm(struct drm_device *dev)
4008 {
4009 	struct drm_i915_private *dev_priv = dev->dev_private;
4010 	struct drm_crtc *crtc;
4011 	const struct cxsr_latency *latency;
4012 	u32 reg;
4013 	unsigned long wm;
4014 
4015 	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
4016 					 dev_priv->fsb_freq, dev_priv->mem_freq);
4017 	if (!latency) {
4018 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
4019 		pineview_disable_cxsr(dev);
4020 		return;
4021 	}
4022 
4023 	crtc = single_enabled_crtc(dev);
4024 	if (crtc) {
4025 		int clock = crtc->mode.clock;
4026 		int pixel_size = crtc->fb->bits_per_pixel / 8;
4027 
4028 		/* Display SR */
4029 		wm = intel_calculate_wm(clock, &pineview_display_wm,
4030 					pineview_display_wm.fifo_size,
4031 					pixel_size, latency->display_sr);
4032 		reg = I915_READ(DSPFW1);
4033 		reg &= ~DSPFW_SR_MASK;
4034 		reg |= wm << DSPFW_SR_SHIFT;
4035 		I915_WRITE(DSPFW1, reg);
4036 		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
4037 
4038 		/* cursor SR */
4039 		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
4040 					pineview_display_wm.fifo_size,
4041 					pixel_size, latency->cursor_sr);
4042 		reg = I915_READ(DSPFW3);
4043 		reg &= ~DSPFW_CURSOR_SR_MASK;
4044 		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
4045 		I915_WRITE(DSPFW3, reg);
4046 
4047 		/* Display HPLL off SR */
4048 		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
4049 					pineview_display_hplloff_wm.fifo_size,
4050 					pixel_size, latency->display_hpll_disable);
4051 		reg = I915_READ(DSPFW3);
4052 		reg &= ~DSPFW_HPLL_SR_MASK;
4053 		reg |= wm & DSPFW_HPLL_SR_MASK;
4054 		I915_WRITE(DSPFW3, reg);
4055 
4056 		/* cursor HPLL off SR */
4057 		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4058 					pineview_display_hplloff_wm.fifo_size,
4059 					pixel_size, latency->cursor_hpll_disable);
4060 		reg = I915_READ(DSPFW3);
4061 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
4062 		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4063 		I915_WRITE(DSPFW3, reg);
4064 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4065 
4066 		/* activate cxsr */
4067 		I915_WRITE(DSPFW3,
4068 			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4069 		DRM_DEBUG_KMS("Self-refresh is enabled\n");
4070 	} else {
4071 		pineview_disable_cxsr(dev);
4072 		DRM_DEBUG_KMS("Self-refresh is disabled\n");
4073 	}
4074 }
4075 
4076 static bool g4x_compute_wm0(struct drm_device *dev,
4077 			    int plane,
4078 			    const struct intel_watermark_params *display,
4079 			    int display_latency_ns,
4080 			    const struct intel_watermark_params *cursor,
4081 			    int cursor_latency_ns,
4082 			    int *plane_wm,
4083 			    int *cursor_wm)
4084 {
4085 	struct drm_crtc *crtc;
4086 	int htotal, hdisplay, clock, pixel_size;
4087 	int line_time_us, line_count;
4088 	int entries, tlb_miss;
4089 
4090 	crtc = intel_get_crtc_for_plane(dev, plane);
4091 	if (crtc->fb == NULL || !crtc->enabled) {
4092 		*cursor_wm = cursor->guard_size;
4093 		*plane_wm = display->guard_size;
4094 		return false;
4095 	}
4096 
4097 	htotal = crtc->mode.htotal;
4098 	hdisplay = crtc->mode.hdisplay;
4099 	clock = crtc->mode.clock;
4100 	pixel_size = crtc->fb->bits_per_pixel / 8;
4101 
4102 	/* Use the small buffer method to calculate plane watermark */
4103 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4104 	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4105 	if (tlb_miss > 0)
4106 		entries += tlb_miss;
4107 	entries = howmany(entries, display->cacheline_size);
4108 	*plane_wm = entries + display->guard_size;
4109 	if (*plane_wm > (int)display->max_wm)
4110 		*plane_wm = display->max_wm;
4111 
4112 	/* Use the large buffer method to calculate cursor watermark */
4113 	line_time_us = ((htotal * 1000) / clock);
4114 	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4115 	entries = line_count * 64 * pixel_size;
4116 	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4117 	if (tlb_miss > 0)
4118 		entries += tlb_miss;
4119 	entries = howmany(entries, cursor->cacheline_size);
4120 	*cursor_wm = entries + cursor->guard_size;
4121 	if (*cursor_wm > (int)cursor->max_wm)
4122 		*cursor_wm = (int)cursor->max_wm;
4123 
4124 	return true;
4125 }
4126 
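/*
 * Worked example for g4x_compute_wm0() (illustrative numbers): at a
 * 150000 kHz dot clock with 4 bytes/pixel and 700 ns of latency, the
 * small buffer method needs (150000 * 4 / 1000) * 700 / 1000 = 420
 * bytes.  The large buffer method charges whole lines: with htotal 2200,
 * line_time_us = 2200 * 1000 / 150000 = 14, line_count = 1, and the
 * cursor fetch is 1 * 64 * 4 = 256 bytes before the TLB-miss adjustment.
 */
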
4127 /*
4128  * Check the wm result.
4129  *
4130  * If any calculated watermark value is larger than the maximum value that
4131  * can be programmed into the associated watermark register, that watermark
4132  * must be disabled.
4133  */
4134 static bool g4x_check_srwm(struct drm_device *dev,
4135 			   int display_wm, int cursor_wm,
4136 			   const struct intel_watermark_params *display,
4137 			   const struct intel_watermark_params *cursor)
4138 {
4139 	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4140 		      display_wm, cursor_wm);
4141 
4142 	if (display_wm > display->max_wm) {
4143 		DRM_DEBUG_KMS("display watermark is too large (%d/%ld), disabling\n",
4144 			      display_wm, display->max_wm);
4145 		return false;
4146 	}
4147 
4148 	if (cursor_wm > cursor->max_wm) {
4149 		DRM_DEBUG_KMS("cursor watermark is too large (%d/%ld), disabling\n",
4150 			      cursor_wm, cursor->max_wm);
4151 		return false;
4152 	}
4153 
4154 	if (!(display_wm || cursor_wm)) {
4155 		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4156 		return false;
4157 	}
4158 
4159 	return true;
4160 }
4161 
4162 static bool g4x_compute_srwm(struct drm_device *dev,
4163 			     int plane,
4164 			     int latency_ns,
4165 			     const struct intel_watermark_params *display,
4166 			     const struct intel_watermark_params *cursor,
4167 			     int *display_wm, int *cursor_wm)
4168 {
4169 	struct drm_crtc *crtc;
4170 	int hdisplay, htotal, pixel_size, clock;
4171 	unsigned long line_time_us;
4172 	int line_count, line_size;
4173 	int small, large;
4174 	int entries;
4175 
4176 	if (!latency_ns) {
4177 		*display_wm = *cursor_wm = 0;
4178 		return false;
4179 	}
4180 
4181 	crtc = intel_get_crtc_for_plane(dev, plane);
4182 	hdisplay = crtc->mode.hdisplay;
4183 	htotal = crtc->mode.htotal;
4184 	clock = crtc->mode.clock;
4185 	pixel_size = crtc->fb->bits_per_pixel / 8;
4186 
4187 	line_time_us = (htotal * 1000) / clock;
4188 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4189 	line_size = hdisplay * pixel_size;
4190 
4191 	/* Use the minimum of the small and large buffer method for primary */
4192 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4193 	large = line_count * line_size;
4194 
4195 	entries = howmany(min(small, large), display->cacheline_size);
4196 	*display_wm = entries + display->guard_size;
4197 
4198 	/* calculate the self-refresh watermark for display cursor */
4199 	entries = line_count * pixel_size * 64;
4200 	entries = howmany(entries, cursor->cacheline_size);
4201 	*cursor_wm = entries + cursor->guard_size;
4202 
4203 	return g4x_check_srwm(dev,
4204 			      *display_wm, *cursor_wm,
4205 			      display, cursor);
4206 }
4207 
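/* True only when exactly one bit is set, i.e. a single plane is active. */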
4208 #define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
4209 
4210 static void g4x_update_wm(struct drm_device *dev)
4211 {
4212 	static const int sr_latency_ns = 12000;
4213 	struct drm_i915_private *dev_priv = dev->dev_private;
4214 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4215 	int plane_sr, cursor_sr;
4216 	unsigned int enabled = 0;
4217 
4218 	if (g4x_compute_wm0(dev, 0,
4219 			    &g4x_wm_info, latency_ns,
4220 			    &g4x_cursor_wm_info, latency_ns,
4221 			    &planea_wm, &cursora_wm))
4222 		enabled |= 1;
4223 
4224 	if (g4x_compute_wm0(dev, 1,
4225 			    &g4x_wm_info, latency_ns,
4226 			    &g4x_cursor_wm_info, latency_ns,
4227 			    &planeb_wm, &cursorb_wm))
4228 		enabled |= 2;
4229 
4230 	plane_sr = cursor_sr = 0;
4231 	if (single_plane_enabled(enabled) &&
4232 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
4233 			     sr_latency_ns,
4234 			     &g4x_wm_info,
4235 			     &g4x_cursor_wm_info,
4236 			     &plane_sr, &cursor_sr))
4237 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4238 	else
4239 		I915_WRITE(FW_BLC_SELF,
4240 			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4241 
4242 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4243 		      planea_wm, cursora_wm,
4244 		      planeb_wm, cursorb_wm,
4245 		      plane_sr, cursor_sr);
4246 
4247 	I915_WRITE(DSPFW1,
4248 		   (plane_sr << DSPFW_SR_SHIFT) |
4249 		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4250 		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
4251 		   planea_wm);
4252 	I915_WRITE(DSPFW2,
4253 		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4254 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
4255 	/* HPLL off in SR has some issues on G4x... disable it */
4256 	I915_WRITE(DSPFW3,
4257 		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4258 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4259 }
4260 
4261 static void i965_update_wm(struct drm_device *dev)
4262 {
4263 	struct drm_i915_private *dev_priv = dev->dev_private;
4264 	struct drm_crtc *crtc;
4265 	int srwm = 1;
4266 	int cursor_sr = 16;
4267 
4268 	/* Calc sr entries for one plane configs */
4269 	crtc = single_enabled_crtc(dev);
4270 	if (crtc) {
4271 		/* self-refresh has much higher latency */
4272 		static const int sr_latency_ns = 12000;
4273 		int clock = crtc->mode.clock;
4274 		int htotal = crtc->mode.htotal;
4275 		int hdisplay = crtc->mode.hdisplay;
4276 		int pixel_size = crtc->fb->bits_per_pixel / 8;
4277 		unsigned long line_time_us;
4278 		int entries;
4279 
4280 		line_time_us = ((htotal * 1000) / clock);
4281 
4282 		/* Use ns/us then divide to preserve precision */
4283 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4284 			pixel_size * hdisplay;
4285 		entries = howmany(entries, I915_FIFO_LINE_SIZE);
4286 		srwm = I965_FIFO_SIZE - entries;
4287 		if (srwm < 0)
4288 			srwm = 1;
4289 		srwm &= 0x1ff;
4290 		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4291 			      entries, srwm);
4292 
4293 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4294 			pixel_size * 64;
4295 		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
4296 		cursor_sr = i965_cursor_wm_info.fifo_size -
4297 			(entries + i965_cursor_wm_info.guard_size);
4298 
4299 		if (cursor_sr > i965_cursor_wm_info.max_wm)
4300 			cursor_sr = i965_cursor_wm_info.max_wm;
4301 
4302 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4303 			      "cursor %d\n", srwm, cursor_sr);
4304 
4305 		if (IS_CRESTLINE(dev))
4306 			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4307 	} else {
4308 		/* Turn off self refresh if both pipes are enabled */
4309 		if (IS_CRESTLINE(dev))
4310 			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4311 				   & ~FW_BLC_SELF_EN);
4312 	}
4313 
4314 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4315 		      srwm);
4316 
4317 	/* 965 has limitations... */
4318 	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4319 		   (8 << 16) | (8 << 8) | (8 << 0));
4320 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4321 	/* update cursor SR watermark */
4322 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4323 }
4324 
4325 static void i9xx_update_wm(struct drm_device *dev)
4326 {
4327 	struct drm_i915_private *dev_priv = dev->dev_private;
4328 	const struct intel_watermark_params *wm_info;
4329 	uint32_t fwater_lo;
4330 	uint32_t fwater_hi;
4331 	int cwm, srwm = 1;
4332 	int fifo_size;
4333 	int planea_wm, planeb_wm;
4334 	struct drm_crtc *crtc, *enabled = NULL;
4335 
4336 	if (IS_I945GM(dev))
4337 		wm_info = &i945_wm_info;
4338 	else if (!IS_GEN2(dev))
4339 		wm_info = &i915_wm_info;
4340 	else
4341 		wm_info = &i855_wm_info;
4342 
4343 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4344 	crtc = intel_get_crtc_for_plane(dev, 0);
4345 	if (crtc->enabled && crtc->fb) {
4346 		planea_wm = intel_calculate_wm(crtc->mode.clock,
4347 					       wm_info, fifo_size,
4348 					       crtc->fb->bits_per_pixel / 8,
4349 					       latency_ns);
4350 		enabled = crtc;
4351 	} else
4352 		planea_wm = fifo_size - wm_info->guard_size;
4353 
4354 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4355 	crtc = intel_get_crtc_for_plane(dev, 1);
4356 	if (crtc->enabled && crtc->fb) {
4357 		planeb_wm = intel_calculate_wm(crtc->mode.clock,
4358 					       wm_info, fifo_size,
4359 					       crtc->fb->bits_per_pixel / 8,
4360 					       latency_ns);
4361 		if (enabled == NULL)
4362 			enabled = crtc;
4363 		else
4364 			enabled = NULL;
4365 	} else
4366 		planeb_wm = fifo_size - wm_info->guard_size;
4367 
4368 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4369 
4370 	/*
4371 	 * Overlay gets an aggressive default since video jitter is bad.
4372 	 */
4373 	cwm = 2;
4374 
4375 	/* Play safe and disable self-refresh before adjusting watermarks. */
4376 	if (IS_I945G(dev) || IS_I945GM(dev))
4377 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4378 	else if (IS_I915GM(dev))
4379 		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4380 
4381 	/* Calc sr entries for one plane configs */
4382 	if (HAS_FW_BLC(dev) && enabled) {
4383 		/* self-refresh has much higher latency */
4384 		static const int sr_latency_ns = 6000;
4385 		int clock = enabled->mode.clock;
4386 		int htotal = enabled->mode.htotal;
4387 		int hdisplay = enabled->mode.hdisplay;
4388 		int pixel_size = enabled->fb->bits_per_pixel / 8;
4389 		unsigned long line_time_us;
4390 		int entries;
4391 
4392 		line_time_us = (htotal * 1000) / clock;
4393 
4394 		/* Use ns/us then divide to preserve precision */
4395 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4396 			pixel_size * hdisplay;
4397 		entries = howmany(entries, wm_info->cacheline_size);
4398 		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4399 		srwm = wm_info->fifo_size - entries;
4400 		if (srwm < 0)
4401 			srwm = 1;
4402 
4403 		if (IS_I945G(dev) || IS_I945GM(dev))
4404 			I915_WRITE(FW_BLC_SELF,
4405 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4406 		else if (IS_I915GM(dev))
4407 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4408 	}
4409 
4410 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4411 		      planea_wm, planeb_wm, cwm, srwm);
4412 
4413 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4414 	fwater_hi = (cwm & 0x1f);
4415 
4416 	/* Set request length to 8 cachelines per fetch */
4417 	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4418 	fwater_hi = fwater_hi | (1 << 8);
4419 
4420 	I915_WRITE(FW_BLC, fwater_lo);
4421 	I915_WRITE(FW_BLC2, fwater_hi);
4422 
4423 	if (HAS_FW_BLC(dev)) {
4424 		if (enabled) {
4425 			if (IS_I945G(dev) || IS_I945GM(dev))
4426 				I915_WRITE(FW_BLC_SELF,
4427 					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4428 			else if (IS_I915GM(dev))
4429 				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4430 			DRM_DEBUG_KMS("memory self refresh enabled\n");
4431 		} else
4432 			DRM_DEBUG_KMS("memory self refresh disabled\n");
4433 	}
4434 }
4435 
4436 static void i830_update_wm(struct drm_device *dev)
4437 {
4438 	struct drm_i915_private *dev_priv = dev->dev_private;
4439 	struct drm_crtc *crtc;
4440 	uint32_t fwater_lo;
4441 	int planea_wm;
4442 
4443 	crtc = single_enabled_crtc(dev);
4444 	if (crtc == NULL)
4445 		return;
4446 
4447 	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4448 				       dev_priv->display.get_fifo_size(dev, 0),
4449 				       crtc->fb->bits_per_pixel / 8,
4450 				       latency_ns);
4451 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4452 	fwater_lo |= (3<<8) | planea_wm;
4453 
4454 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4455 
4456 	I915_WRITE(FW_BLC, fwater_lo);
4457 }
4458 
4459 #define ILK_LP0_PLANE_LATENCY		700
4460 #define ILK_LP0_CURSOR_LATENCY		1300
4461 
4462 /*
4463  * Check the wm result.
4464  *
4465  * If any calculated watermark value is larger than the maximum value that
4466  * can be programmed into the associated watermark register, that watermark
4467  * must be disabled.
4468  */
4469 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4470 				int fbc_wm, int display_wm, int cursor_wm,
4471 				const struct intel_watermark_params *display,
4472 				const struct intel_watermark_params *cursor)
4473 {
4474 	struct drm_i915_private *dev_priv = dev->dev_private;
4475 
4476 	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4477 		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4478 
4479 	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4480 		DRM_DEBUG_KMS("fbc watermark(%d) is too large (%d), disabling wm%d+\n",
4481 			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4482 
4483 		/* fbc has its own way to disable FBC WM */
4484 		I915_WRITE(DISP_ARB_CTL,
4485 			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4486 		return false;
4487 	}
4488 
4489 	if (display_wm > display->max_wm) {
4490 		DRM_DEBUG_KMS("display watermark(%d) is too large (%ld), disabling wm%d+\n",
4491 			      display_wm, display->max_wm, level);
4492 		return false;
4493 	}
4494 
4495 	if (cursor_wm > cursor->max_wm) {
4496 		DRM_DEBUG_KMS("cursor watermark(%d) is too large (%ld), disabling wm%d+\n",
4497 			      cursor_wm, cursor->max_wm, level);
4498 		return false;
4499 	}
4500 
4501 	if (!(fbc_wm || display_wm || cursor_wm)) {
4502 		DRM_DEBUG_KMS("latency for wm%d is 0, disabling wm%d+\n", level, level);
4503 		return false;
4504 	}
4505 
4506 	return true;
4507 }
4508 
4509 /*
4510  * Compute the watermark values for WM[1-3].
4511  */
4512 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4513 				  int latency_ns,
4514 				  const struct intel_watermark_params *display,
4515 				  const struct intel_watermark_params *cursor,
4516 				  int *fbc_wm, int *display_wm, int *cursor_wm)
4517 {
4518 	struct drm_crtc *crtc;
4519 	unsigned long line_time_us;
4520 	int hdisplay, htotal, pixel_size, clock;
4521 	int line_count, line_size;
4522 	int small, large;
4523 	int entries;
4524 
4525 	if (!latency_ns) {
4526 		*fbc_wm = *display_wm = *cursor_wm = 0;
4527 		return false;
4528 	}
4529 
4530 	crtc = intel_get_crtc_for_plane(dev, plane);
4531 	hdisplay = crtc->mode.hdisplay;
4532 	htotal = crtc->mode.htotal;
4533 	clock = crtc->mode.clock;
4534 	pixel_size = crtc->fb->bits_per_pixel / 8;
4535 
4536 	line_time_us = (htotal * 1000) / clock;
4537 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4538 	line_size = hdisplay * pixel_size;
4539 
4540 	/* Use the minimum of the small and large buffer method for primary */
4541 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4542 	large = line_count * line_size;
4543 
4544 	entries = howmany(min(small, large), display->cacheline_size);
4545 	*display_wm = entries + display->guard_size;
4546 
4547 	/*
4548 	 * Spec says:
4549 	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4550 	 */
4551 	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;
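	/*
	 * E.g. (illustrative) display_wm = 46 on a 1920-wide 4 byte/pixel
	 * plane (line_size = 7680) gives howmany(46 * 64, 7680) = 1, so
	 * fbc_wm = 3.
	 */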
4552 
4553 	/* calculate the self-refresh watermark for display cursor */
4554 	entries = line_count * pixel_size * 64;
4555 	entries = howmany(entries, cursor->cacheline_size);
4556 	*cursor_wm = entries + cursor->guard_size;
4557 
4558 	return ironlake_check_srwm(dev, level,
4559 				   *fbc_wm, *display_wm, *cursor_wm,
4560 				   display, cursor);
4561 }
4562 
4563 static void ironlake_update_wm(struct drm_device *dev)
4564 {
4565 	struct drm_i915_private *dev_priv = dev->dev_private;
4566 	int fbc_wm, plane_wm, cursor_wm;
4567 	unsigned int enabled;
4568 
4569 	enabled = 0;
4570 	if (g4x_compute_wm0(dev, 0,
4571 			    &ironlake_display_wm_info,
4572 			    ILK_LP0_PLANE_LATENCY,
4573 			    &ironlake_cursor_wm_info,
4574 			    ILK_LP0_CURSOR_LATENCY,
4575 			    &plane_wm, &cursor_wm)) {
4576 		I915_WRITE(WM0_PIPEA_ILK,
4577 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4578 		DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
4580 			      plane_wm, cursor_wm);
4581 		enabled |= 1;
4582 	}
4583 
4584 	if (g4x_compute_wm0(dev, 1,
4585 			    &ironlake_display_wm_info,
4586 			    ILK_LP0_PLANE_LATENCY,
4587 			    &ironlake_cursor_wm_info,
4588 			    ILK_LP0_CURSOR_LATENCY,
4589 			    &plane_wm, &cursor_wm)) {
4590 		I915_WRITE(WM0_PIPEB_ILK,
4591 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4592 		DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
4594 			      plane_wm, cursor_wm);
4595 		enabled |= 2;
4596 	}
4597 
4598 	/*
4599 	 * Calculate and update the self-refresh watermark only when one
4600 	 * display plane is used.
4601 	 */
4602 	I915_WRITE(WM3_LP_ILK, 0);
4603 	I915_WRITE(WM2_LP_ILK, 0);
4604 	I915_WRITE(WM1_LP_ILK, 0);
4605 
4606 	if (!single_plane_enabled(enabled))
4607 		return;
4608 	enabled = ffs(enabled) - 1;
4609 
4610 	/* WM1 */
4611 	if (!ironlake_compute_srwm(dev, 1, enabled,
4612 				   ILK_READ_WM1_LATENCY() * 500,
4613 				   &ironlake_display_srwm_info,
4614 				   &ironlake_cursor_srwm_info,
4615 				   &fbc_wm, &plane_wm, &cursor_wm))
4616 		return;
4617 
4618 	I915_WRITE(WM1_LP_ILK,
4619 		   WM1_LP_SR_EN |
4620 		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4621 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4622 		   (plane_wm << WM1_LP_SR_SHIFT) |
4623 		   cursor_wm);
4624 
4625 	/* WM2 */
4626 	if (!ironlake_compute_srwm(dev, 2, enabled,
4627 				   ILK_READ_WM2_LATENCY() * 500,
4628 				   &ironlake_display_srwm_info,
4629 				   &ironlake_cursor_srwm_info,
4630 				   &fbc_wm, &plane_wm, &cursor_wm))
4631 		return;
4632 
4633 	I915_WRITE(WM2_LP_ILK,
4634 		   WM2_LP_EN |
4635 		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4636 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4637 		   (plane_wm << WM1_LP_SR_SHIFT) |
4638 		   cursor_wm);
4639 
4640 	/*
4641 	 * WM3 is unsupported on ILK, probably because we don't have latency
4642 	 * data for that power state
4643 	 */
4644 }
4645 
4646 void sandybridge_update_wm(struct drm_device *dev)
4647 {
4648 	struct drm_i915_private *dev_priv = dev->dev_private;
4649 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4650 	u32 val;
4651 	int fbc_wm, plane_wm, cursor_wm;
4652 	unsigned int enabled;
4653 
4654 	enabled = 0;
4655 	if (g4x_compute_wm0(dev, 0,
4656 			    &sandybridge_display_wm_info, latency,
4657 			    &sandybridge_cursor_wm_info, latency,
4658 			    &plane_wm, &cursor_wm)) {
4659 		val = I915_READ(WM0_PIPEA_ILK);
4660 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4661 		I915_WRITE(WM0_PIPEA_ILK, val |
4662 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4663 		DRM_DEBUG_KMS("FIFO watermarks for pipe A - plane %d, cursor: %d\n",
4665 			      plane_wm, cursor_wm);
4666 		enabled |= 1;
4667 	}
4668 
4669 	if (g4x_compute_wm0(dev, 1,
4670 			    &sandybridge_display_wm_info, latency,
4671 			    &sandybridge_cursor_wm_info, latency,
4672 			    &plane_wm, &cursor_wm)) {
4673 		val = I915_READ(WM0_PIPEB_ILK);
4674 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4675 		I915_WRITE(WM0_PIPEB_ILK, val |
4676 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4677 		DRM_DEBUG_KMS("FIFO watermarks for pipe B - plane %d, cursor: %d\n",
4679 			      plane_wm, cursor_wm);
4680 		enabled |= 2;
4681 	}
4682 
4683 	/* IVB has 3 pipes */
4684 	if (IS_IVYBRIDGE(dev) &&
4685 	    g4x_compute_wm0(dev, 2,
4686 			    &sandybridge_display_wm_info, latency,
4687 			    &sandybridge_cursor_wm_info, latency,
4688 			    &plane_wm, &cursor_wm)) {
4689 		val = I915_READ(WM0_PIPEC_IVB);
4690 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4691 		I915_WRITE(WM0_PIPEC_IVB, val |
4692 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4693 		DRM_DEBUG_KMS("FIFO watermarks for pipe C - plane %d, cursor: %d\n",
4695 			      plane_wm, cursor_wm);
4696 		enabled |= 1 << 2;	/* pipe C */
4697 	}
4698 
4699 	/*
4700 	 * Calculate and update the self-refresh watermark only when one
4701 	 * display plane is used.
4702 	 *
4703 	 * SNB supports 3 levels of watermarks.
4704 	 *
4705 	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4706 	 * and disabled in descending order.
4707 	 *
4708 	 */
4709 	I915_WRITE(WM3_LP_ILK, 0);
4710 	I915_WRITE(WM2_LP_ILK, 0);
4711 	I915_WRITE(WM1_LP_ILK, 0);
4712 
4713 	if (!single_plane_enabled(enabled) ||
4714 	    dev_priv->sprite_scaling_enabled)
4715 		return;
4716 	enabled = ffs(enabled) - 1;
4717 
4718 	/* WM1 */
4719 	if (!ironlake_compute_srwm(dev, 1, enabled,
4720 				   SNB_READ_WM1_LATENCY() * 500,
4721 				   &sandybridge_display_srwm_info,
4722 				   &sandybridge_cursor_srwm_info,
4723 				   &fbc_wm, &plane_wm, &cursor_wm))
4724 		return;
4725 
4726 	I915_WRITE(WM1_LP_ILK,
4727 		   WM1_LP_SR_EN |
4728 		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4729 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4730 		   (plane_wm << WM1_LP_SR_SHIFT) |
4731 		   cursor_wm);
4732 
4733 	/* WM2 */
4734 	if (!ironlake_compute_srwm(dev, 2, enabled,
4735 				   SNB_READ_WM2_LATENCY() * 500,
4736 				   &sandybridge_display_srwm_info,
4737 				   &sandybridge_cursor_srwm_info,
4738 				   &fbc_wm, &plane_wm, &cursor_wm))
4739 		return;
4740 
4741 	I915_WRITE(WM2_LP_ILK,
4742 		   WM2_LP_EN |
4743 		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4744 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4745 		   (plane_wm << WM1_LP_SR_SHIFT) |
4746 		   cursor_wm);
4747 
4748 	/* WM3 */
4749 	if (!ironlake_compute_srwm(dev, 3, enabled,
4750 				   SNB_READ_WM3_LATENCY() * 500,
4751 				   &sandybridge_display_srwm_info,
4752 				   &sandybridge_cursor_srwm_info,
4753 				   &fbc_wm, &plane_wm, &cursor_wm))
4754 		return;
4755 
4756 	I915_WRITE(WM3_LP_ILK,
4757 		   WM3_LP_EN |
4758 		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4759 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4760 		   (plane_wm << WM1_LP_SR_SHIFT) |
4761 		   cursor_wm);
4762 }
4763 
4764 static bool
4765 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4766 			      uint32_t sprite_width, int pixel_size,
4767 			      const struct intel_watermark_params *display,
4768 			      int display_latency_ns, int *sprite_wm)
4769 {
4770 	struct drm_crtc *crtc;
4771 	int clock;
4772 	int entries, tlb_miss;
4773 
4774 	crtc = intel_get_crtc_for_plane(dev, plane);
4775 	if (crtc->fb == NULL || !crtc->enabled) {
4776 		*sprite_wm = display->guard_size;
4777 		return false;
4778 	}
4779 
4780 	clock = crtc->mode.clock;
4781 
4782 	/* Use the small buffer method to calculate the sprite watermark */
4783 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4784 	tlb_miss = display->fifo_size*display->cacheline_size -
4785 		sprite_width * 8;
4786 	if (tlb_miss > 0)
4787 		entries += tlb_miss;
4788 	entries = howmany(entries, display->cacheline_size);
4789 	*sprite_wm = entries + display->guard_size;
4790 	if (*sprite_wm > (int)display->max_wm)
4791 		*sprite_wm = display->max_wm;
4792 
4793 	return true;
4794 }
4795 
4796 static bool
4797 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4798 				uint32_t sprite_width, int pixel_size,
4799 				const struct intel_watermark_params *display,
4800 				int latency_ns, int *sprite_wm)
4801 {
4802 	struct drm_crtc *crtc;
4803 	unsigned long line_time_us;
4804 	int clock;
4805 	int line_count, line_size;
4806 	int small, large;
4807 	int entries;
4808 
4809 	if (!latency_ns) {
4810 		*sprite_wm = 0;
4811 		return false;
4812 	}
4813 
4814 	crtc = intel_get_crtc_for_plane(dev, plane);
4815 	clock = crtc->mode.clock;
4816 	if (!clock) {
4817 		*sprite_wm = 0;
4818 		return false;
4819 	}
4820 
4821 	line_time_us = (sprite_width * 1000) / clock;
4822 	if (!line_time_us) {
4823 		*sprite_wm = 0;
4824 		return false;
4825 	}
4826 
4827 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4828 	line_size = sprite_width * pixel_size;
4829 
4830 	/* Use the minimum of the small and large buffer method for primary */
4831 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4832 	large = line_count * line_size;
4833 
4834 	entries = howmany(min(small, large), display->cacheline_size);
4835 	*sprite_wm = entries + display->guard_size;
4836 
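	/* Anything above 0x3ff does not fit in the sprite LP watermark
	 * field, so a larger result leaves this level disabled. */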
4837 	return *sprite_wm <= 0x3ff;
4838 }
4839 
4840 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4841 					 uint32_t sprite_width, int pixel_size)
4842 {
4843 	struct drm_i915_private *dev_priv = dev->dev_private;
4844 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4845 	u32 val;
4846 	int sprite_wm, reg;
4847 	int ret;
4848 
4849 	switch (pipe) {
4850 	case 0:
4851 		reg = WM0_PIPEA_ILK;
4852 		break;
4853 	case 1:
4854 		reg = WM0_PIPEB_ILK;
4855 		break;
4856 	case 2:
4857 		reg = WM0_PIPEC_IVB;
4858 		break;
4859 	default:
4860 		return; /* bad pipe */
4861 	}
4862 
4863 	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4864 					    &sandybridge_display_wm_info,
4865 					    latency, &sprite_wm);
4866 	if (!ret) {
4867 		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4868 			      pipe);
4869 		return;
4870 	}
4871 
4872 	val = I915_READ(reg);
4873 	val &= ~WM0_PIPE_SPRITE_MASK;
4874 	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4875 	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
4876 
4878 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4879 					      pixel_size,
4880 					      &sandybridge_display_srwm_info,
4881 					      SNB_READ_WM1_LATENCY() * 500,
4882 					      &sprite_wm);
4883 	if (!ret) {
4884 		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4885 			      pipe);
4886 		return;
4887 	}
4888 	I915_WRITE(WM1S_LP_ILK, sprite_wm);
4889 
4890 	/* Only IVB has two more LP watermarks for sprite */
4891 	if (!IS_IVYBRIDGE(dev))
4892 		return;
4893 
4894 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4895 					      pixel_size,
4896 					      &sandybridge_display_srwm_info,
4897 					      SNB_READ_WM2_LATENCY() * 500,
4898 					      &sprite_wm);
4899 	if (!ret) {
4900 		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4901 			      pipe);
4902 		return;
4903 	}
4904 	I915_WRITE(WM2S_LP_IVB, sprite_wm);
4905 
4906 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4907 					      pixel_size,
4908 					      &sandybridge_display_srwm_info,
4909 					      SNB_READ_WM3_LATENCY() * 500,
4910 					      &sprite_wm);
4911 	if (!ret) {
4912 		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4913 			      pipe);
4914 		return;
4915 	}
4916 	I915_WRITE(WM3S_LP_IVB, sprite_wm);
4917 }
4918 
4919 /**
4920  * intel_update_watermarks - update FIFO watermark values based on current modes
4921  *
4922  * Calculate watermark values for the various WM regs based on current mode
4923  * and plane configuration.
4924  *
4925  * There are several cases to deal with here:
4926  *   - normal (i.e. non-self-refresh)
4927  *   - self-refresh (SR) mode
4928  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4929  *   - lines are small relative to FIFO size (buffer can hold more than 2
4930  *     lines), so need to account for TLB latency
4931  *
4932  *   The normal calculation is:
4933  *     watermark = dotclock * bytes per pixel * latency
4934  *   where latency is platform & configuration dependent (we assume pessimal
4935  *   values here).
4936  *
4937  *   The SR calculation is:
4938  *     watermark = (trunc(latency/line time)+1) * surface width *
4939  *       bytes per pixel
4940  *   where
4941  *     line time = htotal / dotclock
4942  *     surface width = hdisplay for normal plane and 64 for cursor
4943  *   and latency is assumed to be high, as above.
4944  *
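 * For example (illustrative numbers): htotal 2200 at a 148500 kHz dot
 * clock gives a line time of roughly 14.8 us; with 30 us of latency and
 * a 1920-wide, 4 byte/pixel plane the SR watermark works out to
 * (trunc(30 / 14.8) + 1) * 1920 * 4 bytes before rounding.
 *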
4945  * The final value programmed to the register should always be rounded up,
4946  * and include an extra 2 entries to account for clock crossings.
4947  *
4948  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4949  * to set the non-SR watermarks to 8.
4950  */
4951 static void intel_update_watermarks(struct drm_device *dev)
4952 {
4953 	struct drm_i915_private *dev_priv = dev->dev_private;
4954 
4955 	if (dev_priv->display.update_wm)
4956 		dev_priv->display.update_wm(dev);
4957 }
4958 
4959 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4960 				    uint32_t sprite_width, int pixel_size)
4961 {
4962 	struct drm_i915_private *dev_priv = dev->dev_private;
4963 
4964 	if (dev_priv->display.update_sprite_wm)
4965 		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4966 						   pixel_size);
4967 }
4968 
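/*
 * A non-negative i915_panel_use_ssc tunable forces SSC on or off;
 * otherwise fall back to the VBT value unless a quirk disables SSC on
 * this machine.
 */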
4969 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4970 {
4971 	if (i915_panel_use_ssc >= 0)
4972 		return i915_panel_use_ssc != 0;
4973 	return dev_priv->lvds_use_ssc
4974 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4975 }
4976 
4977 /**
4978  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4979  * @crtc: CRTC structure
4980  * @mode: requested mode
4981  *
4982  * A pipe may be connected to one or more outputs.  Based on the depth of the
4983  * attached framebuffer, choose a good color depth to use on the pipe.
4984  *
4985  * If possible, match the pipe depth to the fb depth.  In some cases, this
4986  * isn't ideal, because the connected output supports a lesser or restricted
4987  * set of depths.  Resolve that here:
4988  *    LVDS typically supports only 6bpc, so clamp down in that case
4989  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4990  *    Displays may support a restricted set as well, check EDID and clamp as
4991  *      appropriate.
4992  *    DP may want to dither down to 6bpc to fit larger modes
4993  *
4994  * RETURNS:
4995  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4996  * true if they don't match).
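 *
 * E.g. (illustrative) a depth-24 fb driving an LVDS panel wired for 6bpc
 * clamps display_bpc to 6, so *pipe_bpp becomes 18 and dithering is
 * requested.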
4997  */
4998 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4999 					 unsigned int *pipe_bpp,
5000 					 struct drm_display_mode *mode)
5001 {
5002 	struct drm_device *dev = crtc->dev;
5003 	struct drm_i915_private *dev_priv = dev->dev_private;
5004 	struct drm_encoder *encoder;
5005 	struct drm_connector *connector;
5006 	unsigned int display_bpc = UINT_MAX, bpc;
5007 
5008 	/* Walk the encoders & connectors on this crtc, get min bpc */
5009 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
5010 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5011 
5012 		if (encoder->crtc != crtc)
5013 			continue;
5014 
5015 		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
5016 			unsigned int lvds_bpc;
5017 
5018 			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
5019 			    LVDS_A3_POWER_UP)
5020 				lvds_bpc = 8;
5021 			else
5022 				lvds_bpc = 6;
5023 
5024 			if (lvds_bpc < display_bpc) {
5025 				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5026 				display_bpc = lvds_bpc;
5027 			}
5028 			continue;
5029 		}
5030 
5031 		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
5032 			/* Use VBT settings if we have an eDP panel */
5033 			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
5034 
5035 			if (edp_bpc < display_bpc) {
5036 				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5037 				display_bpc = edp_bpc;
5038 			}
5039 			continue;
5040 		}
5041 
5042 		/* Not one of the known troublemakers, check the EDID */
5043 		list_for_each_entry(connector, &dev->mode_config.connector_list,
5044 				    head) {
5045 			if (connector->encoder != encoder)
5046 				continue;
5047 
5048 			/* Don't use an invalid EDID bpc value */
5049 			if (connector->display_info.bpc &&
5050 			    connector->display_info.bpc < display_bpc) {
5051 				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5052 				display_bpc = connector->display_info.bpc;
5053 			}
5054 		}
5055 
5056 		/*
5057 		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
5058 		 * through, clamp it down.  (Note: >12bpc will be caught below.)
5059 		 */
5060 		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5061 			if (display_bpc > 8 && display_bpc < 12) {
5062 				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5063 				display_bpc = 12;
5064 			} else {
5065 				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5066 				display_bpc = 8;
5067 			}
5068 		}
5069 	}
5070 
5071 	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5072 		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5073 		display_bpc = 6;
5074 	}
5075 
5076 	/*
5077 	 * We could just drive the pipe at the highest bpc all the time and
5078 	 * enable dithering as needed, but that costs bandwidth.  So choose
5079 	 * the minimum value that expresses the full color range of the fb but
5080 	 * also stays within the max display bpc discovered above.
5081 	 */
5082 
5083 	switch (crtc->fb->depth) {
5084 	case 8:
5085 		bpc = 8; /* since we go through a colormap */
5086 		break;
5087 	case 15:
5088 	case 16:
5089 		bpc = 6; /* min is 18bpp */
5090 		break;
5091 	case 24:
5092 		bpc = 8;
5093 		break;
5094 	case 30:
5095 		bpc = 10;
5096 		break;
5097 	case 48:
5098 		bpc = 12;
5099 		break;
5100 	default:
5101 		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5102 		bpc = min((unsigned int)8, display_bpc);
5103 		break;
5104 	}
5105 
5106 	display_bpc = min(display_bpc, bpc);
5107 
5108 	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5109 			 bpc, display_bpc);
5110 
5111 	*pipe_bpp = display_bpc * 3;
5112 
5113 	return display_bpc != bpc;
5114 }
5115 
5116 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5117 {
5118 	struct drm_device *dev = crtc->dev;
5119 	struct drm_i915_private *dev_priv = dev->dev_private;
5120 	int refclk;
5121 
5122 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5123 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5124 		refclk = dev_priv->lvds_ssc_freq * 1000;
5125 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5126 			      refclk / 1000);
5127 	} else if (!IS_GEN2(dev)) {
5128 		refclk = 96000;
5129 	} else {
5130 		refclk = 48000;
5131 	}
5132 
5133 	return refclk;
5134 }
5135 
5136 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5137 				      intel_clock_t *clock)
5138 {
5139 	/* SDVO TV has fixed PLL values that depend on its clock range;
5140 	   this mirrors the VBIOS setting. */
5141 	if (adjusted_mode->clock >= 100000
5142 	    && adjusted_mode->clock < 140500) {
5143 		clock->p1 = 2;
5144 		clock->p2 = 10;
5145 		clock->n = 3;
5146 		clock->m1 = 16;
5147 		clock->m2 = 8;
5148 	} else if (adjusted_mode->clock >= 140500
5149 		   && adjusted_mode->clock <= 200000) {
5150 		clock->p1 = 1;
5151 		clock->p2 = 10;
5152 		clock->n = 6;
5153 		clock->m1 = 12;
5154 		clock->m2 = 8;
5155 	}
5156 }
5157 
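/*
 * The FP0/FP1 registers pack the feedback dividers as n << 16 | m1 << 8
 * | m2 (Pineview stores n one-hot instead).  E.g. (illustrative)
 * n = 6, m1 = 12, m2 = 8 encodes as 0x00060c08 on non-Pineview parts.
 */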
5158 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5159 				     intel_clock_t *clock,
5160 				     intel_clock_t *reduced_clock)
5161 {
5162 	struct drm_device *dev = crtc->dev;
5163 	struct drm_i915_private *dev_priv = dev->dev_private;
5164 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5165 	int pipe = intel_crtc->pipe;
5166 	u32 fp, fp2 = 0;
5167 
5168 	if (IS_PINEVIEW(dev)) {
5169 		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5170 		if (reduced_clock)
5171 			fp2 = (1 << reduced_clock->n) << 16 |
5172 				reduced_clock->m1 << 8 | reduced_clock->m2;
5173 	} else {
5174 		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5175 		if (reduced_clock)
5176 			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5177 				reduced_clock->m2;
5178 	}
5179 
5180 	I915_WRITE(FP0(pipe), fp);
5181 
5182 	intel_crtc->lowfreq_avail = false;
5183 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5184 	    reduced_clock && i915_powersave) {
5185 		I915_WRITE(FP1(pipe), fp2);
5186 		intel_crtc->lowfreq_avail = true;
5187 	} else {
5188 		I915_WRITE(FP1(pipe), fp);
5189 	}
5190 }
5191 
5192 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5193 			      struct drm_display_mode *mode,
5194 			      struct drm_display_mode *adjusted_mode,
5195 			      int x, int y,
5196 			      struct drm_framebuffer *old_fb)
5197 {
5198 	struct drm_device *dev = crtc->dev;
5199 	struct drm_i915_private *dev_priv = dev->dev_private;
5200 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5201 	int pipe = intel_crtc->pipe;
5202 	int plane = intel_crtc->plane;
5203 	int refclk, num_connectors = 0;
5204 	intel_clock_t clock, reduced_clock;
5205 	u32 dpll, dspcntr, pipeconf, vsyncshift;
5206 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5207 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5208 	struct drm_mode_config *mode_config = &dev->mode_config;
5209 	struct intel_encoder *encoder;
5210 	const intel_limit_t *limit;
5211 	int ret;
5212 	u32 temp;
5213 	u32 lvds_sync = 0;
5214 
5215 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5216 		if (encoder->base.crtc != crtc)
5217 			continue;
5218 
5219 		switch (encoder->type) {
5220 		case INTEL_OUTPUT_LVDS:
5221 			is_lvds = true;
5222 			break;
5223 		case INTEL_OUTPUT_SDVO:
5224 		case INTEL_OUTPUT_HDMI:
5225 			is_sdvo = true;
5226 			if (encoder->needs_tv_clock)
5227 				is_tv = true;
5228 			break;
5229 		case INTEL_OUTPUT_DVO:
5230 			is_dvo = true;
5231 			break;
5232 		case INTEL_OUTPUT_TVOUT:
5233 			is_tv = true;
5234 			break;
5235 		case INTEL_OUTPUT_ANALOG:
5236 			is_crt = true;
5237 			break;
5238 		case INTEL_OUTPUT_DISPLAYPORT:
5239 			is_dp = true;
5240 			break;
5241 		}
5242 
5243 		num_connectors++;
5244 	}
5245 
5246 	refclk = i9xx_get_refclk(crtc, num_connectors);
5247 
5248 	/*
5249 	 * Returns a set of divisors for the desired target clock with the given
5250 	 * refclk, or false.  The returned values represent the clock equation:
5251 	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5252 	 */
5253 	limit = intel_limit(crtc, refclk);
5254 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5255 			     &clock);
5256 	if (!ok) {
5257 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5258 		return -EINVAL;
5259 	}
5260 
5261 	/* Ensure that the cursor is valid for the new mode before changing... */
5262 	intel_crtc_update_cursor(crtc, true);
5263 
5264 	if (is_lvds && dev_priv->lvds_downclock_avail) {
5265 		/*
5266 		 * Ensure we match the reduced clock's P to the target clock.
5267 		 * If the clocks don't match, we can't switch the display clock
5268 		 * by using FP0/FP1.  In that case we disable the LVDS
5269 		 * downclock feature.
5270 		 */
5271 		has_reduced_clock = limit->find_pll(limit, crtc,
5272 						    dev_priv->lvds_downclock,
5273 						    refclk,
5274 						    &clock,
5275 						    &reduced_clock);
5276 	}
5277 
5278 	if (is_sdvo && is_tv)
5279 		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5280 
5281 	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5282 				 &reduced_clock : NULL);
5283 
5284 	dpll = DPLL_VGA_MODE_DIS;
5285 
5286 	if (!IS_GEN2(dev)) {
5287 		if (is_lvds)
5288 			dpll |= DPLLB_MODE_LVDS;
5289 		else
5290 			dpll |= DPLLB_MODE_DAC_SERIAL;
5291 		if (is_sdvo) {
5292 			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5293 			if (pixel_multiplier > 1) {
5294 				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5295 					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5296 			}
5297 			dpll |= DPLL_DVO_HIGH_SPEED;
5298 		}
5299 		if (is_dp)
5300 			dpll |= DPLL_DVO_HIGH_SPEED;
5301 
5302 		/* compute bitmask from p1 value */
5303 		if (IS_PINEVIEW(dev))
5304 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5305 		else {
5306 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5307 			if (IS_G4X(dev) && has_reduced_clock)
5308 				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5309 		}
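		/*
		 * Note the P1 field is a one-hot mask rather than a
		 * binary value: e.g. p1 = 3 yields 1 << (3 - 1) = 0x4
		 * before the field shift.
		 */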
5310 		switch (clock.p2) {
5311 		case 5:
5312 			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5313 			break;
5314 		case 7:
5315 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5316 			break;
5317 		case 10:
5318 			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5319 			break;
5320 		case 14:
5321 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5322 			break;
5323 		}
5324 		if (INTEL_INFO(dev)->gen >= 4)
5325 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5326 	} else {
5327 		if (is_lvds) {
5328 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5329 		} else {
5330 			if (clock.p1 == 2)
5331 				dpll |= PLL_P1_DIVIDE_BY_TWO;
5332 			else
5333 				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5334 			if (clock.p2 == 4)
5335 				dpll |= PLL_P2_DIVIDE_BY_4;
5336 		}
5337 	}
5338 
5339 	if (is_sdvo && is_tv)
5340 		dpll |= PLL_REF_INPUT_TVCLKINBC;
5341 	else if (is_tv)
5342 		/* XXX: just matching BIOS for now */
5343 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5344 		dpll |= 3;
5345 	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5346 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5347 	else
5348 		dpll |= PLL_REF_INPUT_DREFCLK;
5349 
5350 	/* setup pipeconf */
5351 	pipeconf = I915_READ(PIPECONF(pipe));
5352 
5353 	/* Set up the display plane register */
5354 	dspcntr = DISPPLANE_GAMMA_ENABLE;
5355 
5356 	if (pipe == 0)
5357 		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5358 	else
5359 		dspcntr |= DISPPLANE_SEL_PIPE_B;
5360 
5361 	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5362 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
5363 		 * core speed.
5364 		 *
5365 		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5366 		 * pipe == 0 check?
5367 		 */
5368 		if (mode->clock >
5369 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5370 			pipeconf |= PIPECONF_DOUBLE_WIDE;
5371 		else
5372 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5373 	}
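	/*
	 * E.g. with a 200000 kHz core display clock, any mode clock
	 * above 180000 kHz (90%) selects double-wide mode here.
	 */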
5374 
5375 	/* default to 8bpc */
5376 	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5377 	if (is_dp) {
5378 		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5379 			pipeconf |= PIPECONF_BPP_6 |
5380 				    PIPECONF_DITHER_EN |
5381 				    PIPECONF_DITHER_TYPE_SP;
5382 		}
5383 	}
5384 
5385 	dpll |= DPLL_VCO_ENABLE;
5386 
5387 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5388 	drm_mode_debug_printmodeline(mode);
5389 
5390 	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5391 
5392 	POSTING_READ(DPLL(pipe));
5393 	DELAY(150);
5394 
5395 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5396 	 * This is an exception to the general rule that mode_set doesn't turn
5397 	 * things on.
5398 	 */
5399 	if (is_lvds) {
5400 		temp = I915_READ(LVDS);
5401 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5402 		if (pipe == 1) {
5403 			temp |= LVDS_PIPEB_SELECT;
5404 		} else {
5405 			temp &= ~LVDS_PIPEB_SELECT;
5406 		}
5407 		/* set the corresponding LVDS_BORDER bit */
5408 		temp |= dev_priv->lvds_border_bits;
5409 		/* Set the B0-B3 data pairs corresponding to whether we're going to
5410 		 * set the DPLLs for dual-channel mode or not.
5411 		 */
5412 		if (clock.p2 == 7)
5413 			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5414 		else
5415 			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5416 
5417 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5418 		 * appropriately here, but we need to look more thoroughly into how
5419 		 * panels behave in the two modes.
5420 		 */
5421 		/* set the dithering flag on LVDS as needed */
5422 		if (INTEL_INFO(dev)->gen >= 4) {
5423 			if (dev_priv->lvds_dither)
5424 				temp |= LVDS_ENABLE_DITHER;
5425 			else
5426 				temp &= ~LVDS_ENABLE_DITHER;
5427 		}
5428 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5429 			lvds_sync |= LVDS_HSYNC_POLARITY;
5430 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5431 			lvds_sync |= LVDS_VSYNC_POLARITY;
5432 		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5433 		    != lvds_sync) {
5434 			char flags[2] = "-+";
5435 			DRM_INFO("Changing LVDS panel from "
5436 				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5437 				 flags[!(temp & LVDS_HSYNC_POLARITY)],
5438 				 flags[!(temp & LVDS_VSYNC_POLARITY)],
5439 				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5440 				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5441 			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5442 			temp |= lvds_sync;
5443 		}
5444 		I915_WRITE(LVDS, temp);
5445 	}
5446 
5447 	if (is_dp) {
5448 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
5449 	}
5450 
5451 	I915_WRITE(DPLL(pipe), dpll);
5452 
5453 	/* Wait for the clocks to stabilize. */
5454 	POSTING_READ(DPLL(pipe));
5455 	DELAY(150);
5456 
5457 	if (INTEL_INFO(dev)->gen >= 4) {
5458 		temp = 0;
5459 		if (is_sdvo) {
5460 			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5461 			if (temp > 1)
5462 				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5463 			else
5464 				temp = 0;
5465 		}
5466 		I915_WRITE(DPLL_MD(pipe), temp);
5467 	} else {
5468 		/* The pixel multiplier can only be updated once the
5469 		 * DPLL is enabled and the clocks are stable.
5470 		 *
5471 		 * So write it again.
5472 		 */
5473 		I915_WRITE(DPLL(pipe), dpll);
5474 	}
5475 
5476 	if (HAS_PIPE_CXSR(dev)) {
5477 		if (intel_crtc->lowfreq_avail) {
5478 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5479 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5480 		} else {
5481 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5482 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5483 		}
5484 	}
5485 
5486 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
5487 	if (!IS_GEN2(dev) &&
5488 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5489 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5490 		/* the chip adds 2 halflines automatically */
5491 		adjusted_mode->crtc_vtotal -= 1;
5492 		adjusted_mode->crtc_vblank_end -= 1;
5493 		vsyncshift = adjusted_mode->crtc_hsync_start
5494 			     - adjusted_mode->crtc_htotal/2;
5495 	} else {
5496 		pipeconf |= PIPECONF_PROGRESSIVE;
5497 		vsyncshift = 0;
5498 	}
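	/*
	 * E.g. for a CEA 1920x1080i timing (htotal 2200, hsync_start
	 * 2008) this yields vsyncshift = 2008 - 2200/2 = 908.
	 */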
5499 
5500 	if (!IS_GEN3(dev))
5501 		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5502 
5503 	I915_WRITE(HTOTAL(pipe),
5504 		   (adjusted_mode->crtc_hdisplay - 1) |
5505 		   ((adjusted_mode->crtc_htotal - 1) << 16));
5506 	I915_WRITE(HBLANK(pipe),
5507 		   (adjusted_mode->crtc_hblank_start - 1) |
5508 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5509 	I915_WRITE(HSYNC(pipe),
5510 		   (adjusted_mode->crtc_hsync_start - 1) |
5511 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5512 
5513 	I915_WRITE(VTOTAL(pipe),
5514 		   (adjusted_mode->crtc_vdisplay - 1) |
5515 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
5516 	I915_WRITE(VBLANK(pipe),
5517 		   (adjusted_mode->crtc_vblank_start - 1) |
5518 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
5519 	I915_WRITE(VSYNC(pipe),
5520 		   (adjusted_mode->crtc_vsync_start - 1) |
5521 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5522 
5523 	/* pipesrc and dspsize control the size that is scaled from,
5524 	 * which should always be the user's requested size.
5525 	 */
5526 	I915_WRITE(DSPSIZE(plane),
5527 		   ((mode->vdisplay - 1) << 16) |
5528 		   (mode->hdisplay - 1));
5529 	I915_WRITE(DSPPOS(plane), 0);
5530 	I915_WRITE(PIPESRC(pipe),
5531 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5532 
5533 	I915_WRITE(PIPECONF(pipe), pipeconf);
5534 	POSTING_READ(PIPECONF(pipe));
5535 	intel_enable_pipe(dev_priv, pipe, false);
5536 
5537 	intel_wait_for_vblank(dev, pipe);
5538 
5539 	I915_WRITE(DSPCNTR(plane), dspcntr);
5540 	POSTING_READ(DSPCNTR(plane));
5541 	intel_enable_plane(dev_priv, plane, pipe);
5542 
5543 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
5544 
5545 	intel_update_watermarks(dev);
5546 
5547 	return ret;
5548 }
5549 
5550 /*
5551  * Initialize reference clocks when the driver loads
5552  */
5553 void ironlake_init_pch_refclk(struct drm_device *dev)
5554 {
5555 	struct drm_i915_private *dev_priv = dev->dev_private;
5556 	struct drm_mode_config *mode_config = &dev->mode_config;
5557 	struct intel_encoder *encoder;
5558 	u32 temp;
5559 	bool has_lvds = false;
5560 	bool has_cpu_edp = false;
5561 	bool has_pch_edp = false;
5562 	bool has_panel = false;
5563 	bool has_ck505 = false;
5564 	bool can_ssc = false;
5565 
5566 	/* We need to take the global config into account */
5567 	list_for_each_entry(encoder, &mode_config->encoder_list,
5568 			    base.head) {
5569 		switch (encoder->type) {
5570 		case INTEL_OUTPUT_LVDS:
5571 			has_panel = true;
5572 			has_lvds = true;
5573 			break;
5574 		case INTEL_OUTPUT_EDP:
5575 			has_panel = true;
5576 			if (intel_encoder_is_pch_edp(&encoder->base))
5577 				has_pch_edp = true;
5578 			else
5579 				has_cpu_edp = true;
5580 			break;
5581 		}
5582 	}
5583 
5584 	if (HAS_PCH_IBX(dev)) {
5585 		has_ck505 = dev_priv->display_clock_mode;
5586 		can_ssc = has_ck505;
5587 	} else {
5588 		has_ck505 = false;
5589 		can_ssc = true;
5590 	}
5591 
5592 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5593 		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5594 		      has_ck505);
5595 
5596 	/* Ironlake: try to set up the display ref clock before DPLL
5597 	 * enabling. This is only under the driver's control after
5598 	 * PCH B stepping; earlier chipset steppings simply ignore
5599 	 * this setting.
5600 	 */
5601 	temp = I915_READ(PCH_DREF_CONTROL);
5602 	/* Always enable nonspread source */
5603 	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5604 
5605 	if (has_ck505)
5606 		temp |= DREF_NONSPREAD_CK505_ENABLE;
5607 	else
5608 		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5609 
5610 	if (has_panel) {
5611 		temp &= ~DREF_SSC_SOURCE_MASK;
5612 		temp |= DREF_SSC_SOURCE_ENABLE;
5613 
5614 		/* SSC must be turned on before enabling the CPU output  */
5615 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5616 			DRM_DEBUG_KMS("Using SSC on panel\n");
5617 			temp |= DREF_SSC1_ENABLE;
5618 		} else
5619 			temp &= ~DREF_SSC1_ENABLE;
5620 
5621 		/* Get SSC going before enabling the outputs */
5622 		I915_WRITE(PCH_DREF_CONTROL, temp);
5623 		POSTING_READ(PCH_DREF_CONTROL);
5624 		DELAY(200);
5625 
5626 		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5627 
5628 		/* Enable CPU source on CPU attached eDP */
5629 		if (has_cpu_edp) {
5630 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5631 				DRM_DEBUG_KMS("Using SSC on eDP\n");
5632 				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5633 			}
5634 			else
5635 				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5636 		} else
5637 			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5638 
5639 		I915_WRITE(PCH_DREF_CONTROL, temp);
5640 		POSTING_READ(PCH_DREF_CONTROL);
5641 		DELAY(200);
5642 	} else {
5643 		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5644 
5645 		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5646 
5647 		/* Turn off CPU output */
5648 		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5649 
5650 		I915_WRITE(PCH_DREF_CONTROL, temp);
5651 		POSTING_READ(PCH_DREF_CONTROL);
5652 		DELAY(200);
5653 
5654 		/* Turn off the SSC source */
5655 		temp &= ~DREF_SSC_SOURCE_MASK;
5656 		temp |= DREF_SSC_SOURCE_DISABLE;
5657 
5658 		/* Turn off SSC1 */
5659 		temp &= ~ DREF_SSC1_ENABLE;
5660 
5661 		I915_WRITE(PCH_DREF_CONTROL, temp);
5662 		POSTING_READ(PCH_DREF_CONTROL);
5663 		DELAY(200);
5664 	}
5665 }
5666 
5667 static int ironlake_get_refclk(struct drm_crtc *crtc)
5668 {
5669 	struct drm_device *dev = crtc->dev;
5670 	struct drm_i915_private *dev_priv = dev->dev_private;
5671 	struct intel_encoder *encoder;
5672 	struct drm_mode_config *mode_config = &dev->mode_config;
5673 	struct intel_encoder *edp_encoder = NULL;
5674 	int num_connectors = 0;
5675 	bool is_lvds = false;
5676 
5677 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5678 		if (encoder->base.crtc != crtc)
5679 			continue;
5680 
5681 		switch (encoder->type) {
5682 		case INTEL_OUTPUT_LVDS:
5683 			is_lvds = true;
5684 			break;
5685 		case INTEL_OUTPUT_EDP:
5686 			edp_encoder = encoder;
5687 			break;
5688 		}
5689 		num_connectors++;
5690 	}
5691 
5692 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5693 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5694 			      dev_priv->lvds_ssc_freq);
5695 		return dev_priv->lvds_ssc_freq * 1000;
5696 	}
5697 
5698 	return 120000;
5699 }
5700 
5701 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5702 				  struct drm_display_mode *mode,
5703 				  struct drm_display_mode *adjusted_mode,
5704 				  int x, int y,
5705 				  struct drm_framebuffer *old_fb)
5706 {
5707 	struct drm_device *dev = crtc->dev;
5708 	struct drm_i915_private *dev_priv = dev->dev_private;
5709 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5710 	int pipe = intel_crtc->pipe;
5711 	int plane = intel_crtc->plane;
5712 	int refclk, num_connectors = 0;
5713 	intel_clock_t clock, reduced_clock;
5714 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5715 	bool ok, has_reduced_clock = false, is_sdvo = false;
5716 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5717 	struct intel_encoder *has_edp_encoder = NULL;
5718 	struct drm_mode_config *mode_config = &dev->mode_config;
5719 	struct intel_encoder *encoder;
5720 	const intel_limit_t *limit;
5721 	int ret;
5722 	struct fdi_m_n m_n = {0};
5723 	u32 temp;
5724 	u32 lvds_sync = 0;
5725 	int target_clock, pixel_multiplier, lane, link_bw, factor;
5726 	unsigned int pipe_bpp;
5727 	bool dither;
5728 
5729 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5730 		if (encoder->base.crtc != crtc)
5731 			continue;
5732 
5733 		switch (encoder->type) {
5734 		case INTEL_OUTPUT_LVDS:
5735 			is_lvds = true;
5736 			break;
5737 		case INTEL_OUTPUT_SDVO:
5738 		case INTEL_OUTPUT_HDMI:
5739 			is_sdvo = true;
5740 			if (encoder->needs_tv_clock)
5741 				is_tv = true;
5742 			break;
5743 		case INTEL_OUTPUT_TVOUT:
5744 			is_tv = true;
5745 			break;
5746 		case INTEL_OUTPUT_ANALOG:
5747 			is_crt = true;
5748 			break;
5749 		case INTEL_OUTPUT_DISPLAYPORT:
5750 			is_dp = true;
5751 			break;
5752 		case INTEL_OUTPUT_EDP:
5753 			has_edp_encoder = encoder;
5754 			break;
5755 		}
5756 
5757 		num_connectors++;
5758 	}
5759 
5760 	refclk = ironlake_get_refclk(crtc);
5761 
5762 	/*
5763 	 * Returns a set of divisors for the desired target clock with the given
5764 	 * refclk, or false.  The returned values represent the clock equation:
5765 	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5766 	 */
5767 	limit = intel_limit(crtc, refclk);
5768 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5769 			     &clock);
5770 	if (!ok) {
5771 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5772 		return -EINVAL;
5773 	}
5774 
5775 	/* Ensure that the cursor is valid for the new mode before changing... */
5776 	intel_crtc_update_cursor(crtc, true);
5777 
5778 	if (is_lvds && dev_priv->lvds_downclock_avail) {
5779 		/*
5780 		 * Ensure we match the reduced clock's P to the target clock.
5781 		 * If the clocks don't match, we can't switch the display clock
5782 		 * by using FP0/FP1.  In that case we disable the LVDS
5783 		 * downclock feature.
5784 		 */
5785 		has_reduced_clock = limit->find_pll(limit, crtc,
5786 						    dev_priv->lvds_downclock,
5787 						    refclk,
5788 						    &clock,
5789 						    &reduced_clock);
5790 	}
5791 	/* SDVO TV has fixed PLL values that depend on its clock range;
5792 	   this mirrors the VBIOS setting. */
5793 	if (is_sdvo && is_tv) {
5794 		if (adjusted_mode->clock >= 100000
5795 		    && adjusted_mode->clock < 140500) {
5796 			clock.p1 = 2;
5797 			clock.p2 = 10;
5798 			clock.n = 3;
5799 			clock.m1 = 16;
5800 			clock.m2 = 8;
5801 		} else if (adjusted_mode->clock >= 140500
5802 			   && adjusted_mode->clock <= 200000) {
5803 			clock.p1 = 1;
5804 			clock.p2 = 10;
5805 			clock.n = 6;
5806 			clock.m1 = 12;
5807 			clock.m2 = 8;
5808 		}
5809 	}
5810 
5811 	/* FDI link */
5812 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5813 	lane = 0;
5814 	/* CPU eDP doesn't require FDI link, so just set DP M/N
5815 	   according to current link config */
5816 	if (has_edp_encoder &&
5817 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5818 		target_clock = mode->clock;
5819 		intel_edp_link_config(has_edp_encoder,
5820 				      &lane, &link_bw);
5821 	} else {
5822 		/* [e]DP over FDI requires target mode clock
5823 		   instead of link clock */
5824 		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5825 			target_clock = mode->clock;
5826 		else
5827 			target_clock = adjusted_mode->clock;
5828 
5829 		/* FDI is a binary signal running at ~2.7GHz, encoding
5830 		 * each output octet as 10 bits (8b/10b). The actual frequency
5831 		 * is stored as a divider into a 100MHz clock, and the
5832 		 * mode pixel clock is stored in units of 1KHz.
5833 		 * Hence the bw of each lane in terms of the mode signal
5834 		 * is:
5835 		 */
5836 		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
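		/*
		 * E.g. if intel_fdi_link_freq() reports a divider of 27
		 * (27 * 100MHz = 2.7GHz bit rate), link_bw becomes
		 * 27 * 100000 / 10 = 270000 kHz of pixel data per lane.
		 */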
5837 	}
5838 
5839 	/* determine panel color depth */
5840 	temp = I915_READ(PIPECONF(pipe));
5841 	temp &= ~PIPE_BPC_MASK;
5842 	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5843 	switch (pipe_bpp) {
5844 	case 18:
5845 		temp |= PIPE_6BPC;
5846 		break;
5847 	case 24:
5848 		temp |= PIPE_8BPC;
5849 		break;
5850 	case 30:
5851 		temp |= PIPE_10BPC;
5852 		break;
5853 	case 36:
5854 		temp |= PIPE_12BPC;
5855 		break;
5856 	default:
5857 		kprintf("intel_choose_pipe_bpp returned invalid value %d\n",
5858 			pipe_bpp);
5859 		temp |= PIPE_8BPC;
5860 		pipe_bpp = 24;
5861 		break;
5862 	}
5863 
5864 	intel_crtc->bpp = pipe_bpp;
5865 	I915_WRITE(PIPECONF(pipe), temp);
5866 
5867 	if (!lane) {
5868 		/*
5869 		 * Account for spread spectrum to avoid
5870 		 * oversubscribing the link. Max center spread
5871 		 * is 2.5%; use 5% for safety's sake.
5872 		 */
5873 		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5874 		lane = bps / (link_bw * 8) + 1;
5875 	}
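	/*
	 * E.g. a 148500 kHz (1080p) target clock at 24bpp: bps =
	 * 148500 * 24 * 21 / 20 = 3742200, so with link_bw = 270000
	 * this needs 3742200 / (270000 * 8) + 1 = 2 lanes.
	 */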
5876 
5877 	intel_crtc->fdi_lanes = lane;
5878 
5879 	if (pixel_multiplier > 1)
5880 		link_bw *= pixel_multiplier;
5881 	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5882 			     &m_n);
5883 
5884 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5885 	if (has_reduced_clock)
5886 		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5887 			reduced_clock.m2;
5888 
5889 	/* Enable autotuning of the PLL clock (if permissible) */
5890 	factor = 21;
5891 	if (is_lvds) {
5892 		if ((intel_panel_use_ssc(dev_priv) &&
5893 		     dev_priv->lvds_ssc_freq == 100) ||
5894 		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5895 			factor = 25;
5896 	} else if (is_sdvo && is_tv)
5897 		factor = 20;
5898 
5899 	if (clock.m < factor * clock.n)
5900 		fp |= FP_CB_TUNE;
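	/*
	 * E.g. with factor = 21 and illustrative dividers m = 100, n = 4:
	 * 100 >= 21 * 4 leaves FP_CB_TUNE clear, while m = 80 with the
	 * same n would set it.
	 */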
5901 
5902 	dpll = 0;
5903 
5904 	if (is_lvds)
5905 		dpll |= DPLLB_MODE_LVDS;
5906 	else
5907 		dpll |= DPLLB_MODE_DAC_SERIAL;
5908 	if (is_sdvo) {
5909 		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5910 		if (pixel_multiplier > 1) {
5911 			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5912 		}
5913 		dpll |= DPLL_DVO_HIGH_SPEED;
5914 	}
5915 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5916 		dpll |= DPLL_DVO_HIGH_SPEED;
5917 
5918 	/* compute bitmask from p1 value */
5919 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5920 	/* also FPA1 */
5921 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5922 
5923 	switch (clock.p2) {
5924 	case 5:
5925 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5926 		break;
5927 	case 7:
5928 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5929 		break;
5930 	case 10:
5931 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5932 		break;
5933 	case 14:
5934 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5935 		break;
5936 	}
5937 
5938 	if (is_sdvo && is_tv)
5939 		dpll |= PLL_REF_INPUT_TVCLKINBC;
5940 	else if (is_tv)
5941 		/* XXX: just matching BIOS for now */
5942 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5943 		dpll |= 3;
5944 	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5945 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5946 	else
5947 		dpll |= PLL_REF_INPUT_DREFCLK;
5948 
5949 	/* setup pipeconf */
5950 	pipeconf = I915_READ(PIPECONF(pipe));
5951 
5952 	/* Set up the display plane register */
5953 	dspcntr = DISPPLANE_GAMMA_ENABLE;
5954 
5955 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5956 	drm_mode_debug_printmodeline(mode);
5957 
5958 	/* PCH eDP needs FDI, but CPU eDP does not */
5959 	if (!intel_crtc->no_pll) {
5960 		if (!has_edp_encoder ||
5961 		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5962 			I915_WRITE(PCH_FP0(pipe), fp);
5963 			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5964 
5965 			POSTING_READ(PCH_DPLL(pipe));
5966 			DELAY(150);
5967 		}
5968 	} else {
5969 		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5970 		    fp == I915_READ(PCH_FP0(0))) {
5971 			intel_crtc->use_pll_a = true;
5972 			DRM_DEBUG_KMS("using pipe a dpll\n");
5973 		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5974 			   fp == I915_READ(PCH_FP0(1))) {
5975 			intel_crtc->use_pll_a = false;
5976 			DRM_DEBUG_KMS("using pipe b dpll\n");
5977 		} else {
5978 			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5979 			return -EINVAL;
5980 		}
5981 	}
5982 
5983 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5984 	 * This is an exception to the general rule that mode_set doesn't turn
5985 	 * things on.
5986 	 */
5987 	if (is_lvds) {
5988 		temp = I915_READ(PCH_LVDS);
5989 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5990 		if (HAS_PCH_CPT(dev)) {
5991 			temp &= ~PORT_TRANS_SEL_MASK;
5992 			temp |= PORT_TRANS_SEL_CPT(pipe);
5993 		} else {
5994 			if (pipe == 1)
5995 				temp |= LVDS_PIPEB_SELECT;
5996 			else
5997 				temp &= ~LVDS_PIPEB_SELECT;
5998 		}
5999 
6000 		/* set the corresponding LVDS_BORDER bit */
6001 		temp |= dev_priv->lvds_border_bits;
6002 		/* Set the B0-B3 data pairs corresponding to whether we're going to
6003 		 * set the DPLLs for dual-channel mode or not.
6004 		 */
6005 		if (clock.p2 == 7)
6006 			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
6007 		else
6008 			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
6009 
6010 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
6011 		 * appropriately here, but we need to look more thoroughly into how
6012 		 * panels behave in the two modes.
6013 		 */
6014 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6015 			lvds_sync |= LVDS_HSYNC_POLARITY;
6016 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6017 			lvds_sync |= LVDS_VSYNC_POLARITY;
6018 		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6019 		    != lvds_sync) {
6020 			char flags[2] = "-+";
6021 			DRM_INFO("Changing LVDS panel from "
6022 				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6023 				 flags[!(temp & LVDS_HSYNC_POLARITY)],
6024 				 flags[!(temp & LVDS_VSYNC_POLARITY)],
6025 				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6026 				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6027 			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6028 			temp |= lvds_sync;
6029 		}
6030 		I915_WRITE(PCH_LVDS, temp);
6031 	}
6032 
6033 	pipeconf &= ~PIPECONF_DITHER_EN;
6034 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
6035 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
6036 		pipeconf |= PIPECONF_DITHER_EN;
6037 		pipeconf |= PIPECONF_DITHER_TYPE_SP;
6038 	}
6039 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6040 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
6041 	} else {
6042 		/* For non-DP output, clear any trans DP clock recovery setting. */
6043 		I915_WRITE(TRANSDATA_M1(pipe), 0);
6044 		I915_WRITE(TRANSDATA_N1(pipe), 0);
6045 		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6046 		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6047 	}
6048 
6049 	if (!intel_crtc->no_pll &&
6050 	    (!has_edp_encoder ||
6051 	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6052 		I915_WRITE(PCH_DPLL(pipe), dpll);
6053 
6054 		/* Wait for the clocks to stabilize. */
6055 		POSTING_READ(PCH_DPLL(pipe));
6056 		DELAY(150);
6057 
6058 		/* The pixel multiplier can only be updated once the
6059 		 * DPLL is enabled and the clocks are stable.
6060 		 *
6061 		 * So write it again.
6062 		 */
6063 		I915_WRITE(PCH_DPLL(pipe), dpll);
6064 	}
6065 
6066 	intel_crtc->lowfreq_avail = false;
6067 	if (!intel_crtc->no_pll) {
6068 		if (is_lvds && has_reduced_clock && i915_powersave) {
6069 			I915_WRITE(PCH_FP1(pipe), fp2);
6070 			intel_crtc->lowfreq_avail = true;
6071 			if (HAS_PIPE_CXSR(dev)) {
6072 				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6073 				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6074 			}
6075 		} else {
6076 			I915_WRITE(PCH_FP1(pipe), fp);
6077 			if (HAS_PIPE_CXSR(dev)) {
6078 				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6079 				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6080 			}
6081 		}
6082 	}
6083 
6084 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
6085 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6086 		pipeconf |= PIPECONF_INTERLACED_ILK;
6087 		/* the chip adds 2 halflines automatically */
6088 		adjusted_mode->crtc_vtotal -= 1;
6089 		adjusted_mode->crtc_vblank_end -= 1;
6090 		I915_WRITE(VSYNCSHIFT(pipe),
6091 			   adjusted_mode->crtc_hsync_start
6092 			   - adjusted_mode->crtc_htotal/2);
6093 	} else {
6094 		pipeconf |= PIPECONF_PROGRESSIVE;
6095 		I915_WRITE(VSYNCSHIFT(pipe), 0);
6096 	}
6097 
6098 	I915_WRITE(HTOTAL(pipe),
6099 		   (adjusted_mode->crtc_hdisplay - 1) |
6100 		   ((adjusted_mode->crtc_htotal - 1) << 16));
6101 	I915_WRITE(HBLANK(pipe),
6102 		   (adjusted_mode->crtc_hblank_start - 1) |
6103 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6104 	I915_WRITE(HSYNC(pipe),
6105 		   (adjusted_mode->crtc_hsync_start - 1) |
6106 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6107 
6108 	I915_WRITE(VTOTAL(pipe),
6109 		   (adjusted_mode->crtc_vdisplay - 1) |
6110 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
6111 	I915_WRITE(VBLANK(pipe),
6112 		   (adjusted_mode->crtc_vblank_start - 1) |
6113 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
6114 	I915_WRITE(VSYNC(pipe),
6115 		   (adjusted_mode->crtc_vsync_start - 1) |
6116 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6117 
6118 	/* pipesrc controls the size that is scaled from, which should
6119 	 * always be the user's requested size.
6120 	 */
6121 	I915_WRITE(PIPESRC(pipe),
6122 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6123 
6124 	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6125 	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6126 	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6127 	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6128 
6129 	if (has_edp_encoder &&
6130 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6131 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6132 	}
6133 
6134 	I915_WRITE(PIPECONF(pipe), pipeconf);
6135 	POSTING_READ(PIPECONF(pipe));
6136 
6137 	intel_wait_for_vblank(dev, pipe);
6138 
6139 	I915_WRITE(DSPCNTR(plane), dspcntr);
6140 	POSTING_READ(DSPCNTR(plane));
6141 
6142 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
6143 
6144 	intel_update_watermarks(dev);
6145 
6146 	return ret;
6147 }
6148 
6149 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6150 			       struct drm_display_mode *mode,
6151 			       struct drm_display_mode *adjusted_mode,
6152 			       int x, int y,
6153 			       struct drm_framebuffer *old_fb)
6154 {
6155 	struct drm_device *dev = crtc->dev;
6156 	struct drm_i915_private *dev_priv = dev->dev_private;
6157 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6158 	int pipe = intel_crtc->pipe;
6159 	int ret;
6160 
6161 	drm_vblank_pre_modeset(dev, pipe);
6162 
6163 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6164 					      x, y, old_fb);
6165 	drm_vblank_post_modeset(dev, pipe);
6166 
6167 	if (ret)
6168 		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6169 	else
6170 		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6171 
6172 	return ret;
6173 }
6174 
6175 static bool intel_eld_uptodate(struct drm_connector *connector,
6176 			       int reg_eldv, uint32_t bits_eldv,
6177 			       int reg_elda, uint32_t bits_elda,
6178 			       int reg_edid)
6179 {
6180 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6181 	uint8_t *eld = connector->eld;
6182 	uint32_t i;
6183 
6184 	i = I915_READ(reg_eldv);
6185 	i &= bits_eldv;
6186 
6187 	if (!eld[0])
6188 		return !i;
6189 
6190 	if (!i)
6191 		return false;
6192 
6193 	i = I915_READ(reg_elda);
6194 	i &= ~bits_elda;
6195 	I915_WRITE(reg_elda, i);
6196 
6197 	for (i = 0; i < eld[2]; i++)
6198 		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6199 			return false;
6200 
6201 	return true;
6202 }
6203 
6204 static void g4x_write_eld(struct drm_connector *connector,
6205 			  struct drm_crtc *crtc)
6206 {
6207 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6208 	uint8_t *eld = connector->eld;
6209 	uint32_t eldv;
6210 	uint32_t len;
6211 	uint32_t i;
6212 
6213 	i = I915_READ(G4X_AUD_VID_DID);
6214 
6215 	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6216 		eldv = G4X_ELDV_DEVCL_DEVBLC;
6217 	else
6218 		eldv = G4X_ELDV_DEVCTG;
6219 
6220 	if (intel_eld_uptodate(connector,
6221 			       G4X_AUD_CNTL_ST, eldv,
6222 			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6223 			       G4X_HDMIW_HDMIEDID))
6224 		return;
6225 
6226 	i = I915_READ(G4X_AUD_CNTL_ST);
6227 	i &= ~(eldv | G4X_ELD_ADDR);
6228 	len = (i >> 9) & 0x1f;		/* ELD buffer size */
6229 	I915_WRITE(G4X_AUD_CNTL_ST, i);
6230 
6231 	if (!eld[0])
6232 		return;
6233 
6234 	if (eld[2] < (uint8_t)len)
6235 		len = eld[2];
6236 	DRM_DEBUG_KMS("ELD size %d\n", len);
6237 	for (i = 0; i < len; i++)
6238 		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6239 
6240 	i = I915_READ(G4X_AUD_CNTL_ST);
6241 	i |= eldv;
6242 	I915_WRITE(G4X_AUD_CNTL_ST, i);
6243 }
6244 
6245 static void ironlake_write_eld(struct drm_connector *connector,
6246 				     struct drm_crtc *crtc)
6247 {
6248 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6249 	uint8_t *eld = connector->eld;
6250 	uint32_t eldv;
6251 	uint32_t i;
6252 	int len;
6253 	int hdmiw_hdmiedid;
6254 	int aud_config;
6255 	int aud_cntl_st;
6256 	int aud_cntrl_st2;
6257 
6258 	if (HAS_PCH_IBX(connector->dev)) {
6259 		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
6260 		aud_config = IBX_AUD_CONFIG_A;
6261 		aud_cntl_st = IBX_AUD_CNTL_ST_A;
6262 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6263 	} else {
6264 		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
6265 		aud_config = CPT_AUD_CONFIG_A;
6266 		aud_cntl_st = CPT_AUD_CNTL_ST_A;
6267 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6268 	}
6269 
6270 	i = to_intel_crtc(crtc)->pipe;
6271 	hdmiw_hdmiedid += i * 0x100;
6272 	aud_cntl_st += i * 0x100;
6273 	aud_config += i * 0x100;
6274 
6275 	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));
6276 
6277 	i = I915_READ(aud_cntl_st);
6278 	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
6279 	if (!i) {
6280 		DRM_DEBUG_KMS("Audio directed to unknown port\n");
6281 		/* operate blindly on all ports */
6282 		eldv = IBX_ELD_VALIDB;
6283 		eldv |= IBX_ELD_VALIDB << 4;
6284 		eldv |= IBX_ELD_VALIDB << 8;
6285 	} else {
6286 		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
6287 		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6288 	}
6289 
6290 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6291 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6292 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6293 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6294 	} else
6295 		I915_WRITE(aud_config, 0);
6296 
6297 	if (intel_eld_uptodate(connector,
6298 			       aud_cntrl_st2, eldv,
6299 			       aud_cntl_st, IBX_ELD_ADDRESS,
6300 			       hdmiw_hdmiedid))
6301 		return;
6302 
6303 	i = I915_READ(aud_cntrl_st2);
6304 	i &= ~eldv;
6305 	I915_WRITE(aud_cntrl_st2, i);
6306 
6307 	if (!eld[0])
6308 		return;
6309 
6310 	i = I915_READ(aud_cntl_st);
6311 	i &= ~IBX_ELD_ADDRESS;
6312 	I915_WRITE(aud_cntl_st, i);
6313 
6314 	/* The hw ELD buffer is 84 bytes == 21 dwords */
6315 	len = 21;
6316 	if (eld[2] < (uint8_t)len)
6317 		len = eld[2];
6318 	DRM_DEBUG_KMS("ELD size %d\n", len);
6319 	for (i = 0; i < len; i++)
6320 		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6321 
6322 	i = I915_READ(aud_cntrl_st2);
6323 	i |= eldv;
6324 	I915_WRITE(aud_cntrl_st2, i);
6325 }
6326 
6327 void intel_write_eld(struct drm_encoder *encoder,
6328 		     struct drm_display_mode *mode)
6329 {
6330 	struct drm_crtc *crtc = encoder->crtc;
6331 	struct drm_connector *connector;
6332 	struct drm_device *dev = encoder->dev;
6333 	struct drm_i915_private *dev_priv = dev->dev_private;
6334 
6335 	connector = drm_select_eld(encoder, mode);
6336 	if (!connector)
6337 		return;
6338 
6339 	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6340 			 connector->base.id,
6341 			 drm_get_connector_name(connector),
6342 			 connector->encoder->base.id,
6343 			 drm_get_encoder_name(connector->encoder));
6344 
6345 	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6346 
6347 	if (dev_priv->display.write_eld)
6348 		dev_priv->display.write_eld(connector, crtc);
6349 }
6350 
6351 /** Loads the palette/gamma unit for the CRTC with the prepared values */
6352 void intel_crtc_load_lut(struct drm_crtc *crtc)
6353 {
6354 	struct drm_device *dev = crtc->dev;
6355 	struct drm_i915_private *dev_priv = dev->dev_private;
6356 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6357 	int palreg = PALETTE(intel_crtc->pipe);
6358 	int i;
6359 
6360 	/* The clocks have to be on to load the palette. */
6361 	if (!crtc->enabled || !intel_crtc->active)
6362 		return;
6363 
6364 	/* use legacy palette for Ironlake */
6365 	if (HAS_PCH_SPLIT(dev))
6366 		palreg = LGC_PALETTE(intel_crtc->pipe);
6367 
6368 	for (i = 0; i < 256; i++) {
6369 		I915_WRITE(palreg + 4 * i,
6370 			   (intel_crtc->lut_r[i] << 16) |
6371 			   (intel_crtc->lut_g[i] << 8) |
6372 			   intel_crtc->lut_b[i]);
6373 	}
6374 }
6375 
6376 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6377 {
6378 	struct drm_device *dev = crtc->dev;
6379 	struct drm_i915_private *dev_priv = dev->dev_private;
6380 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6381 	bool visible = base != 0;
6382 	u32 cntl;
6383 
6384 	if (intel_crtc->cursor_visible == visible)
6385 		return;
6386 
6387 	cntl = I915_READ(_CURACNTR);
6388 	if (visible) {
6389 		/* On these chipsets we can only modify the base whilst
6390 		 * the cursor is disabled.
6391 		 */
6392 		I915_WRITE(_CURABASE, base);
6393 
6394 		cntl &= ~(CURSOR_FORMAT_MASK);
6395 		/* XXX width must be 64, stride 256 => 0x00 << 28 */
6396 		cntl |= CURSOR_ENABLE |
6397 			CURSOR_GAMMA_ENABLE |
6398 			CURSOR_FORMAT_ARGB;
6399 	} else
6400 		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6401 	I915_WRITE(_CURACNTR, cntl);
6402 
6403 	intel_crtc->cursor_visible = visible;
6404 }
6405 
6406 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6407 {
6408 	struct drm_device *dev = crtc->dev;
6409 	struct drm_i915_private *dev_priv = dev->dev_private;
6410 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6411 	int pipe = intel_crtc->pipe;
6412 	bool visible = base != 0;
6413 
6414 	if (intel_crtc->cursor_visible != visible) {
6415 		uint32_t cntl = I915_READ(CURCNTR(pipe));
6416 		if (base) {
6417 			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6418 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6419 			cntl |= pipe << 28; /* Connect to correct pipe */
6420 		} else {
6421 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6422 			cntl |= CURSOR_MODE_DISABLE;
6423 		}
6424 		I915_WRITE(CURCNTR(pipe), cntl);
6425 
6426 		intel_crtc->cursor_visible = visible;
6427 	}
6428 	/* and commit changes on next vblank */
6429 	I915_WRITE(CURBASE(pipe), base);
6430 }
6431 
6432 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6433 {
6434 	struct drm_device *dev = crtc->dev;
6435 	struct drm_i915_private *dev_priv = dev->dev_private;
6436 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6437 	int pipe = intel_crtc->pipe;
6438 	bool visible = base != 0;
6439 
6440 	if (intel_crtc->cursor_visible != visible) {
6441 		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6442 		if (base) {
6443 			cntl &= ~CURSOR_MODE;
6444 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6445 		} else {
6446 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6447 			cntl |= CURSOR_MODE_DISABLE;
6448 		}
6449 		I915_WRITE(CURCNTR_IVB(pipe), cntl);
6450 
6451 		intel_crtc->cursor_visible = visible;
6452 	}
6453 	/* and commit changes on next vblank */
6454 	I915_WRITE(CURBASE_IVB(pipe), base);
6455 }
6456 
6457 /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
6458 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6459 				     bool on)
6460 {
6461 	struct drm_device *dev = crtc->dev;
6462 	struct drm_i915_private *dev_priv = dev->dev_private;
6463 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6464 	int pipe = intel_crtc->pipe;
6465 	int x = intel_crtc->cursor_x;
6466 	int y = intel_crtc->cursor_y;
6467 	u32 base, pos;
6468 	bool visible;
6469 
6470 	pos = 0;
6471 
6472 	if (on && crtc->enabled && crtc->fb) {
6473 		base = intel_crtc->cursor_addr;
6474 		if (x > (int) crtc->fb->width)
6475 			base = 0;
6476 
6477 		if (y > (int) crtc->fb->height)
6478 			base = 0;
6479 	} else
6480 		base = 0;
6481 
6482 	if (x < 0) {
6483 		if (x + intel_crtc->cursor_width < 0)
6484 			base = 0;
6485 
6486 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6487 		x = -x;
6488 	}
6489 	pos |= x << CURSOR_X_SHIFT;
6490 
6491 	if (y < 0) {
6492 		if (y + intel_crtc->cursor_height < 0)
6493 			base = 0;
6494 
6495 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6496 		y = -y;
6497 	}
6498 	pos |= y << CURSOR_Y_SHIFT;
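	/*
	 * Assuming the usual layout (per-axis sign bit 0x8000, X in the
	 * low word, Y in the high word), e.g. x = -10, y = 20 packs as
	 * pos = (0x8000 | 10) | (20 << 16) = 0x0014800a.
	 */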
6499 
6500 	visible = base != 0;
6501 	if (!visible && !intel_crtc->cursor_visible)
6502 		return;
6503 
6504 	if (IS_IVYBRIDGE(dev)) {
6505 		I915_WRITE(CURPOS_IVB(pipe), pos);
6506 		ivb_update_cursor(crtc, base);
6507 	} else {
6508 		I915_WRITE(CURPOS(pipe), pos);
6509 		if (IS_845G(dev) || IS_I865G(dev))
6510 			i845_update_cursor(crtc, base);
6511 		else
6512 			i9xx_update_cursor(crtc, base);
6513 	}
6514 
6515 	if (visible)
6516 		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6517 }
6518 
6519 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6520 				 struct drm_file *file,
6521 				 uint32_t handle,
6522 				 uint32_t width, uint32_t height)
6523 {
6524 	struct drm_device *dev = crtc->dev;
6525 	struct drm_i915_private *dev_priv = dev->dev_private;
6526 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6527 	struct drm_i915_gem_object *obj;
6528 	uint32_t addr;
6529 	int ret;
6530 
6531 	DRM_DEBUG_KMS("\n");
6532 
6533 	/* if we want to turn off the cursor ignore width and height */
6534 	if (!handle) {
6535 		DRM_DEBUG_KMS("cursor off\n");
6536 		addr = 0;
6537 		obj = NULL;
6538 		DRM_LOCK(dev);
6539 		goto finish;
6540 	}
6541 
6542 	/* Currently we only support 64x64 cursors */
6543 	if (width != 64 || height != 64) {
6544 		DRM_ERROR("we currently only support 64x64 cursors\n");
6545 		return -EINVAL;
6546 	}
6547 
6548 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6549 	if (&obj->base == NULL)
6550 		return -ENOENT;
6551 
6552 	if (obj->base.size < width * height * 4) {
6553 		DRM_ERROR("buffer is to small\n");
6554 		ret = -ENOMEM;
6555 		goto fail;
6556 	}
6557 
6558 	/* we only need to pin inside GTT if cursor is non-phy */
6559 	DRM_LOCK(dev);
6560 	if (!dev_priv->info->cursor_needs_physical) {
6561 		if (obj->tiling_mode) {
6562 			DRM_ERROR("cursor cannot be tiled\n");
6563 			ret = -EINVAL;
6564 			goto fail_locked;
6565 		}
6566 
6567 		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6568 		if (ret) {
6569 			DRM_ERROR("failed to move cursor bo into the GTT\n");
6570 			goto fail_locked;
6571 		}
6572 
6573 		ret = i915_gem_object_put_fence(obj);
6574 		if (ret) {
6575 			DRM_ERROR("failed to release fence for cursor\n");
6576 			goto fail_unpin;
6577 		}
6578 
6579 		addr = obj->gtt_offset;
6580 	} else {
6581 		int align = IS_I830(dev) ? 16 * 1024 : 256;
6582 		ret = i915_gem_attach_phys_object(dev, obj,
6583 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6584 						  align);
6585 		if (ret) {
6586 			DRM_ERROR("failed to attach phys object\n");
6587 			goto fail_locked;
6588 		}
6589 		addr = obj->phys_obj->handle->busaddr;
6590 	}
6591 
6592 	if (IS_GEN2(dev))
6593 		I915_WRITE(CURSIZE, (height << 12) | width);
6594 
6595  finish:
6596 	if (intel_crtc->cursor_bo) {
6597 		if (dev_priv->info->cursor_needs_physical) {
6598 			if (intel_crtc->cursor_bo != obj)
6599 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6600 		} else
6601 			i915_gem_object_unpin(intel_crtc->cursor_bo);
6602 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6603 	}
6604 
6605 	DRM_UNLOCK(dev);
6606 
6607 	intel_crtc->cursor_addr = addr;
6608 	intel_crtc->cursor_bo = obj;
6609 	intel_crtc->cursor_width = width;
6610 	intel_crtc->cursor_height = height;
6611 
6612 	intel_crtc_update_cursor(crtc, true);
6613 
6614 	return 0;
6615 fail_unpin:
6616 	i915_gem_object_unpin(obj);
6617 fail_locked:
6618 	DRM_UNLOCK(dev);
6619 fail:
6620 	drm_gem_object_unreference_unlocked(&obj->base);
6621 	return ret;
6622 }
6623 
6624 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6625 {
6626 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6627 
6628 	intel_crtc->cursor_x = x;
6629 	intel_crtc->cursor_y = y;
6630 
6631 	intel_crtc_update_cursor(crtc, true);
6632 
6633 	return 0;
6634 }
6635 
6636 /** Sets the color ramps on behalf of RandR */
6637 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6638 				 u16 blue, int regno)
6639 {
6640 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6641 
6642 	intel_crtc->lut_r[regno] = red >> 8;
6643 	intel_crtc->lut_g[regno] = green >> 8;
6644 	intel_crtc->lut_b[regno] = blue >> 8;
6645 }
6646 
6647 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6648 			     u16 *blue, int regno)
6649 {
6650 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6651 
6652 	*red = intel_crtc->lut_r[regno] << 8;
6653 	*green = intel_crtc->lut_g[regno] << 8;
6654 	*blue = intel_crtc->lut_b[regno] << 8;
6655 }
6656 
6657 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6658 				 u16 *blue, uint32_t start, uint32_t size)
6659 {
6660 	int end = (start + size > 256) ? 256 : start + size, i;
6661 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6662 
6663 	for (i = start; i < end; i++) {
6664 		intel_crtc->lut_r[i] = red[i] >> 8;
6665 		intel_crtc->lut_g[i] = green[i] >> 8;
6666 		intel_crtc->lut_b[i] = blue[i] >> 8;
6667 	}
6668 
6669 	intel_crtc_load_lut(crtc);
6670 }
6671 
6672 /**
6673  * Get a pipe with a simple mode set on it for doing load-based monitor
6674  * detection.
6675  *
6676  * It will be up to the load-detect code to adjust the pipe as appropriate for
6677  * its requirements.  The pipe will be connected to no other encoders.
6678  *
6679  * Currently this code will only succeed if there is a pipe with no encoders
6680  * configured for it.  In the future, it could choose to temporarily disable
6681  * some outputs to free up a pipe for its use.
6682  *
6683  * \return crtc, or NULL if no pipes are available.
6684  */
6685 
6686 /* VESA 640x480x72Hz mode to set on the pipe */
6687 static struct drm_display_mode load_detect_mode = {
6688 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6689 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6690 };
6691 
6692 static int
6693 intel_framebuffer_create(struct drm_device *dev,
6694     struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
6695      struct drm_framebuffer **res)
6696 {
6697 	struct intel_framebuffer *intel_fb;
6698 	int ret;
6699 
6700 	intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
6701 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6702 	if (ret) {
6703 		drm_gem_object_unreference_unlocked(&obj->base);
6704 		drm_free(intel_fb, DRM_MEM_KMS);
6705 		return (ret);
6706 	}
6707 
6708 	*res = &intel_fb->base;
6709 	return (0);
6710 }
6711 
6712 static u32
6713 intel_framebuffer_pitch_for_width(int width, int bpp)
6714 {
6715 	u32 pitch = howmany(width * bpp, 8);
6716 	return roundup2(pitch, 64);
6717 }
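/*
 * E.g. intel_framebuffer_pitch_for_width(1366, 32): howmany(1366 * 32, 8)
 * = 5464 bytes, rounded up to the next 64-byte multiple = 5504.
 */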
6718 
6719 static u32
6720 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6721 {
6722 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6723 	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
6724 }
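/*
 * E.g. a 1024x768 mode at 32bpp: pitch = 4096, so the allocation is
 * roundup2(4096 * 768, PAGE_SIZE) = 3145728 bytes (already aligned for
 * 4KB pages).
 */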
6725 
6726 static int
6727 intel_framebuffer_create_for_mode(struct drm_device *dev,
6728     struct drm_display_mode *mode, int depth, int bpp,
6729     struct drm_framebuffer **res)
6730 {
6731 	struct drm_i915_gem_object *obj;
6732 	struct drm_mode_fb_cmd2 mode_cmd;
6733 
6734 	obj = i915_gem_alloc_object(dev,
6735 				    intel_framebuffer_size_for_mode(mode, bpp));
6736 	if (obj == NULL)
6737 		return (-ENOMEM);
6738 
6739 	mode_cmd.width = mode->hdisplay;
6740 	mode_cmd.height = mode->vdisplay;
6741 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6742 								bpp);
6743 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6744 
6745 	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
6746 }
6747 
6748 static int
6749 mode_fits_in_fbdev(struct drm_device *dev,
6750     struct drm_display_mode *mode, struct drm_framebuffer **res)
6751 {
6752 	struct drm_i915_private *dev_priv = dev->dev_private;
6753 	struct drm_i915_gem_object *obj;
6754 	struct drm_framebuffer *fb;
6755 
6756 	if (dev_priv->fbdev == NULL) {
6757 		*res = NULL;
6758 		return (0);
6759 	}
6760 
6761 	obj = dev_priv->fbdev->ifb.obj;
6762 	if (obj == NULL) {
6763 		*res = NULL;
6764 		return (0);
6765 	}
6766 
6767 	fb = &dev_priv->fbdev->ifb.base;
6768 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6769 	    fb->bits_per_pixel)) {
6770 		*res = NULL;
6771 		return (0);
6772 	}
6773 
6774 	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
6775 		*res = NULL;
6776 		return (0);
6777 	}
6778 
6779 	*res = fb;
6780 	return (0);
6781 }
6782 
6783 bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
6784 				struct drm_connector *connector,
6785 				struct drm_display_mode *mode,
6786 				struct intel_load_detect_pipe *old)
6787 {
6788 	struct intel_crtc *intel_crtc;
6789 	struct drm_crtc *possible_crtc;
6790 	struct drm_encoder *encoder = &intel_encoder->base;
6791 	struct drm_crtc *crtc = NULL;
6792 	struct drm_device *dev = encoder->dev;
6793 	struct drm_framebuffer *old_fb;
6794 	int i = -1, r;
6795 
6796 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6797 		      connector->base.id, drm_get_connector_name(connector),
6798 		      encoder->base.id, drm_get_encoder_name(encoder));
6799 
6800 	/*
6801 	 * Algorithm gets a little messy:
6802 	 *
6803 	 *   - if the connector already has an assigned crtc, use it (but make
6804 	 *     sure it's on first)
6805 	 *
6806 	 *   - try to find the first unused crtc that can drive this connector,
6807 	 *     and use that if we find one
6808 	 */
6809 
6810 	/* See if we already have a CRTC for this connector */
6811 	if (encoder->crtc) {
6812 		crtc = encoder->crtc;
6813 
6814 		intel_crtc = to_intel_crtc(crtc);
6815 		old->dpms_mode = intel_crtc->dpms_mode;
6816 		old->load_detect_temp = false;
6817 
6818 		/* Make sure the crtc and connector are running */
6819 		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6820 			struct drm_encoder_helper_funcs *encoder_funcs;
6821 			struct drm_crtc_helper_funcs *crtc_funcs;
6822 
6823 			crtc_funcs = crtc->helper_private;
6824 			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6825 
6826 			encoder_funcs = encoder->helper_private;
6827 			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
6828 		}
6829 
6830 		return true;
6831 	}
6832 
6833 	/* Find an unused one (if possible) */
6834 	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6835 		i++;
6836 		if (!(encoder->possible_crtcs & (1 << i)))
6837 			continue;
6838 		if (!possible_crtc->enabled) {
6839 			crtc = possible_crtc;
6840 			break;
6841 		}
6842 	}
6843 
6844 	/*
6845 	 * If we didn't find an unused CRTC, don't use any.
6846 	 */
6847 	if (!crtc) {
6848 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
6849 		return false;
6850 	}
6851 
6852 	encoder->crtc = crtc;
6853 	connector->encoder = encoder;
6854 
6855 	intel_crtc = to_intel_crtc(crtc);
6856 	old->dpms_mode = intel_crtc->dpms_mode;
6857 	old->load_detect_temp = true;
6858 	old->release_fb = NULL;
6859 
6860 	if (!mode)
6861 		mode = &load_detect_mode;
6862 
6863 	old_fb = crtc->fb;
6864 
6865 	/* We need a framebuffer large enough to accommodate all accesses
6866 	 * that the plane may generate whilst we perform load detection.
6867 	 * We cannot rely on the fbcon either being present (we get called
6868 	 * during its initialisation to detect all boot displays, and it may
6869 	 * not even exist) or being large enough to satisfy the
6870 	 * requested mode.
6871 	 */
6872 	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
6873 	if (crtc->fb == NULL) {
6874 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6875 		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
6876 		    &crtc->fb);
6877 		old->release_fb = crtc->fb;
6878 	} else
6879 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6880 	if (r != 0) {
6881 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6882 		crtc->fb = old_fb;
6883 		return false;
6884 	}
6885 
6886 	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6887 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6888 		if (old->release_fb)
6889 			old->release_fb->funcs->destroy(old->release_fb);
6890 		crtc->fb = old_fb;
6891 		return false;
6892 	}
6893 
6894 	/* let the connector get through one full cycle before testing */
6895 	intel_wait_for_vblank(dev, intel_crtc->pipe);
6896 
6897 	return true;
6898 }
6899 
6900 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
6901 				    struct drm_connector *connector,
6902 				    struct intel_load_detect_pipe *old)
6903 {
6904 	struct drm_encoder *encoder = &intel_encoder->base;
6905 	struct drm_device *dev = encoder->dev;
6906 	struct drm_crtc *crtc = encoder->crtc;
6907 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
6908 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
6909 
6910 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6911 		      connector->base.id, drm_get_connector_name(connector),
6912 		      encoder->base.id, drm_get_encoder_name(encoder));
6913 
6914 	if (old->load_detect_temp) {
6915 		connector->encoder = NULL;
6916 		drm_helper_disable_unused_functions(dev);
6917 
6918 		if (old->release_fb)
6919 			old->release_fb->funcs->destroy(old->release_fb);
6920 
6921 		return;
6922 	}
6923 
6924 	/* Switch crtc and encoder back off if necessary */
6925 	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6926 		encoder_funcs->dpms(encoder, old->dpms_mode);
6927 		crtc_funcs->dpms(crtc, old->dpms_mode);
6928 	}
6929 }
6930 
6931 /* Returns the clock of the currently programmed mode of the given pipe. */
6932 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6933 {
6934 	struct drm_i915_private *dev_priv = dev->dev_private;
6935 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6936 	int pipe = intel_crtc->pipe;
6937 	u32 dpll = I915_READ(DPLL(pipe));
6938 	u32 fp;
6939 	intel_clock_t clock;
6940 
6941 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6942 		fp = I915_READ(FP0(pipe));
6943 	else
6944 		fp = I915_READ(FP1(pipe));
6945 
6946 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6947 	if (IS_PINEVIEW(dev)) {
6948 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6949 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6950 	} else {
6951 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6952 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6953 	}
6954 
6955 	if (!IS_GEN2(dev)) {
6956 		if (IS_PINEVIEW(dev))
6957 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6958 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6959 		else
6960 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6961 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6962 
6963 		switch (dpll & DPLL_MODE_MASK) {
6964 		case DPLLB_MODE_DAC_SERIAL:
6965 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6966 				5 : 10;
6967 			break;
6968 		case DPLLB_MODE_LVDS:
6969 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6970 				7 : 14;
6971 			break;
6972 		default:
6973 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6974 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
6975 			return 0;
6976 		}
6977 
6978 		/* XXX: Handle the 100Mhz refclk */
6979 		intel_clock(dev, 96000, &clock);
6980 	} else {
6981 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6982 
6983 		if (is_lvds) {
6984 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6985 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6986 			clock.p2 = 14;
6987 
6988 			if ((dpll & PLL_REF_INPUT_MASK) ==
6989 			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6990 				/* XXX: might not be 66MHz */
6991 				intel_clock(dev, 66000, &clock);
6992 			} else
6993 				intel_clock(dev, 48000, &clock);
6994 		} else {
6995 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6996 				clock.p1 = 2;
6997 			else {
6998 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6999 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
7000 			}
7001 			if (dpll & PLL_P2_DIVIDE_BY_4)
7002 				clock.p2 = 4;
7003 			else
7004 				clock.p2 = 2;
7005 
7006 			intel_clock(dev, 48000, &clock);
7007 		}
7008 	}
7009 
7010 	/* XXX: It would be nice to validate the clocks, but we can't reuse
7011 	 * i830PllIsValid() because it relies on the xf86_config connector
7012 	 * configuration being accurate, which it isn't necessarily.
7013 	 */
7014 
7015 	return clock.dot;
7016 }
7017 
7018 /** Returns the currently programmed mode of the given pipe. */
7019 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7020 					     struct drm_crtc *crtc)
7021 {
7022 	struct drm_i915_private *dev_priv = dev->dev_private;
7023 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7024 	int pipe = intel_crtc->pipe;
7025 	struct drm_display_mode *mode;
7026 	int htot = I915_READ(HTOTAL(pipe));
7027 	int hsync = I915_READ(HSYNC(pipe));
7028 	int vtot = I915_READ(VTOTAL(pipe));
7029 	int vsync = I915_READ(VSYNC(pipe));
7030 
7031 	mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
7032 
7033 	mode->clock = intel_crtc_clock_get(dev, crtc);
7034 	mode->hdisplay = (htot & 0xffff) + 1;
7035 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7036 	mode->hsync_start = (hsync & 0xffff) + 1;
7037 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7038 	mode->vdisplay = (vtot & 0xffff) + 1;
7039 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7040 	mode->vsync_start = (vsync & 0xffff) + 1;
7041 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
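	/*
	 * Each timing register stores "value - 1", with the total in the
	 * high word and the active/start value in the low word.  For
	 * example, a hypothetical HTOTAL of 0x0897077f decodes to
	 * hdisplay = 0x77f + 1 = 1920 and htotal = 0x897 + 1 = 2200.
	 */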
7042 
7043 	drm_mode_set_name(mode);
7044 	drm_mode_set_crtcinfo(mode, 0);
7045 
7046 	return mode;
7047 }
7048 
7049 #define GPU_IDLE_TIMEOUT (500 /* ms */ * hz / 1000)	/* ms -> ticks */
7050 
7051 /* When this timer fires, we've been idle for a while */
7052 static void intel_gpu_idle_timer(void *arg)
7053 {
7054 	struct drm_device *dev = arg;
7055 	drm_i915_private_t *dev_priv = dev->dev_private;
7056 
7057 	if (!list_empty(&dev_priv->mm.active_list)) {
7058 		/* Still processing requests, so just re-arm the timer. */
7059 		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
7060 		    intel_gpu_idle_timer, dev);
7061 		return;
7062 	}
7063 
7064 	dev_priv->busy = false;
7065 	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
7066 }
7067 
7068 #define CRTC_IDLE_TIMEOUT (1000 /* ms */ * hz / 1000)	/* ms -> ticks */
7069 
7070 static void intel_crtc_idle_timer(void *arg)
7071 {
7072 	struct intel_crtc *intel_crtc = arg;
7073 	struct drm_crtc *crtc = &intel_crtc->base;
7074 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
7075 	struct intel_framebuffer *intel_fb;
7076 
7077 	intel_fb = to_intel_framebuffer(crtc->fb);
7078 	if (intel_fb && intel_fb->obj->active) {
7079 		/* The framebuffer is still being accessed by the GPU. */
7080 		callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
7081 		    intel_crtc_idle_timer, intel_crtc);
7082 		return;
7083 	}
7084 
7085 	intel_crtc->busy = false;
7086 	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
7087 }
7088 
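/*
 * A sketch of how the reclocking pieces below interact: intel_mark_busy()
 * upclocks an active CRTC via intel_increase_pllclock() and arms
 * intel_crtc_idle_timer(); once the scanout buffer goes GPU-idle, the
 * timer queues the idle task, and intel_idle_update() then downclocks any
 * CRTC still marked !busy via intel_decrease_pllclock().
 */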
7089 static void intel_increase_pllclock(struct drm_crtc *crtc)
7090 {
7091 	struct drm_device *dev = crtc->dev;
7092 	drm_i915_private_t *dev_priv = dev->dev_private;
7093 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7094 	int pipe = intel_crtc->pipe;
7095 	int dpll_reg = DPLL(pipe);
7096 	int dpll;
7097 
7098 	if (HAS_PCH_SPLIT(dev))
7099 		return;
7100 
7101 	if (!dev_priv->lvds_downclock_avail)
7102 		return;
7103 
7104 	dpll = I915_READ(dpll_reg);
7105 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
7106 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
7107 
7108 		assert_panel_unlocked(dev_priv, pipe);
7109 
7110 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
7111 		I915_WRITE(dpll_reg, dpll);
7112 		intel_wait_for_vblank(dev, pipe);
7113 
7114 		dpll = I915_READ(dpll_reg);
7115 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
7116 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
7117 	}
7118 
7119 	/* Schedule downclock */
7120 	callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
7121 	    intel_crtc_idle_timer, intel_crtc);
7122 }
7123 
7124 static void intel_decrease_pllclock(struct drm_crtc *crtc)
7125 {
7126 	struct drm_device *dev = crtc->dev;
7127 	drm_i915_private_t *dev_priv = dev->dev_private;
7128 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7129 
7130 	if (HAS_PCH_SPLIT(dev))
7131 		return;
7132 
7133 	if (!dev_priv->lvds_downclock_avail)
7134 		return;
7135 
7136 	/*
7137 	 * Since this is called by a timer, we should never get here in
7138 	 * the manual case.
7139 	 */
7140 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7141 		int pipe = intel_crtc->pipe;
7142 		int dpll_reg = DPLL(pipe);
7143 		u32 dpll;
7144 
7145 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
7146 
7147 		assert_panel_unlocked(dev_priv, pipe);
7148 
7149 		dpll = I915_READ(dpll_reg);
7150 		dpll |= DISPLAY_RATE_SELECT_FPA1;
7151 		I915_WRITE(dpll_reg, dpll);
7152 		intel_wait_for_vblank(dev, pipe);
7153 		dpll = I915_READ(dpll_reg);
7154 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7155 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7156 	}
7157 }
7158 
7159 /**
7160  * intel_idle_update - adjust clocks for idleness
7161  * @work: work struct
7162  *
7163  * Either the GPU or display (or both) went idle.  Check the busy status
7164  * here and adjust the CRTC and GPU clocks as necessary.
7165  */
7166 static void intel_idle_update(void *arg, int pending)
7167 {
7168 	drm_i915_private_t *dev_priv = arg;
7169 	struct drm_device *dev = dev_priv->dev;
7170 	struct drm_crtc *crtc;
7171 	struct intel_crtc *intel_crtc;
7172 
7173 	if (!i915_powersave)
7174 		return;
7175 
7176 	DRM_LOCK(dev);
7177 
7178 	i915_update_gfx_val(dev_priv);
7179 
7180 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7181 		/* Skip inactive CRTCs */
7182 		if (!crtc->fb)
7183 			continue;
7184 
7185 		intel_crtc = to_intel_crtc(crtc);
7186 		if (!intel_crtc->busy)
7187 			intel_decrease_pllclock(crtc);
7188 	}
7189 
7190 	DRM_UNLOCK(dev);
7191 }
7192 
7193 /**
7194  * intel_mark_busy - mark the GPU and possibly the display busy
7195  * @dev: drm device
7196  * @obj: object we're operating on
7197  *
7198  * Callers can use this function to indicate that the GPU is busy processing
7199  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
7200  * buffer), we'll also mark the display as busy, so we know to increase its
7201  * clock frequency.
7202  */
7203 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7204 {
7205 	drm_i915_private_t *dev_priv = dev->dev_private;
7206 	struct drm_crtc *crtc = NULL;
7207 	struct intel_framebuffer *intel_fb;
7208 	struct intel_crtc *intel_crtc;
7209 
7210 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
7211 		return;
7212 
7213 	if (!dev_priv->busy)
7214 		dev_priv->busy = true;
7215 	else
7216 		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
7217 		    intel_gpu_idle_timer, dev);
7218 
7219 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7220 		if (!crtc->fb)
7221 			continue;
7222 
7223 		intel_crtc = to_intel_crtc(crtc);
7224 		intel_fb = to_intel_framebuffer(crtc->fb);
7225 		if (intel_fb->obj == obj) {
7226 			if (!intel_crtc->busy) {
7227 				/* Non-busy -> busy, upclock */
7228 				intel_increase_pllclock(crtc);
7229 				intel_crtc->busy = true;
7230 			} else {
7231 				/* Busy -> busy, put off timer */
7232 				callout_reset(&intel_crtc->idle_callout,
7233 				    CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
7234 				    intel_crtc);
7235 			}
7236 		}
7237 	}
7238 }
7239 
7240 static void intel_crtc_destroy(struct drm_crtc *crtc)
7241 {
7242 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7243 	struct drm_device *dev = crtc->dev;
7244 	struct drm_i915_private *dev_priv = dev->dev_private;
7245 	struct intel_unpin_work *work;
7246 
7247 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7248 	work = intel_crtc->unpin_work;
7249 	intel_crtc->unpin_work = NULL;
7250 	lockmgr(&dev->event_lock, LK_RELEASE);
7251 
7252 	if (work) {
7253 		taskqueue_cancel(dev_priv->tq, &work->task, NULL);
7254 		taskqueue_drain(dev_priv->tq, &work->task);
7255 		drm_free(work, DRM_MEM_KMS);
7256 	}
7257 
7258 	drm_crtc_cleanup(crtc);
7259 
7260 	drm_free(intel_crtc, DRM_MEM_KMS);
7261 }
7262 
7263 static void intel_unpin_work_fn(void *arg, int pending)
7264 {
7265 	struct intel_unpin_work *work = arg;
7266 	struct drm_device *dev;
7267 
7268 	dev = work->dev;
7269 	DRM_LOCK(dev);
7270 	intel_unpin_fb_obj(work->old_fb_obj);
7271 	drm_gem_object_unreference(&work->pending_flip_obj->base);
7272 	drm_gem_object_unreference(&work->old_fb_obj->base);
7273 
7274 	intel_update_fbc(work->dev);
7275 	DRM_UNLOCK(dev);
7276 	drm_free(work, DRM_MEM_KMS);
7277 }
7278 
7279 static void do_intel_finish_page_flip(struct drm_device *dev,
7280 				      struct drm_crtc *crtc)
7281 {
7282 	drm_i915_private_t *dev_priv = dev->dev_private;
7283 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7284 	struct intel_unpin_work *work;
7285 	struct drm_i915_gem_object *obj;
7286 	struct drm_pending_vblank_event *e;
7287 	struct timeval tnow, tvbl;
7288 
7289 	/* Ignore early vblank irqs */
7290 	if (intel_crtc == NULL)
7291 		return;
7292 
7293 	microtime(&tnow);
7294 
7295 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7296 	work = intel_crtc->unpin_work;
7297 	if (work == NULL || !work->pending) {
7298 		lockmgr(&dev->event_lock, LK_RELEASE);
7299 		return;
7300 	}
7301 
7302 	intel_crtc->unpin_work = NULL;
7303 
7304 	if (work->event) {
7305 		e = work->event;
7306 		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
7307 
7308 		/* Called before vblank count and timestamps have
7309 		 * been updated for the vblank interval of flip
7310 		 * completion? Need to increment vblank count and
7311 		 * add one videorefresh duration to returned timestamp
7312 		 * to account for this. We assume this happened if we
7313 		 * get called over 0.9 frame durations after the last
7314 		 * timestamped vblank.
7315 		 *
7316 		 * This calculation cannot be used with vrefresh rates
7317 		 * below 5Hz (10Hz to be on the safe side) without
7318 		 * promoting to 64-bit integers.
7319 		 */
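		/* Worked example: for a 60Hz mode framedur_ns is about
		 * 16,666,667 ns, so the fixup below kicks in whenever this
		 * completion runs more than ~15 ms after the last
		 * timestamped vblank. */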
7320 		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
7321 		    9 * crtc->framedur_ns) {
7322 			e->event.sequence++;
7323 			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
7324 					     crtc->framedur_ns);
7325 		}
7326 
7327 		e->event.tv_sec = tvbl.tv_sec;
7328 		e->event.tv_usec = tvbl.tv_usec;
7329 
7330 		list_add_tail(&e->base.link,
7331 			      &e->base.file_priv->event_list);
7332 		drm_event_wakeup(&e->base);
7333 	}
7334 
7335 	drm_vblank_put(dev, intel_crtc->pipe);
7336 
7337 	obj = work->old_fb_obj;
7338 
7339 	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
7340 	if (atomic_load_acq_int(&obj->pending_flip) == 0)
7341 		wakeup(&obj->pending_flip);
7342 	lockmgr(&dev->event_lock, LK_RELEASE);
7343 
7344 	taskqueue_enqueue(dev_priv->tq, &work->task);
7345 }
7346 
7347 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7348 {
7349 	drm_i915_private_t *dev_priv = dev->dev_private;
7350 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7351 
7352 	do_intel_finish_page_flip(dev, crtc);
7353 }
7354 
7355 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7356 {
7357 	drm_i915_private_t *dev_priv = dev->dev_private;
7358 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7359 
7360 	do_intel_finish_page_flip(dev, crtc);
7361 }
7362 
7363 void intel_prepare_page_flip(struct drm_device *dev, int plane)
7364 {
7365 	drm_i915_private_t *dev_priv = dev->dev_private;
7366 	struct intel_crtc *intel_crtc =
7367 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7368 
7369 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7370 	if (intel_crtc->unpin_work) {
7371 		if ((++intel_crtc->unpin_work->pending) > 1)
7372 			DRM_ERROR("Prepared flip multiple times\n");
7373 	} else {
7374 		DRM_DEBUG("preparing flip with no unpin work?\n");
7375 	}
7376 	lockmgr(&dev->event_lock, LK_RELEASE);
7377 }
7378 
7379 static int intel_gen2_queue_flip(struct drm_device *dev,
7380 				 struct drm_crtc *crtc,
7381 				 struct drm_framebuffer *fb,
7382 				 struct drm_i915_gem_object *obj)
7383 {
7384 	struct drm_i915_private *dev_priv = dev->dev_private;
7385 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7386 	unsigned long offset;
7387 	u32 flip_mask;
7388 	int ret;
7389 
7390 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7391 	if (ret)
7392 		goto out;
7393 
7394 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7395 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
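	/* e.g. a hypothetical 32bpp scanout with a 7680-byte pitch panned to
	 * (x = 8, y = 4) gives offset = 4 * 7680 + 8 * 4 = 30752 bytes. */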
7396 
7397 	ret = BEGIN_LP_RING(6);
7398 	if (ret)
7399 		goto out;
7400 
7401 	/* Can't queue multiple flips, so wait for the previous
7402 	 * one to finish before executing the next.
7403 	 */
7404 	if (intel_crtc->plane)
7405 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7406 	else
7407 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7408 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7409 	OUT_RING(MI_NOOP);
7410 	OUT_RING(MI_DISPLAY_FLIP |
7411 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7412 	OUT_RING(fb->pitches[0]);
7413 	OUT_RING(obj->gtt_offset + offset);
7414 	OUT_RING(0); /* aux display base address, unused */
7415 	ADVANCE_LP_RING();
7416 out:
7417 	return ret;
7418 }
7419 
7420 static int intel_gen3_queue_flip(struct drm_device *dev,
7421 				 struct drm_crtc *crtc,
7422 				 struct drm_framebuffer *fb,
7423 				 struct drm_i915_gem_object *obj)
7424 {
7425 	struct drm_i915_private *dev_priv = dev->dev_private;
7426 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7427 	unsigned long offset;
7428 	u32 flip_mask;
7429 	int ret;
7430 
7431 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7432 	if (ret)
7433 		goto out;
7434 
7435 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7436 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7437 
7438 	ret = BEGIN_LP_RING(6);
7439 	if (ret)
7440 		goto out;
7441 
7442 	if (intel_crtc->plane)
7443 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7444 	else
7445 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7446 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7447 	OUT_RING(MI_NOOP);
7448 	OUT_RING(MI_DISPLAY_FLIP_I915 |
7449 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7450 	OUT_RING(fb->pitches[0]);
7451 	OUT_RING(obj->gtt_offset + offset);
7452 	OUT_RING(MI_NOOP);
7453 
7454 	ADVANCE_LP_RING();
7455 out:
7456 	return ret;
7457 }
7458 
7459 static int intel_gen4_queue_flip(struct drm_device *dev,
7460 				 struct drm_crtc *crtc,
7461 				 struct drm_framebuffer *fb,
7462 				 struct drm_i915_gem_object *obj)
7463 {
7464 	struct drm_i915_private *dev_priv = dev->dev_private;
7465 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7466 	uint32_t pf, pipesrc;
7467 	int ret;
7468 
7469 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7470 	if (ret)
7471 		goto out;
7472 
7473 	ret = BEGIN_LP_RING(4);
7474 	if (ret)
7475 		goto out;
7476 
7477 	/* i965+ uses the linear or tiled offsets from the
7478 	 * Display Registers (which do not change across a page-flip)
7479 	 * so we need only reprogram the base address.
7480 	 */
7481 	OUT_RING(MI_DISPLAY_FLIP |
7482 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7483 	OUT_RING(fb->pitches[0]);
7484 	OUT_RING(obj->gtt_offset | obj->tiling_mode);
7485 
7486 	/* XXX Enabling the panel-fitter across page-flip is so far
7487 	 * untested on non-native modes, so ignore it for now.
7488 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7489 	 */
7490 	pf = 0;
7491 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7492 	OUT_RING(pf | pipesrc);
7493 	ADVANCE_LP_RING();
7494 out:
7495 	return ret;
7496 }
7497 
7498 static int intel_gen6_queue_flip(struct drm_device *dev,
7499 				 struct drm_crtc *crtc,
7500 				 struct drm_framebuffer *fb,
7501 				 struct drm_i915_gem_object *obj)
7502 {
7503 	struct drm_i915_private *dev_priv = dev->dev_private;
7504 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7505 	uint32_t pf, pipesrc;
7506 	int ret;
7507 
7508 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7509 	if (ret)
7510 		goto out;
7511 
7512 	ret = BEGIN_LP_RING(4);
7513 	if (ret)
7514 		goto out;
7515 
7516 	OUT_RING(MI_DISPLAY_FLIP |
7517 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7518 	OUT_RING(fb->pitches[0] | obj->tiling_mode);
7519 	OUT_RING(obj->gtt_offset);
7520 
7521 	/* Contrary to the suggestions in the documentation,
7522 	 * "Enable Panel Fitter" does not seem to be required when page
7523 	 * flipping with a non-native mode, and worse causes a normal
7524 	 * modeset to fail.
7525 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7526 	 */
7527 	pf = 0;
7528 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7529 	OUT_RING(pf | pipesrc);
7530 	ADVANCE_LP_RING();
7531 out:
7532 	return ret;
7533 }
7534 
7535 /*
7536  * On gen7 we currently use the blit ring because (in early silicon at least)
7537  * the render ring doesn't give us interrupts for page flip completion, which
7538  * means clients will hang after the first flip is queued.  Fortunately the
7539  * blit ring generates interrupts properly, so use it instead.
7540  */
7541 static int intel_gen7_queue_flip(struct drm_device *dev,
7542 				 struct drm_crtc *crtc,
7543 				 struct drm_framebuffer *fb,
7544 				 struct drm_i915_gem_object *obj)
7545 {
7546 	struct drm_i915_private *dev_priv = dev->dev_private;
7547 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7548 	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
7549 	int ret;
7550 
7551 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7552 	if (ret)
7553 		goto out;
7554 
7555 	ret = intel_ring_begin(ring, 4);
7556 	if (ret)
7557 		goto out;
7558 
7559 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7560 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7561 	intel_ring_emit(ring, (obj->gtt_offset));
7562 	intel_ring_emit(ring, (MI_NOOP));
7563 	intel_ring_advance(ring);
7564 out:
7565 	return ret;
7566 }
7567 
7568 static int intel_default_queue_flip(struct drm_device *dev,
7569 				    struct drm_crtc *crtc,
7570 				    struct drm_framebuffer *fb,
7571 				    struct drm_i915_gem_object *obj)
7572 {
7573 	return -ENODEV;
7574 }
7575 
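/*
 * Generic flip entry point: queue the unpin work, hold a vblank reference,
 * then hand the actual ring programming to one of the per-generation
 * queue_flip hooks above via dev_priv->display.queue_flip.
 */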
7576 static int intel_crtc_page_flip(struct drm_crtc *crtc,
7577 				struct drm_framebuffer *fb,
7578 				struct drm_pending_vblank_event *event)
7579 {
7580 	struct drm_device *dev = crtc->dev;
7581 	struct drm_i915_private *dev_priv = dev->dev_private;
7582 	struct intel_framebuffer *intel_fb;
7583 	struct drm_i915_gem_object *obj;
7584 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7585 	struct intel_unpin_work *work;
7586 	int ret;
7587 
7588 	work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
7589 
7590 	work->event = event;
7591 	work->dev = crtc->dev;
7592 	intel_fb = to_intel_framebuffer(crtc->fb);
7593 	work->old_fb_obj = intel_fb->obj;
7594 	TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
7595 
7596 	ret = drm_vblank_get(dev, intel_crtc->pipe);
7597 	if (ret)
7598 		goto free_work;
7599 
7600 	/* We borrow the event spin lock for protecting unpin_work */
7601 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7602 	if (intel_crtc->unpin_work) {
7603 		lockmgr(&dev->event_lock, LK_RELEASE);
7604 		drm_free(work, DRM_MEM_KMS);
7605 		drm_vblank_put(dev, intel_crtc->pipe);
7606 
7607 		DRM_DEBUG("flip queue: crtc already busy\n");
7608 		return -EBUSY;
7609 	}
7610 	intel_crtc->unpin_work = work;
7611 	lockmgr(&dev->event_lock, LK_RELEASE);
7612 
7613 	intel_fb = to_intel_framebuffer(fb);
7614 	obj = intel_fb->obj;
7615 
7616 	DRM_LOCK(dev);
7617 
7618 	/* Reference the objects for the scheduled work. */
7619 	drm_gem_object_reference(&work->old_fb_obj->base);
7620 	drm_gem_object_reference(&obj->base);
7621 
7622 	crtc->fb = fb;
7623 
7624 	work->pending_flip_obj = obj;
7625 
7626 	work->enable_stall_check = true;
7627 
7628 	/* Block clients from rendering to the new back buffer until
7629 	 * the flip occurs and the object is no longer visible.
7630 	 */
7631 	atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
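	/* pending_flip is a per-plane bitmask: the (1 << plane) bit set here
	 * is cleared by do_intel_finish_page_flip(), which wakes any waiter
	 * once all bits have dropped to zero. */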
7632 
7633 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7634 	if (ret)
7635 		goto cleanup_pending;
7636 	intel_disable_fbc(dev);
7637 	DRM_UNLOCK(dev);
7638 
7639 	return 0;
7640 
7641 cleanup_pending:
7642 	atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
7643 	drm_gem_object_unreference(&work->old_fb_obj->base);
7644 	drm_gem_object_unreference(&obj->base);
7645 	DRM_UNLOCK(dev);
7646 
7647 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7648 	intel_crtc->unpin_work = NULL;
7649 	lockmgr(&dev->event_lock, LK_RELEASE);
7650 
7651 	drm_vblank_put(dev, intel_crtc->pipe);
7652 free_work:
7653 	drm_free(work, DRM_MEM_KMS);
7654 
7655 	return ret;
7656 }
7657 
7658 static void intel_sanitize_modesetting(struct drm_device *dev,
7659 				       int pipe, int plane)
7660 {
7661 	struct drm_i915_private *dev_priv = dev->dev_private;
7662 	u32 reg, val;
	int i;
7663 
7664 	/* Clear any frame start delays used for debugging left by the BIOS;
	 * use a scratch index so the pipe argument, which is consulted again
	 * below, isn't clobbered by the iteration. */
7665 	for_each_pipe(i) {
7666 		reg = PIPECONF(i);
7667 		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7668 	}
7669 
7670 	if (HAS_PCH_SPLIT(dev))
7671 		return;
7672 
7673 	/* Who knows what state these registers were left in by the BIOS or
7674 	 * grub?
7675 	 *
7676 	 * If we leave the registers in a conflicting state (e.g. with the
7677 	 * display plane reading from the other pipe than the one we intend
7678 	 * to use) then when we attempt to teardown the active mode, we will
7679 	 * not disable the pipes and planes in the correct order -- leaving
7680 	 * a plane reading from a disabled pipe and possibly leading to
7681 	 * undefined behaviour.
7682 	 */
7683 
7684 	reg = DSPCNTR(plane);
7685 	val = I915_READ(reg);
7686 
7687 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
7688 		return;
7689 	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7690 		return;
7691 
7692 	/* This display plane is active and attached to the other CPU pipe. */
7693 	pipe = !pipe;
7694 
7695 	/* Disable the plane and wait for it to stop reading from the pipe. */
7696 	intel_disable_plane(dev_priv, plane, pipe);
7697 	intel_disable_pipe(dev_priv, pipe);
7698 }
7699 
7700 static void intel_crtc_reset(struct drm_crtc *crtc)
7701 {
7702 	struct drm_device *dev = crtc->dev;
7703 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7704 
7705 	/* Reset flags back to the 'unknown' status so that they
7706 	 * will be correctly set on the initial modeset.
7707 	 */
7708 	intel_crtc->dpms_mode = -1;
7709 
7710 	/* We need to fix up any BIOS configuration that conflicts with
7711 	 * our expectations.
7712 	 */
7713 	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7714 }
7715 
7716 static struct drm_crtc_helper_funcs intel_helper_funcs = {
7717 	.dpms = intel_crtc_dpms,
7718 	.mode_fixup = intel_crtc_mode_fixup,
7719 	.mode_set = intel_crtc_mode_set,
7720 	.mode_set_base = intel_pipe_set_base,
7721 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
7722 	.load_lut = intel_crtc_load_lut,
7723 	.disable = intel_crtc_disable,
7724 };
7725 
7726 static const struct drm_crtc_funcs intel_crtc_funcs = {
7727 	.reset = intel_crtc_reset,
7728 	.cursor_set = intel_crtc_cursor_set,
7729 	.cursor_move = intel_crtc_cursor_move,
7730 	.gamma_set = intel_crtc_gamma_set,
7731 	.set_config = drm_crtc_helper_set_config,
7732 	.destroy = intel_crtc_destroy,
7733 	.page_flip = intel_crtc_page_flip,
7734 };
7735 
7736 static void intel_crtc_init(struct drm_device *dev, int pipe)
7737 {
7738 	drm_i915_private_t *dev_priv = dev->dev_private;
7739 	struct intel_crtc *intel_crtc;
7740 	int i;
7741 
7742 	intel_crtc = kmalloc(sizeof(struct intel_crtc) +
7743 	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
7744 	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
7745 
7746 	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
7747 
7748 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
7749 	for (i = 0; i < 256; i++) {
7750 		intel_crtc->lut_r[i] = i;
7751 		intel_crtc->lut_g[i] = i;
7752 		intel_crtc->lut_b[i] = i;
7753 	}
7754 
7755 	/* Swap pipes & planes for FBC on pre-965 */
7756 	intel_crtc->pipe = pipe;
7757 	intel_crtc->plane = pipe;
7758 	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
7759 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
7760 		intel_crtc->plane = !pipe;
7761 	}
7762 
7763 	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
7764 	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
7765 	    ("plane_to_crtc is already initialized"));
7766 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7767 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7768 
7769 	intel_crtc_reset(&intel_crtc->base);
7770 	intel_crtc->active = true; /* force the pipe off on setup_init_config */
7771 	intel_crtc->bpp = 24; /* default for pre-Ironlake */
7772 
7773 	if (HAS_PCH_SPLIT(dev)) {
7774 		if (pipe == 2 && IS_IVYBRIDGE(dev))
7775 			intel_crtc->no_pll = true;
7776 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
7777 		intel_helper_funcs.commit = ironlake_crtc_commit;
7778 	} else {
7779 		intel_helper_funcs.prepare = i9xx_crtc_prepare;
7780 		intel_helper_funcs.commit = i9xx_crtc_commit;
7781 	}
7782 
7783 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
7784 
7785 	intel_crtc->busy = false;
7786 
7787 	callout_init_mp(&intel_crtc->idle_callout);
7788 }
7789 
7790 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7791 				struct drm_file *file)
7792 {
7793 	drm_i915_private_t *dev_priv = dev->dev_private;
7794 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7795 	struct drm_mode_object *drmmode_obj;
7796 	struct intel_crtc *crtc;
7797 
7798 	if (!dev_priv) {
7799 		DRM_ERROR("called with no initialization\n");
7800 		return -EINVAL;
7801 	}
7802 
7803 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7804 			DRM_MODE_OBJECT_CRTC);
7805 
7806 	if (!drmmode_obj) {
7807 		DRM_ERROR("no such CRTC id\n");
7808 		return -EINVAL;
7809 	}
7810 
7811 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7812 	pipe_from_crtc_id->pipe = crtc->pipe;
7813 
7814 	return 0;
7815 }
7816 
7817 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
7818 {
7819 	struct intel_encoder *encoder;
7820 	int index_mask = 0;
7821 	int entry = 0;
7822 
7823 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7824 		if (type_mask & encoder->clone_mask)
7825 			index_mask |= (1 << entry);
7826 		entry++;
7827 	}
7828 
7829 	return index_mask;
7830 }
7831 
7832 static bool has_edp_a(struct drm_device *dev)
7833 {
7834 	struct drm_i915_private *dev_priv = dev->dev_private;
7835 
7836 	if (!IS_MOBILE(dev))
7837 		return false;
7838 
7839 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7840 		return false;
7841 
7842 	if (IS_GEN5(dev) &&
7843 	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7844 		return false;
7845 
7846 	return true;
7847 }
7848 
7849 static void intel_setup_outputs(struct drm_device *dev)
7850 {
7851 	struct drm_i915_private *dev_priv = dev->dev_private;
7852 	struct intel_encoder *encoder;
7853 	bool dpd_is_edp = false;
7854 	bool has_lvds;
7855 
7856 	has_lvds = intel_lvds_init(dev);
7857 	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
7858 		/* disable the panel fitter on everything but LVDS */
7859 		I915_WRITE(PFIT_CONTROL, 0);
7860 	}
7861 
7862 	if (HAS_PCH_SPLIT(dev)) {
7863 		dpd_is_edp = intel_dpd_is_edp(dev);
7864 
7865 		if (has_edp_a(dev))
7866 			intel_dp_init(dev, DP_A);
7867 
7868 		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7869 			intel_dp_init(dev, PCH_DP_D);
7870 	}
7871 
7872 	intel_crt_init(dev);
7873 
7874 	if (HAS_PCH_SPLIT(dev)) {
7875 		int found;
7876 
7877 		DRM_DEBUG_KMS(
7878 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
7879 		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
7880 		    (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
7881 		    (I915_READ(HDMIC) & PORT_DETECTED) != 0,
7882 		    (I915_READ(HDMID) & PORT_DETECTED) != 0,
7883 		    (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
7884 		    (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
7885 		    (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
7886 
7887 		if (I915_READ(HDMIB) & PORT_DETECTED) {
7888 			/* PCH SDVOB multiplex with HDMIB */
7889 			found = intel_sdvo_init(dev, PCH_SDVOB);
7890 			if (!found)
7891 				intel_hdmi_init(dev, HDMIB);
7892 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
7893 				intel_dp_init(dev, PCH_DP_B);
7894 		}
7895 
7896 		if (I915_READ(HDMIC) & PORT_DETECTED)
7897 			intel_hdmi_init(dev, HDMIC);
7898 
7899 		if (I915_READ(HDMID) & PORT_DETECTED)
7900 			intel_hdmi_init(dev, HDMID);
7901 
7902 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
7903 			intel_dp_init(dev, PCH_DP_C);
7904 
7905 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7906 			intel_dp_init(dev, PCH_DP_D);
7907 
7908 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
7909 		bool found = false;
7910 
7911 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
7912 			DRM_DEBUG_KMS("probing SDVOB\n");
7913 			found = intel_sdvo_init(dev, SDVOB);
7914 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7915 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7916 				intel_hdmi_init(dev, SDVOB);
7917 			}
7918 
7919 			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
7920 				DRM_DEBUG_KMS("probing DP_B\n");
7921 				intel_dp_init(dev, DP_B);
7922 			}
7923 		}
7924 
7925 		/* Before G4X, SDVOC doesn't have its own detect register */
7926 
7927 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
7928 			DRM_DEBUG_KMS("probing SDVOC\n");
7929 			found = intel_sdvo_init(dev, SDVOC);
7930 		}
7931 
7932 		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
7933 
7934 			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
7935 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
7936 				intel_hdmi_init(dev, SDVOC);
7937 			}
7938 			if (SUPPORTS_INTEGRATED_DP(dev)) {
7939 				DRM_DEBUG_KMS("probing DP_C\n");
7940 				intel_dp_init(dev, DP_C);
7941 			}
7942 		}
7943 
7944 		if (SUPPORTS_INTEGRATED_DP(dev) &&
7945 		    (I915_READ(DP_D) & DP_DETECTED)) {
7946 			DRM_DEBUG_KMS("probing DP_D\n");
7947 			intel_dp_init(dev, DP_D);
7948 		}
7949 	} else if (IS_GEN2(dev)) {
7950 #if 1
7951 		KIB_NOTYET();
7952 #else
7953 		intel_dvo_init(dev);
7954 #endif
7955 	}
7956 
7957 	if (SUPPORTS_TV(dev))
7958 		intel_tv_init(dev);
7959 
7960 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7961 		encoder->base.possible_crtcs = encoder->crtc_mask;
7962 		encoder->base.possible_clones =
7963 			intel_encoder_clones(dev, encoder->clone_mask);
7964 	}
7965 
7966 	/* disable all the possible outputs/crtcs before entering KMS mode */
7967 	drm_helper_disable_unused_functions(dev);
7968 
7969 	if (HAS_PCH_SPLIT(dev))
7970 		ironlake_init_pch_refclk(dev);
7971 }
7972 
7973 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7974 {
7975 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7976 
7977 	drm_framebuffer_cleanup(fb);
7978 	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7979 
7980 	drm_free(intel_fb, DRM_MEM_KMS);
7981 }
7982 
7983 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7984 						struct drm_file *file,
7985 						unsigned int *handle)
7986 {
7987 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7988 	struct drm_i915_gem_object *obj = intel_fb->obj;
7989 
7990 	return drm_gem_handle_create(file, &obj->base, handle);
7991 }
7992 
7993 static const struct drm_framebuffer_funcs intel_fb_funcs = {
7994 	.destroy = intel_user_framebuffer_destroy,
7995 	.create_handle = intel_user_framebuffer_create_handle,
7996 };
7997 
7998 int intel_framebuffer_init(struct drm_device *dev,
7999 			   struct intel_framebuffer *intel_fb,
8000 			   struct drm_mode_fb_cmd2 *mode_cmd,
8001 			   struct drm_i915_gem_object *obj)
8002 {
8003 	int ret;
8004 
8005 	if (obj->tiling_mode == I915_TILING_Y)
8006 		return -EINVAL;
8007 
8008 	if (mode_cmd->pitches[0] & 63)
8009 		return -EINVAL;
8010 
8011 	switch (mode_cmd->pixel_format) {
8012 	case DRM_FORMAT_RGB332:
8013 	case DRM_FORMAT_RGB565:
8014 	case DRM_FORMAT_XRGB8888:
8015 	case DRM_FORMAT_XBGR8888:
8016 	case DRM_FORMAT_ARGB8888:
8017 	case DRM_FORMAT_XRGB2101010:
8018 	case DRM_FORMAT_ARGB2101010:
8019 		/* RGB formats are common across chipsets */
8020 		break;
8021 	case DRM_FORMAT_YUYV:
8022 	case DRM_FORMAT_UYVY:
8023 	case DRM_FORMAT_YVYU:
8024 	case DRM_FORMAT_VYUY:
8025 		break;
8026 	default:
8027 		DRM_DEBUG_KMS("unsupported pixel format %u\n",
8028 				mode_cmd->pixel_format);
8029 		return -EINVAL;
8030 	}
8031 
8032 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8033 	if (ret) {
8034 		DRM_ERROR("framebuffer init failed %d\n", ret);
8035 		return ret;
8036 	}
8037 
8038 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8039 	intel_fb->obj = obj;
8040 	return 0;
8041 }
8042 
8043 static int
8044 intel_user_framebuffer_create(struct drm_device *dev,
8045     struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd,
8046     struct drm_framebuffer **res)
8047 {
8048 	struct drm_i915_gem_object *obj;
8049 
8050 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8051 						mode_cmd->handles[0]));
8052 	if (&obj->base == NULL)
8053 		return (-ENOENT);
8054 
8055 	return (intel_framebuffer_create(dev, mode_cmd, obj, res));
8056 }
8057 
8058 static const struct drm_mode_config_funcs intel_mode_funcs = {
8059 	.fb_create = intel_user_framebuffer_create,
8060 	.output_poll_changed = intel_fb_output_poll_changed,
8061 };
8062 
8063 static struct drm_i915_gem_object *
8064 intel_alloc_context_page(struct drm_device *dev)
8065 {
8066 	struct drm_i915_gem_object *ctx;
8067 	int ret;
8068 
8069 	DRM_LOCK_ASSERT(dev);
8070 
8071 	ctx = i915_gem_alloc_object(dev, 4096);
8072 	if (!ctx) {
8073 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8074 		return NULL;
8075 	}
8076 
8077 	ret = i915_gem_object_pin(ctx, 4096, true);
8078 	if (ret) {
8079 		DRM_ERROR("failed to pin power context: %d\n", ret);
8080 		goto err_unref;
8081 	}
8082 
8083 	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8084 	if (ret) {
8085 		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8086 		goto err_unpin;
8087 	}
8088 
8089 	return ctx;
8090 
8091 err_unpin:
8092 	i915_gem_object_unpin(ctx);
8093 err_unref:
8094 	drm_gem_object_unreference(&ctx->base);
8095 	DRM_UNLOCK(dev);
8096 	return NULL;
8097 }
8098 
8099 bool ironlake_set_drps(struct drm_device *dev, u8 val)
8100 {
8101 	struct drm_i915_private *dev_priv = dev->dev_private;
8102 	u16 rgvswctl;
8103 
8104 	rgvswctl = I915_READ16(MEMSWCTL);
8105 	if (rgvswctl & MEMCTL_CMD_STS) {
8106 		DRM_DEBUG("gpu busy, RCS change rejected\n");
8107 		return false; /* still busy with another command */
8108 	}
8109 
8110 	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8111 		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8112 	I915_WRITE16(MEMSWCTL, rgvswctl);
8113 	POSTING_READ16(MEMSWCTL);
8114 
8115 	rgvswctl |= MEMCTL_CMD_STS;
8116 	I915_WRITE16(MEMSWCTL, rgvswctl);
8117 
8118 	return true;
8119 }
8120 
8121 void ironlake_enable_drps(struct drm_device *dev)
8122 {
8123 	struct drm_i915_private *dev_priv = dev->dev_private;
8124 	u32 rgvmodectl = I915_READ(MEMMODECTL);
8125 	u8 fmax, fmin, fstart, vstart;
8126 
8127 	/* Enable temp reporting */
8128 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8129 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8130 
8131 	/* 100ms RC evaluation intervals */
8132 	I915_WRITE(RCUPEI, 100000);
8133 	I915_WRITE(RCDNEI, 100000);
8134 
8135 	/* Set max/min thresholds to 90ms and 80ms respectively */
8136 	I915_WRITE(RCBMAXAVG, 90000);
8137 	I915_WRITE(RCBMINAVG, 80000);
8138 
8139 	I915_WRITE(MEMIHYST, 1);
8140 
8141 	/* Set up min, max, and cur for interrupt handling */
8142 	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8143 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8144 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8145 		MEMMODE_FSTART_SHIFT;
8146 
8147 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8148 		PXVFREQ_PX_SHIFT;
8149 
8150 	dev_priv->fmax = fmax; /* IPS callback will increase this */
8151 	dev_priv->fstart = fstart;
8152 
8153 	dev_priv->max_delay = fstart;
8154 	dev_priv->min_delay = fmin;
8155 	dev_priv->cur_delay = fstart;
8156 
8157 	DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
8158 			 fmax, fmin, fstart);
8159 
8160 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8161 
8162 	/*
8163 	 * Interrupts will be enabled in ironlake_irq_postinstall
8164 	 */
8165 
8166 	I915_WRITE(VIDSTART, vstart);
8167 	POSTING_READ(VIDSTART);
8168 
8169 	rgvmodectl |= MEMMODE_SWMODE_EN;
8170 	I915_WRITE(MEMMODECTL, rgvmodectl);
8171 
8172 	if (_intel_wait_for(dev,
8173 	    (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
8174 	    1, "915per"))
8175 		DRM_ERROR("stuck trying to change perf mode\n");
8176 	DELAY(1000);
8177 
8178 	ironlake_set_drps(dev, fstart);
8179 
8180 	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8181 		I915_READ(0x112e0);
8182 	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8183 	dev_priv->last_count2 = I915_READ(0x112f4);
8184 	nanotime(&dev_priv->last_time2);
8185 }
8186 
8187 void ironlake_disable_drps(struct drm_device *dev)
8188 {
8189 	struct drm_i915_private *dev_priv = dev->dev_private;
8190 	u16 rgvswctl = I915_READ16(MEMSWCTL);
8191 
8192 	/* Ack interrupts, disable EFC interrupt */
8193 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8194 	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8195 	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8196 	I915_WRITE(DEIIR, DE_PCU_EVENT);
8197 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8198 
8199 	/* Go back to the starting frequency */
8200 	ironlake_set_drps(dev, dev_priv->fstart);
8201 	DELAY(1000);
8202 	rgvswctl |= MEMCTL_CMD_STS;
8203 	I915_WRITE(MEMSWCTL, rgvswctl);
8204 	DELAY(1000);
8205 
8206 }
8207 
8208 void gen6_set_rps(struct drm_device *dev, u8 val)
8209 {
8210 	struct drm_i915_private *dev_priv = dev->dev_private;
8211 	u32 swreq;
8212 
8213 	swreq = (val & 0x3ff) << 25;
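	/* swreq now carries the request in the same RPNSWREQ frequency field
	 * that GEN6_FREQUENCY() fills in gen6_enable_rps() below. */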
8214 	I915_WRITE(GEN6_RPNSWREQ, swreq);
8215 }
8216 
8217 void gen6_disable_rps(struct drm_device *dev)
8218 {
8219 	struct drm_i915_private *dev_priv = dev->dev_private;
8220 
8221 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8222 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8223 	I915_WRITE(GEN6_PMIER, 0);
8224 	/* Masking all PM interrupts here doesn't race with the rps work
8225 	 * item unmasking them again, because that path uses a different
8226 	 * register (PMIMR) to mask PM interrupts. The only risk is leaving
8227 	 * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
8228 
8229 	lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
8230 	dev_priv->pm_iir = 0;
8231 	lockmgr(&dev_priv->rps_lock, LK_RELEASE);
8232 
8233 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8234 }
8235 
8236 static unsigned long intel_pxfreq(u32 vidfreq)
8237 {
8238 	unsigned long freq;
8239 	int div = (vidfreq & 0x3f0000) >> 16;
8240 	int post = (vidfreq & 0x3000) >> 12;
8241 	int pre = (vidfreq & 0x7);
8242 
8243 	if (!pre)
8244 		return 0;
8245 
8246 	freq = ((div * 133333) / ((1<<post) * pre));
8247 
8248 	return freq;
8249 }
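/*
 * Worked example with a hypothetical fuse value: vidfreq = 0x000c1002
 * decodes to div = 12, post = 1, pre = 2, so
 * freq = (12 * 133333) / ((1 << 1) * 2) = 399999 -- roughly 400 MHz if
 * the 133333 base is taken to be the 133.33MHz reference clock in kHz.
 */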
8250 
8251 void intel_init_emon(struct drm_device *dev)
8252 {
8253 	struct drm_i915_private *dev_priv = dev->dev_private;
8254 	u32 lcfuse;
8255 	u8 pxw[16];
8256 	int i;
8257 
8258 	/* Disable to program */
8259 	I915_WRITE(ECR, 0);
8260 	POSTING_READ(ECR);
8261 
8262 	/* Program energy weights for various events */
8263 	I915_WRITE(SDEW, 0x15040d00);
8264 	I915_WRITE(CSIEW0, 0x007f0000);
8265 	I915_WRITE(CSIEW1, 0x1e220004);
8266 	I915_WRITE(CSIEW2, 0x04000004);
8267 
8268 	for (i = 0; i < 5; i++)
8269 		I915_WRITE(PEW + (i * 4), 0);
8270 	for (i = 0; i < 3; i++)
8271 		I915_WRITE(DEW + (i * 4), 0);
8272 
8273 	/* Program P-state weights to account for frequency power adjustment */
8274 	for (i = 0; i < 16; i++) {
8275 		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8276 		unsigned long freq = intel_pxfreq(pxvidfreq);
8277 		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8278 			PXVFREQ_PX_SHIFT;
8279 		unsigned long val;
8280 
8281 		val = vid * vid;
8282 		val *= (freq / 1000);
8283 		val *= 255;
8284 		val /= (127*127*900);
8285 		if (val > 0xff)
8286 			DRM_ERROR("bad pxval: %ld\n", val);
8287 		pxw[i] = val;
8288 	}
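	/* The scaling above pins a nominal operating point to the top of the
	 * 8-bit range: at vid == 127 and freq == 900000 the expression
	 * evaluates to exactly 255, which appears to be where the
	 * 127*127*900 divisor comes from. */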
8289 	/* Render standby states get 0 weight */
8290 	pxw[14] = 0;
8291 	pxw[15] = 0;
8292 
8293 	for (i = 0; i < 4; i++) {
8294 		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8295 			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8296 		I915_WRITE(PXW + (i * 4), val);
8297 	}
8298 
8299 	/* Adjust magic regs to magic values (more experimental results) */
8300 	I915_WRITE(OGW0, 0);
8301 	I915_WRITE(OGW1, 0);
8302 	I915_WRITE(EG0, 0x00007f00);
8303 	I915_WRITE(EG1, 0x0000000e);
8304 	I915_WRITE(EG2, 0x000e0000);
8305 	I915_WRITE(EG3, 0x68000300);
8306 	I915_WRITE(EG4, 0x42000000);
8307 	I915_WRITE(EG5, 0x00140031);
8308 	I915_WRITE(EG6, 0);
8309 	I915_WRITE(EG7, 0);
8310 
8311 	for (i = 0; i < 8; i++)
8312 		I915_WRITE(PXWL + (i * 4), 0);
8313 
8314 	/* Enable PMON + select events */
8315 	I915_WRITE(ECR, 0x80000019);
8316 
8317 	lcfuse = I915_READ(LCFUSE02);
8318 
8319 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8320 }
8321 
8322 static int intel_enable_rc6(struct drm_device *dev)
8323 {
8324 	/*
8325 	 * Respect the kernel parameter if it is set
8326 	 */
8327 	if (i915_enable_rc6 >= 0)
8328 		return i915_enable_rc6;
8329 
8330 	/*
8331 	 * Disable RC6 on Ironlake
8332 	 */
8333 	if (INTEL_INFO(dev)->gen == 5)
8334 		return 0;
8335 
8336 	/*
8337 	 * Enable rc6 on Sandybridge if DMA remapping is disabled
8338 	 */
8339 	if (INTEL_INFO(dev)->gen == 6) {
8340 		DRM_DEBUG_DRIVER(
8341 		    "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
8342 		     intel_iommu_enabled ? "true" : "false",
8343 		     !intel_iommu_enabled ? "en" : "dis");
8344 		return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
8345 	}
8346 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8347 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8348 }
8349 
8350 void gen6_enable_rps(struct drm_i915_private *dev_priv)
8351 {
8352 	struct drm_device *dev = dev_priv->dev;
8353 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8354 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8355 	u32 pcu_mbox, rc6_mask = 0;
8356 	u32 gtfifodbg;
8357 	int cur_freq, min_freq, max_freq;
8358 	int rc6_mode;
8359 	int i;
8360 
8361 	/* Here begins a magic sequence of register writes to enable
8362 	 * auto-downclocking.
8363 	 *
8364 	 * There might be some value in exposing these to
8365 	 * userspace...
8366 	 */
8367 	I915_WRITE(GEN6_RC_STATE, 0);
8368 	DRM_LOCK(dev);
8369 
8370 	/* Clear the DBG now so we don't confuse earlier errors */
8371 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8372 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8373 		I915_WRITE(GTFIFODBG, gtfifodbg);
8374 	}
8375 
8376 	gen6_gt_force_wake_get(dev_priv);
8377 
8378 	/* disable the counters and set deterministic thresholds */
8379 	I915_WRITE(GEN6_RC_CONTROL, 0);
8380 
8381 	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8382 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8383 	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8384 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8385 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8386 
8387 	for (i = 0; i < I915_NUM_RINGS; i++)
8388 		I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
8389 
8390 	I915_WRITE(GEN6_RC_SLEEP, 0);
8391 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8392 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8393 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8394 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8395 
8396 	rc6_mode = intel_enable_rc6(dev_priv->dev);
8397 	if (rc6_mode & INTEL_RC6_ENABLE)
8398 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8399 
8400 	if (rc6_mode & INTEL_RC6p_ENABLE)
8401 		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8402 
8403 	if (rc6_mode & INTEL_RC6pp_ENABLE)
8404 		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8405 
8406 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8407 			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8408 			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8409 			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8410 
8411 	I915_WRITE(GEN6_RC_CONTROL,
8412 		   rc6_mask |
8413 		   GEN6_RC_CTL_EI_MODE(1) |
8414 		   GEN6_RC_CTL_HW_ENABLE);
8415 
8416 	I915_WRITE(GEN6_RPNSWREQ,
8417 		   GEN6_FREQUENCY(10) |
8418 		   GEN6_OFFSET(0) |
8419 		   GEN6_AGGRESSIVE_TURBO);
8420 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
8421 		   GEN6_FREQUENCY(12));
8422 
8423 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8424 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8425 		   18 << 24 |
8426 		   6 << 16);
8427 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8428 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8429 	I915_WRITE(GEN6_RP_UP_EI, 100000);
8430 	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8431 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8432 	I915_WRITE(GEN6_RP_CONTROL,
8433 		   GEN6_RP_MEDIA_TURBO |
8434 		   GEN6_RP_MEDIA_HW_MODE |
8435 		   GEN6_RP_MEDIA_IS_GFX |
8436 		   GEN6_RP_ENABLE |
8437 		   GEN6_RP_UP_BUSY_AVG |
8438 		   GEN6_RP_DOWN_IDLE_CONT);
8439 
8440 	if (_intel_wait_for(dev,
8441 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8442 	    1, "915pr1"))
8443 		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8444 
8445 	I915_WRITE(GEN6_PCODE_DATA, 0);
8446 	I915_WRITE(GEN6_PCODE_MAILBOX,
8447 		   GEN6_PCODE_READY |
8448 		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8449 	if (_intel_wait_for(dev,
8450 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8451 	    1, "915pr2"))
8452 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
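	/* The pcode mailbox handshake used throughout this function: wait
	 * for GEN6_PCODE_READY to clear, write GEN6_PCODE_DATA, write the
	 * command with GEN6_PCODE_READY set, then wait for the hardware to
	 * clear READY again to signal completion. */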
8453 
8454 	min_freq = (rp_state_cap & 0xff0000) >> 16;
8455 	max_freq = rp_state_cap & 0xff;
8456 	cur_freq = (gt_perf_status & 0xff00) >> 8;
8457 
8458 	/* Check for overclock support */
8459 	if (_intel_wait_for(dev,
8460 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8461 	    1, "915pr3"))
8462 		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8463 	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8464 	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8465 	if (_intel_wait_for(dev,
8466 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8467 	    1, "915pr4"))
8468 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8469 	if (pcu_mbox & (1<<31)) { /* OC supported */
8470 		max_freq = pcu_mbox & 0xff;
8471 		DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
8472 	}
8473 
8474 	/* In units of 100MHz */
8475 	dev_priv->max_delay = max_freq;
8476 	dev_priv->min_delay = min_freq;
8477 	dev_priv->cur_delay = cur_freq;
8478 
8479 	/* requires MSI enabled */
8480 	I915_WRITE(GEN6_PMIER,
8481 		   GEN6_PM_MBOX_EVENT |
8482 		   GEN6_PM_THERMAL_EVENT |
8483 		   GEN6_PM_RP_DOWN_TIMEOUT |
8484 		   GEN6_PM_RP_UP_THRESHOLD |
8485 		   GEN6_PM_RP_DOWN_THRESHOLD |
8486 		   GEN6_PM_RP_UP_EI_EXPIRED |
8487 		   GEN6_PM_RP_DOWN_EI_EXPIRED);
8488 	lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
8489 	if (dev_priv->pm_iir != 0)
8490 		kprintf("pm_iir %x\n", dev_priv->pm_iir);
8491 	I915_WRITE(GEN6_PMIMR, 0);
8492 	lockmgr(&dev_priv->rps_lock, LK_RELEASE);
8493 	/* enable all PM interrupts */
8494 	I915_WRITE(GEN6_PMINTRMSK, 0);
8495 
8496 	gen6_gt_force_wake_put(dev_priv);
8497 	DRM_UNLOCK(dev);
8498 }
8499 
8500 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8501 {
8502 	struct drm_device *dev;
8503 	int min_freq = 15;
8504 	int gpu_freq, ia_freq, max_ia_freq;
8505 	int scaling_factor = 180;
8507 
8508 	dev = dev_priv->dev;
8509 #if 0
8510 	max_ia_freq = cpufreq_quick_get_max(0);
8511 	/*
8512 	 * Default to measured freq if none found, PCU will ensure we don't go
8513 	 * over
8514 	 */
8515 	if (!max_ia_freq)
8516 		max_ia_freq = tsc_freq;
8517 
8518 	/* Convert from Hz to MHz */
8519 	max_ia_freq /= 1000;
8520 #else
8521 	/* Read the system TSC frequency and convert Hz -> MHz */
8522 	max_ia_freq = atomic_load_acq_64(&tsc_freq) / 1000 / 1000;
8523 #endif
8524 
8525 	DRM_LOCK(dev);
8526 
8527 	/*
8528 	 * For each potential GPU frequency, load a ring frequency we'd like
8529 	 * to use for memory access.  We do this by specifying the IA frequency
8530 	 * the PCU should use as a reference to determine the ring frequency.
8531 	 */
8532 	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8533 	     gpu_freq--) {
8534 		int diff = dev_priv->max_delay - gpu_freq;
8535 		int d;
8536 
8537 		/*
8538 		 * For GPU frequencies less than 750MHz, just use the lowest
8539 		 * ring freq.
8540 		 */
8541 		if (gpu_freq < min_freq)
8542 			ia_freq = 800;
8543 		else
8544 			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8545 		d = 100;
8546 		ia_freq = (ia_freq + d / 2) / d;
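		/* Worked example with a hypothetical 3.4GHz CPU: for
		 * max_ia_freq = 3400 and diff = 4, ia_freq = 3400 -
		 * (4 * 180) / 2 = 3040, which rounds to 30 in the units
		 * the PCU's frequency table expects. */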
8547 
8548 		I915_WRITE(GEN6_PCODE_DATA,
8549 			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8550 			   gpu_freq);
8551 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8552 			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8553 		if (_intel_wait_for(dev,
8554 		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8555 		    10, 1, "915frq")) {
8556 			DRM_ERROR("pcode write of freq table timed out\n");
8557 			continue;
8558 		}
8559 	}
8560 
8561 	DRM_UNLOCK(dev);
8562 }
8563 
8564 static void ironlake_init_clock_gating(struct drm_device *dev)
8565 {
8566 	struct drm_i915_private *dev_priv = dev->dev_private;
8567 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8568 
8569 	/* Required for FBC */
8570 	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8571 		DPFCRUNIT_CLOCK_GATE_DISABLE |
8572 		DPFDUNIT_CLOCK_GATE_DISABLE;
8573 	/* Required for CxSR */
8574 	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8575 
8576 	I915_WRITE(PCH_3DCGDIS0,
8577 		   MARIUNIT_CLOCK_GATE_DISABLE |
8578 		   SVSMUNIT_CLOCK_GATE_DISABLE);
8579 	I915_WRITE(PCH_3DCGDIS1,
8580 		   VFMUNIT_CLOCK_GATE_DISABLE);
8581 
8582 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8583 
8584 	/*
8585 	 * According to the spec the following bits should be set in
8586 	 * order to enable memory self-refresh
8587 	 * The bit 22/21 of 0x42004
8588 	 * The bit 5 of 0x42020
8589 	 * The bit 15 of 0x45000
8590 	 */
8591 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8592 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
8593 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8594 	I915_WRITE(ILK_DSPCLK_GATE,
8595 		   (I915_READ(ILK_DSPCLK_GATE) |
8596 		    ILK_DPARB_CLK_GATE));
8597 	I915_WRITE(DISP_ARB_CTL,
8598 		   (I915_READ(DISP_ARB_CTL) |
8599 		    DISP_FBC_WM_DIS));
8600 	I915_WRITE(WM3_LP_ILK, 0);
8601 	I915_WRITE(WM2_LP_ILK, 0);
8602 	I915_WRITE(WM1_LP_ILK, 0);
8603 
8604 	/*
8605 	 * Based on the document from hardware guys the following bits
8606 	 * should be set unconditionally in order to enable FBC.
8607 	 * The bit 22 of 0x42000
8608 	 * The bit 22 of 0x42004
8609 	 * The bit 7,8,9 of 0x42020.
8610 	 */
8611 	if (IS_IRONLAKE_M(dev)) {
8612 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
8613 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
8614 			   ILK_FBCQ_DIS);
8615 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
8616 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
8617 			   ILK_DPARB_GATE);
8618 		I915_WRITE(ILK_DSPCLK_GATE,
8619 			   I915_READ(ILK_DSPCLK_GATE) |
8620 			   ILK_DPFC_DIS1 |
8621 			   ILK_DPFC_DIS2 |
8622 			   ILK_CLK_FBC);
8623 	}
8624 
8625 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8626 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8627 		   ILK_ELPIN_409_SELECT);
8628 	I915_WRITE(_3D_CHICKEN2,
8629 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8630 		   _3D_CHICKEN2_WM_READ_PIPELINED);
8631 }
8632 
8633 static void gen6_init_clock_gating(struct drm_device *dev)
8634 {
8635 	struct drm_i915_private *dev_priv = dev->dev_private;
8636 	int pipe;
8637 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8638 
8639 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8640 
8641 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8642 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8643 		   ILK_ELPIN_409_SELECT);
8644 
8645 	I915_WRITE(WM3_LP_ILK, 0);
8646 	I915_WRITE(WM2_LP_ILK, 0);
8647 	I915_WRITE(WM1_LP_ILK, 0);
8648 
8649 	I915_WRITE(GEN6_UCGCTL1,
8650 		   I915_READ(GEN6_UCGCTL1) |
8651 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
8652 
8653 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8654 	 * gating disable must be set.  Failure to set it results in
8655 	 * flickering pixels due to Z write ordering failures after
8656 	 * some amount of runtime in the Mesa "fire" demo, and Unigine
8657 	 * Sanctuary and Tropics, and apparently anything else with
8658 	 * alpha test or pixel discard.
8659 	 *
8660 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
8661 	 * but we didn't debug actual testcases to find it out.
8662 	 */
8663 	I915_WRITE(GEN6_UCGCTL2,
8664 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8665 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8666 
8667 	/*
8668 	 * According to the spec the following bits should be
8669 	 * set in order to enable memory self-refresh and fbc:
8670 	 * The bit21 and bit22 of 0x42000
8671 	 * The bit21 and bit22 of 0x42004
8672 	 * The bit5 and bit7 of 0x42020
8673 	 * The bit14 of 0x70180
8674 	 * The bit14 of 0x71180
8675 	 */
8676 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
8677 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
8678 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8679 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8680 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8681 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8682 	I915_WRITE(ILK_DSPCLK_GATE,
8683 		   I915_READ(ILK_DSPCLK_GATE) |
8684 		   ILK_DPARB_CLK_GATE  |
8685 		   ILK_DPFD_CLK_GATE);
8686 
8687 	for_each_pipe(pipe) {
8688 		I915_WRITE(DSPCNTR(pipe),
8689 			   I915_READ(DSPCNTR(pipe)) |
8690 			   DISPPLANE_TRICKLE_FEED_DISABLE);
8691 		intel_flush_display_plane(dev_priv, pipe);
8692 	}
8693 }
8694 
8695 static void ivybridge_init_clock_gating(struct drm_device *dev)
8696 {
8697 	struct drm_i915_private *dev_priv = dev->dev_private;
8698 	int pipe;
8699 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8700 
8701 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8702 
8703 	I915_WRITE(WM3_LP_ILK, 0);
8704 	I915_WRITE(WM2_LP_ILK, 0);
8705 	I915_WRITE(WM1_LP_ILK, 0);
8706 
8707 	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8708 	 * This implements the WaDisableRCZUnitClockGating workaround.
8709 	 */
8710 	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8711 
8712 	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8713 
8714 	I915_WRITE(IVB_CHICKEN3,
8715 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8716 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
8717 
8718 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8719 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8720 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8721 
8722 	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8723 	I915_WRITE(GEN7_L3CNTLREG1,
8724 			GEN7_WA_FOR_GEN7_L3_CONTROL);
8725 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8726 			GEN7_WA_L3_CHICKEN_MODE);
8727 
8728 	/* This is required by WaCatErrorRejectionIssue */
8729 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8730 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8731 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8732 
8733 	for_each_pipe(pipe) {
8734 		I915_WRITE(DSPCNTR(pipe),
8735 			   I915_READ(DSPCNTR(pipe)) |
8736 			   DISPPLANE_TRICKLE_FEED_DISABLE);
8737 		intel_flush_display_plane(dev_priv, pipe);
8738 	}
8739 }
8740 
8741 static void g4x_init_clock_gating(struct drm_device *dev)
8742 {
8743 	struct drm_i915_private *dev_priv = dev->dev_private;
8744 	uint32_t dspclk_gate;
8745 
8746 	I915_WRITE(RENCLK_GATE_D1, 0);
8747 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8748 		   GS_UNIT_CLOCK_GATE_DISABLE |
8749 		   CL_UNIT_CLOCK_GATE_DISABLE);
8750 	I915_WRITE(RAMCLK_GATE_D, 0);
8751 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8752 		OVRUNIT_CLOCK_GATE_DISABLE |
8753 		OVCUNIT_CLOCK_GATE_DISABLE;
8754 	if (IS_GM45(dev))
8755 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8756 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8757 }
8758 
8759 static void crestline_init_clock_gating(struct drm_device *dev)
8760 {
8761 	struct drm_i915_private *dev_priv = dev->dev_private;
8762 
8763 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8764 	I915_WRITE(RENCLK_GATE_D2, 0);
8765 	I915_WRITE(DSPCLK_GATE_D, 0);
8766 	I915_WRITE(RAMCLK_GATE_D, 0);
8767 	I915_WRITE16(DEUC, 0);
8768 }
8769 
8770 static void broadwater_init_clock_gating(struct drm_device *dev)
8771 {
8772 	struct drm_i915_private *dev_priv = dev->dev_private;
8773 
8774 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8775 		   I965_RCC_CLOCK_GATE_DISABLE |
8776 		   I965_RCPB_CLOCK_GATE_DISABLE |
8777 		   I965_ISC_CLOCK_GATE_DISABLE |
8778 		   I965_FBC_CLOCK_GATE_DISABLE);
8779 	I915_WRITE(RENCLK_GATE_D2, 0);
8780 }
8781 
8782 static void gen3_init_clock_gating(struct drm_device *dev)
8783 {
8784 	struct drm_i915_private *dev_priv = dev->dev_private;
8785 	u32 dstate = I915_READ(D_STATE);
8786 
8787 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8788 		DSTATE_DOT_CLOCK_GATING;
8789 	I915_WRITE(D_STATE, dstate);
8790 }
8791 
8792 static void i85x_init_clock_gating(struct drm_device *dev)
8793 {
8794 	struct drm_i915_private *dev_priv = dev->dev_private;
8795 
8796 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8797 }
8798 
8799 static void i830_init_clock_gating(struct drm_device *dev)
8800 {
8801 	struct drm_i915_private *dev_priv = dev->dev_private;
8802 
8803 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
8804 }
8805 
8806 static void ibx_init_clock_gating(struct drm_device *dev)
8807 {
8808 	struct drm_i915_private *dev_priv = dev->dev_private;
8809 
8810 	/*
8811 	 * On Ibex Peak and Cougar Point, we need to disable clock
8812 	 * gating for the panel power sequencer or it will fail to
8813 	 * start up when no ports are active.
8814 	 */
8815 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8816 }
8817 
8818 static void cpt_init_clock_gating(struct drm_device *dev)
8819 {
8820 	struct drm_i915_private *dev_priv = dev->dev_private;
8821 	int pipe;
8822 
8823 	/*
8824 	 * On Ibex Peak and Cougar Point, we need to disable clock
8825 	 * gating for the panel power sequencer or it will fail to
8826 	 * start up when no ports are active.
8827 	 */
8828 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8829 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8830 		   DPLS_EDP_PPS_FIX_DIS);
8831 	/* Without this, mode sets may fail silently on FDI */
8832 	for_each_pipe(pipe)
8833 		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
8834 }
8835 
8836 static void ironlake_teardown_rc6(struct drm_device *dev)
8837 {
8838 	struct drm_i915_private *dev_priv = dev->dev_private;
8839 
8840 	if (dev_priv->renderctx) {
8841 		i915_gem_object_unpin(dev_priv->renderctx);
8842 		drm_gem_object_unreference(&dev_priv->renderctx->base);
8843 		dev_priv->renderctx = NULL;
8844 	}
8845 
8846 	if (dev_priv->pwrctx) {
8847 		i915_gem_object_unpin(dev_priv->pwrctx);
8848 		drm_gem_object_unreference(&dev_priv->pwrctx->base);
8849 		dev_priv->pwrctx = NULL;
8850 	}
8851 }
8852 
8853 static void ironlake_disable_rc6(struct drm_device *dev)
8854 {
8855 	struct drm_i915_private *dev_priv = dev->dev_private;
8856 
8857 	if (I915_READ(PWRCTXA)) {
8858 		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
8859 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
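		/*
		 * Wait up to 50ms for the render standby (RSX) status
		 * field to read back as ON before dropping PWRCTXA.
		 */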
8860 		(void)_intel_wait_for(dev,
8861 		    ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
8862 		    50, 1, "915pro");
8863 
8864 		I915_WRITE(PWRCTXA, 0);
8865 		POSTING_READ(PWRCTXA);
8866 
8867 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8868 		POSTING_READ(RSTDBYCTL);
8869 	}
8870 
8871 	ironlake_teardown_rc6(dev);
8872 }
8873 
8874 static int ironlake_setup_rc6(struct drm_device *dev)
8875 {
8876 	struct drm_i915_private *dev_priv = dev->dev_private;
8877 
8878 	if (dev_priv->renderctx == NULL)
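	/*
	 * Lazily allocate the pinned render-context and power-context
	 * pages; if the second allocation fails, tear down the first so
	 * no half-initialized state is left behind.
	 */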
8879 		dev_priv->renderctx = intel_alloc_context_page(dev);
8880 	if (!dev_priv->renderctx)
8881 		return -ENOMEM;
8882 
8883 	if (dev_priv->pwrctx == NULL)
8884 		dev_priv->pwrctx = intel_alloc_context_page(dev);
8885 	if (!dev_priv->pwrctx) {
8886 		ironlake_teardown_rc6(dev);
8887 		return -ENOMEM;
8888 	}
8889 
8890 	return 0;
8891 }
8892 
8893 void ironlake_enable_rc6(struct drm_device *dev)
8894 {
8895 	struct drm_i915_private *dev_priv = dev->dev_private;
8896 	int ret;
8897 
8898 	/* rc6 is disabled by default due to repeated reports of hangs during
8899 	 * boot and resume.
8900 	 */
8901 	if (!intel_enable_rc6(dev))
8902 		return;
8903 
8904 	DRM_LOCK(dev);
8905 	ret = ironlake_setup_rc6(dev);
8906 	if (ret) {
8907 		DRM_UNLOCK(dev);
8908 		return;
8909 	}
8910 
8911 	/*
8912 	 * The GPU can automatically power down the render unit if given a
8913 	 * page in which to save its state.
8914 	 */
8915 	ret = BEGIN_LP_RING(6);
8916 	if (ret) {
8917 		ironlake_teardown_rc6(dev);
8918 		DRM_UNLOCK(dev);
8919 		return;
8920 	}
8921 
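	/*
	 * Six dwords: bracket an MI_SET_CONTEXT (pointing the ring at
	 * renderctx in the GTT, with extended state save/restore enabled
	 * and the initial restore inhibited) with suspend flushes, then
	 * pad and flush.
	 */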
8922 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8923 	OUT_RING(MI_SET_CONTEXT);
8924 	OUT_RING(dev_priv->renderctx->gtt_offset |
8925 		 MI_MM_SPACE_GTT |
8926 		 MI_SAVE_EXT_STATE_EN |
8927 		 MI_RESTORE_EXT_STATE_EN |
8928 		 MI_RESTORE_INHIBIT);
8929 	OUT_RING(MI_SUSPEND_FLUSH);
8930 	OUT_RING(MI_NOOP);
8931 	OUT_RING(MI_FLUSH);
8932 	ADVANCE_LP_RING();
8933 
8934 	/*
8935 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8936 	 * does an implicit flush; combined with the MI_FLUSH above, it should
8937 	 * be safe to assume that renderctx is valid.
8938 	 */
8939 	ret = intel_wait_ring_idle(LP_RING(dev_priv));
8940 	if (ret) {
8941 		DRM_ERROR("failed to enable ironlake power savings\n");
8942 		ironlake_teardown_rc6(dev);
8943 		DRM_UNLOCK(dev);
8944 		return;
8945 	}
8946 
8947 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8948 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8949 	DRM_UNLOCK(dev);
8950 }
8951 
8952 void intel_init_clock_gating(struct drm_device *dev)
8953 {
8954 	struct drm_i915_private *dev_priv = dev->dev_private;
8955 
8956 	dev_priv->display.init_clock_gating(dev);
8957 
8958 	if (dev_priv->display.init_pch_clock_gating)
8959 		dev_priv->display.init_pch_clock_gating(dev);
8960 }
8961 
8962 /* Set up chip specific display functions */
8963 static void intel_init_display(struct drm_device *dev)
8964 {
8965 	struct drm_i915_private *dev_priv = dev->dev_private;
8966 
8967 	/* We always want a DPMS function */
8968 	if (HAS_PCH_SPLIT(dev)) {
8969 		dev_priv->display.dpms = ironlake_crtc_dpms;
8970 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8971 		dev_priv->display.update_plane = ironlake_update_plane;
8972 	} else {
8973 		dev_priv->display.dpms = i9xx_crtc_dpms;
8974 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8975 		dev_priv->display.update_plane = i9xx_update_plane;
8976 	}
8977 
8978 	if (I915_HAS_FBC(dev)) {
8979 		if (HAS_PCH_SPLIT(dev)) {
8980 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8981 			dev_priv->display.enable_fbc = ironlake_enable_fbc;
8982 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
8983 		} else if (IS_GM45(dev)) {
8984 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8985 			dev_priv->display.enable_fbc = g4x_enable_fbc;
8986 			dev_priv->display.disable_fbc = g4x_disable_fbc;
8987 		} else if (IS_CRESTLINE(dev)) {
8988 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8989 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
8990 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
8991 		}
8992 		/* 855GM needs testing */
8993 	}
8994 
8995 	/* Returns the core display clock speed */
8996 	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8997 		dev_priv->display.get_display_clock_speed =
8998 			i945_get_display_clock_speed;
8999 	else if (IS_I915G(dev))
9000 		dev_priv->display.get_display_clock_speed =
9001 			i915_get_display_clock_speed;
9002 	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
9003 		dev_priv->display.get_display_clock_speed =
9004 			i9xx_misc_get_display_clock_speed;
9005 	else if (IS_I915GM(dev))
9006 		dev_priv->display.get_display_clock_speed =
9007 			i915gm_get_display_clock_speed;
9008 	else if (IS_I865G(dev))
9009 		dev_priv->display.get_display_clock_speed =
9010 			i865_get_display_clock_speed;
9011 	else if (IS_I85X(dev))
9012 		dev_priv->display.get_display_clock_speed =
9013 			i855_get_display_clock_speed;
9014 	else /* 852, 830 */
9015 		dev_priv->display.get_display_clock_speed =
9016 			i830_get_display_clock_speed;
9017 
9018 	/* For FIFO watermark updates */
9019 	if (HAS_PCH_SPLIT(dev)) {
9020 		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
9021 		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
9022 
9023 		/* IVB configs may use multi-threaded forcewake */
9024 		if (IS_IVYBRIDGE(dev)) {
9025 			u32	ecobus;
9026 
9027 			/* A small trick here - if the BIOS hasn't configured MT forcewake,
9028 			 * and the device is in RC6, then force_wake_mt_get will not wake
9029 			 * the device and the ECOBUS read will return zero, which will be
9030 			 * (correctly) interpreted by the test below as MT forcewake being
9031 			 * disabled.
9032 			 */
9033 			DRM_LOCK(dev);
9034 			__gen6_gt_force_wake_mt_get(dev_priv);
9035 			ecobus = I915_READ_NOTRACE(ECOBUS);
9036 			__gen6_gt_force_wake_mt_put(dev_priv);
9037 			DRM_UNLOCK(dev);
9038 
9039 			if (ecobus & FORCEWAKE_MT_ENABLE) {
9040 				DRM_DEBUG_KMS("Using MT version of forcewake\n");
9041 				dev_priv->display.force_wake_get =
9042 					__gen6_gt_force_wake_mt_get;
9043 				dev_priv->display.force_wake_put =
9044 					__gen6_gt_force_wake_mt_put;
9045 			}
9046 		}
9047 
9048 		if (HAS_PCH_IBX(dev))
9049 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
9050 		else if (HAS_PCH_CPT(dev))
9051 			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
9052 
9053 		if (IS_GEN5(dev)) {
9054 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
9055 				dev_priv->display.update_wm = ironlake_update_wm;
9056 			else {
9057 				DRM_DEBUG_KMS("Failed to get proper latency. "
9058 					      "Disabling CxSR\n");
9059 				dev_priv->display.update_wm = NULL;
9060 			}
9061 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
9062 			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
9063 			dev_priv->display.write_eld = ironlake_write_eld;
9064 		} else if (IS_GEN6(dev)) {
9065 			if (SNB_READ_WM0_LATENCY()) {
9066 				dev_priv->display.update_wm = sandybridge_update_wm;
9067 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9068 			} else {
9069 				DRM_DEBUG_KMS("Failed to read display plane latency. "
9070 					      "Disabling CxSR\n");
9071 				dev_priv->display.update_wm = NULL;
9072 			}
9073 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
9074 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
9075 			dev_priv->display.write_eld = ironlake_write_eld;
9076 		} else if (IS_IVYBRIDGE(dev)) {
9077 			/* FIXME: detect B0+ stepping and use auto training */
9078 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
9079 			if (SNB_READ_WM0_LATENCY()) {
9080 				dev_priv->display.update_wm = sandybridge_update_wm;
9081 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9082 			} else {
9083 				DRM_DEBUG_KMS("Failed to read display plane latency. "
9084 					      "Disabling CxSR\n");
9085 				dev_priv->display.update_wm = NULL;
9086 			}
9087 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
9088 			dev_priv->display.write_eld = ironlake_write_eld;
9089 		} else
9090 			dev_priv->display.update_wm = NULL;
9091 	} else if (IS_PINEVIEW(dev)) {
9092 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
9093 					    dev_priv->is_ddr3,
9094 					    dev_priv->fsb_freq,
9095 					    dev_priv->mem_freq)) {
9096 			DRM_INFO("failed to find known CxSR latency "
9097 				 "(found ddr%s fsb freq %d, mem freq %d), "
9098 				 "disabling CxSR\n",
9099 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9100 				 dev_priv->fsb_freq, dev_priv->mem_freq);
9101 			/* Disable CxSR and never update its watermark again */
9102 			pineview_disable_cxsr(dev);
9103 			dev_priv->display.update_wm = NULL;
9104 		} else
9105 			dev_priv->display.update_wm = pineview_update_wm;
9106 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9107 	} else if (IS_G4X(dev)) {
9108 		dev_priv->display.write_eld = g4x_write_eld;
9109 		dev_priv->display.update_wm = g4x_update_wm;
9110 		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
9111 	} else if (IS_GEN4(dev)) {
9112 		dev_priv->display.update_wm = i965_update_wm;
9113 		if (IS_CRESTLINE(dev))
9114 			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
9115 		else if (IS_BROADWATER(dev))
9116 			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
9117 	} else if (IS_GEN3(dev)) {
9118 		dev_priv->display.update_wm = i9xx_update_wm;
9119 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9120 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9121 	} else if (IS_I865G(dev)) {
9122 		dev_priv->display.update_wm = i830_update_wm;
9123 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9124 		dev_priv->display.get_fifo_size = i830_get_fifo_size;
9125 	} else if (IS_I85X(dev)) {
9126 		dev_priv->display.update_wm = i9xx_update_wm;
9127 		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
9128 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9129 	} else {
9130 		dev_priv->display.update_wm = i830_update_wm;
9131 		dev_priv->display.init_clock_gating = i830_init_clock_gating;
9132 		if (IS_845G(dev))
9133 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
9134 		else
9135 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
9136 	}
9137 
9138 	/* Default just returns -ENODEV to indicate unsupported */
9139 	dev_priv->display.queue_flip = intel_default_queue_flip;
9140 
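	/*
	 * The MI_DISPLAY_FLIP command layout differs between generations,
	 * so pick the matching implementation.
	 */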
9141 	switch (INTEL_INFO(dev)->gen) {
9142 	case 2:
9143 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
9144 		break;
9145 
9146 	case 3:
9147 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
9148 		break;
9149 
9150 	case 4:
9151 	case 5:
9152 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
9153 		break;
9154 
9155 	case 6:
9156 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
9157 		break;
9158 	case 7:
9159 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
9160 		break;
9161 	}
9162 }
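
/*
 * Callers go through the vtable and treat a NULL hook as unsupported,
 * e.g. (sketch of the pattern used by intel_update_watermarks()):
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(dev);
 *
 * which is why intel_init_display() leaves hooks NULL instead of
 * pointing them at stubs.
 */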
9163 
9164 /*
9165  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
9166  * resume, or other times.  This quirk makes sure that's the case for
9167  * affected systems.
9168  */
9169 static void quirk_pipea_force(struct drm_device *dev)
9170 {
9171 	struct drm_i915_private *dev_priv = dev->dev_private;
9172 
9173 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9174 	DRM_DEBUG("applying pipe a force quirk\n");
9175 }
9176 
9177 /*
9178  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
9179  */
9180 static void quirk_ssc_force_disable(struct drm_device *dev)
9181 {
9182 	struct drm_i915_private *dev_priv = dev->dev_private;
9183 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9184 }
9185 
9186 struct intel_quirk {
9187 	int device;
9188 	int subsystem_vendor;
9189 	int subsystem_device;
9190 	void (*hook)(struct drm_device *dev);
9191 };
9192 
9193 #define	PCI_ANY_ID	(~0u)
9194 
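/*
 * Each entry matches on PCI device ID plus subsystem vendor/device
 * (PCI_ANY_ID wildcards a subsystem field); every matching entry's
 * hook runs once from intel_init_quirks() at modeset init.
 */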
9195 struct intel_quirk intel_quirks[] = {
9196 	/* HP Mini needs pipe A force quirk (LP: #322104) */
9197 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9198 
9199 	/* Thinkpad R31 needs pipe A force quirk */
9200 	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
9201 	/* Toshiba Protege R-205 and S-209 need pipe A force quirk */
9202 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
9203 
9204 	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
9205 	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
9206 	/* ThinkPad X40 needs pipe A force quirk */
9207 
9208 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
9209 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
9210 
9211 	/* 855 & before need to leave pipe A & dpll A up */
9212 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9213 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9214 
9215 	/* Lenovo U160 cannot use SSC on LVDS */
9216 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
9217 
9218 	/* Sony Vaio Y cannot use SSC on LVDS */
9219 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
9220 };
9221 
9222 static void intel_init_quirks(struct drm_device *dev)
9223 {
9224 	struct intel_quirk *q;
9225 	device_t d;
9226 	int i;
9227 
9228 	d = dev->device;
9229 	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
9230 		q = &intel_quirks[i];
9231 		if (pci_get_device(d) == q->device &&
9232 		    (pci_get_subvendor(d) == q->subsystem_vendor ||
9233 		     q->subsystem_vendor == PCI_ANY_ID) &&
9234 		    (pci_get_subdevice(d) == q->subsystem_device ||
9235 		     q->subsystem_device == PCI_ANY_ID))
9236 			q->hook(dev);
9237 	}
9238 }
9239 
9240 /* Disable the VGA plane that we never use */
9241 static void i915_disable_vga(struct drm_device *dev)
9242 {
9243 	struct drm_i915_private *dev_priv = dev->dev_private;
9244 	u8 sr1;
9245 	u32 vga_reg;
9246 
9247 	if (HAS_PCH_SPLIT(dev))
9248 		vga_reg = CPU_VGACNTRL;
9249 	else
9250 		vga_reg = VGACNTRL;
9251 
9252 #if 0
9253 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
9254 #endif
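	/* Setting bit 5 of VGA sequencer register SR01 blanks the screen. */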
9255 	outb(VGA_SR_INDEX, 1);
9256 	sr1 = inb(VGA_SR_DATA);
9257 	outb(VGA_SR_DATA, sr1 | 1 << 5);
9258 #if 0
9259 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
9260 #endif
9261 	DELAY(300);
9262 
9263 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9264 	POSTING_READ(vga_reg);
9265 }
9266 
9267 void intel_modeset_init(struct drm_device *dev)
9268 {
9269 	struct drm_i915_private *dev_priv = dev->dev_private;
9270 	int i, ret;
9271 
9272 	drm_mode_config_init(dev);
9273 
9274 	dev->mode_config.min_width = 0;
9275 	dev->mode_config.min_height = 0;
9276 
9277 	dev->mode_config.preferred_depth = 24;
9278 	dev->mode_config.prefer_shadow = 1;
9279 
9280 	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
9281 	    &intel_mode_funcs);
9282 
9283 	intel_init_quirks(dev);
9284 
9285 	intel_init_display(dev);
9286 
9287 	if (IS_GEN2(dev)) {
9288 		dev->mode_config.max_width = 2048;
9289 		dev->mode_config.max_height = 2048;
9290 	} else if (IS_GEN3(dev)) {
9291 		dev->mode_config.max_width = 4096;
9292 		dev->mode_config.max_height = 4096;
9293 	} else {
9294 		dev->mode_config.max_width = 8192;
9295 		dev->mode_config.max_height = 8192;
9296 	}
9297 	dev->mode_config.fb_base = dev->agp->base;
9298 
9299 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
9300 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
9301 
9302 	for (i = 0; i < dev_priv->num_pipe; i++) {
9303 		intel_crtc_init(dev, i);
9304 		ret = intel_plane_init(dev, i);
9305 		if (ret)
9306 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9307 	}
9308 
9309 	/* Just disable the VGA plane once at startup */
9310 	i915_disable_vga(dev);
9311 	intel_setup_outputs(dev);
9312 
9313 	intel_init_clock_gating(dev);
9314 
9315 	if (IS_IRONLAKE_M(dev)) {
9316 		ironlake_enable_drps(dev);
9317 		intel_init_emon(dev);
9318 	}
9319 
9320 	if (IS_GEN6(dev)) {
9321 		gen6_enable_rps(dev_priv);
9322 		gen6_update_ring_freq(dev_priv);
9323 	}
9324 
9325 	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
9326 	callout_init_mp(&dev_priv->idle_callout);
9327 }
9328 
9329 void intel_modeset_gem_init(struct drm_device *dev)
9330 {
9331 	if (IS_IRONLAKE_M(dev))
9332 		ironlake_enable_rc6(dev);
9333 
9334 	intel_setup_overlay(dev);
9335 }
9336 
9337 void intel_modeset_cleanup(struct drm_device *dev)
9338 {
9339 	struct drm_i915_private *dev_priv = dev->dev_private;
9340 	struct drm_crtc *crtc;
9342 
9343 	drm_kms_helper_poll_fini(dev);
9344 	DRM_LOCK(dev);
9345 
9346 #if 0
9347 	intel_unregister_dsm_handler();
9348 #endif
9349 
9350 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9351 		/* Skip inactive CRTCs */
9352 		if (!crtc->fb)
9353 			continue;
9354 
9356 		intel_increase_pllclock(crtc);
9357 	}
9358 
9359 	intel_disable_fbc(dev);
9360 
9361 	if (IS_IRONLAKE_M(dev))
9362 		ironlake_disable_drps(dev);
9363 	if (IS_GEN6(dev))
9364 		gen6_disable_rps(dev);
9365 
9366 	if (IS_IRONLAKE_M(dev))
9367 		ironlake_disable_rc6(dev);
9368 
9369 	/* Disable the irq before mode object teardown, as the irq might
9370 	 * enqueue unpin/hotplug work. */
9371 	drm_irq_uninstall(dev);
9372 	DRM_UNLOCK(dev);
9373 
9374 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
9375 		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
9376 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
9377 		taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
9378 
9379 	/* Shut off idle work before the crtcs get freed. */
9380 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
9381 		taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
9382 
9383 	drm_mode_config_cleanup(dev);
9384 }
9385 
9386 /*
9387  * Return the encoder currently attached to the connector.
9388  */
9389 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9390 {
9391 	return &intel_attached_encoder(connector)->base;
9392 }
9393 
9394 void intel_connector_attach_encoder(struct intel_connector *connector,
9395 				    struct intel_encoder *encoder)
9396 {
9397 	connector->encoder = encoder;
9398 	drm_mode_connector_attach_encoder(&connector->base,
9399 					  &encoder->base);
9400 }
9401 
9402 /*
9403  * Set VGA decode state - true == enable VGA decode
9404  */
9405 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9406 {
9407 	struct drm_i915_private *dev_priv;
9408 	device_t bridge_dev;
9409 	u16 gmch_ctrl;
9410 
9411 	dev_priv = dev->dev_private;
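	/*
	 * VGA decode enable lives in the GMCH (host bridge) control
	 * word, so program the bridge device rather than the GPU.
	 */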
9412 	bridge_dev = intel_gtt_get_bridge_device();
9413 	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
9414 	if (state)
9415 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9416 	else
9417 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9418 	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
9419 	return (0);
9420 }
9421 
9422 struct intel_display_error_state {
9423 	struct intel_cursor_error_state {
9424 		u32 control;
9425 		u32 position;
9426 		u32 base;
9427 		u32 size;
9428 	} cursor[2];
9429 
9430 	struct intel_pipe_error_state {
9431 		u32 conf;
9432 		u32 source;
9433 
9434 		u32 htotal;
9435 		u32 hblank;
9436 		u32 hsync;
9437 		u32 vtotal;
9438 		u32 vblank;
9439 		u32 vsync;
9440 	} pipe[2];
9441 
9442 	struct intel_plane_error_state {
9443 		u32 control;
9444 		u32 stride;
9445 		u32 size;
9446 		u32 pos;
9447 		u32 addr;
9448 		u32 surface;
9449 		u32 tile_offset;
9450 	} plane[2];
9451 };
9452 
9453 struct intel_display_error_state *
9454 intel_display_capture_error_state(struct drm_device *dev)
9455 {
9456 	drm_i915_private_t *dev_priv = dev->dev_private;
9457 	struct intel_display_error_state *error;
9458 	int i;
9459 
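	/*
	 * M_NOWAIT: error capture may be called from a GPU hang/error
	 * path where sleeping for memory is not acceptable.
	 */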
9460 	error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
9461 	if (error == NULL)
9462 		return NULL;
9463 
9464 	for (i = 0; i < 2; i++) {
9465 		error->cursor[i].control = I915_READ(CURCNTR(i));
9466 		error->cursor[i].position = I915_READ(CURPOS(i));
9467 		error->cursor[i].base = I915_READ(CURBASE(i));
9468 
9469 		error->plane[i].control = I915_READ(DSPCNTR(i));
9470 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9471 		error->plane[i].size = I915_READ(DSPSIZE(i));
9472 		error->plane[i].pos = I915_READ(DSPPOS(i));
9473 		error->plane[i].addr = I915_READ(DSPADDR(i));
9474 		if (INTEL_INFO(dev)->gen >= 4) {
9475 			error->plane[i].surface = I915_READ(DSPSURF(i));
9476 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9477 		}
9478 
9479 		error->pipe[i].conf = I915_READ(PIPECONF(i));
9480 		error->pipe[i].source = I915_READ(PIPESRC(i));
9481 		error->pipe[i].htotal = I915_READ(HTOTAL(i));
9482 		error->pipe[i].hblank = I915_READ(HBLANK(i));
9483 		error->pipe[i].hsync = I915_READ(HSYNC(i));
9484 		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9485 		error->pipe[i].vblank = I915_READ(VBLANK(i));
9486 		error->pipe[i].vsync = I915_READ(VSYNC(i));
9487 	}
9488 
9489 	return error;
9490 }
9491 
9492 void
9493 intel_display_print_error_state(struct sbuf *m,
9494 				struct drm_device *dev,
9495 				struct intel_display_error_state *error)
9496 {
9497 	int i;
9498 
9499 	for (i = 0; i < 2; i++) {
9500 		sbuf_printf(m, "Pipe [%d]:\n", i);
9501 		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9502 		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9503 		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9504 		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9505 		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9506 		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9507 		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9508 		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9509 
9510 		sbuf_printf(m, "Plane [%d]:\n", i);
9511 		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9512 		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9513 		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9514 		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
9515 		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9516 		if (INTEL_INFO(dev)->gen >= 4) {
9517 			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9518 			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9519 		}
9520 
9521 		sbuf_printf(m, "Cursor [%d]:\n", i);
9522 		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9523 		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
9524 		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9525 	}
9526 }
9527