/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6, and in the
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, where RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)
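
/*
 * Illustrative sketch (not part of the driver): the flags above are meant
 * to be OR'd together into a mask describing which RC6 states may be
 * entered.  Assuming a platform where plain RC6 and deep RC6 are deemed
 * safe, a caller could build
 *
 *	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;
 *
 * and leave INTEL_RC6pp_ENABLE clear to keep the deepest (highest-latency)
 * state disabled.
 */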

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
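
/*
 * For illustration only: FW_WM() shifts a watermark value into its register
 * field and masks it so it cannot spill into neighbouring fields.  Via token
 * pasting, FW_WM(wm, SR) expands to
 *
 *	(((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK)
 *
 * so the same macro serves every DSPFW field.
 */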

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}


/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
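
/*
 * Worked example (illustrative, using round numbers): with the 5000 ns
 * latency above, a 100 MHz pixel clock (100000 kHz) and 4 bytes per pixel,
 * intel_calculate_wm() below computes
 *
 *	entries = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *
 * which is then rounded up to cachelines and subtracted from the FIFO size
 * to find the watermark level.
 */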

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
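
/*
 * Decode example (illustrative values): each FIFO start point is a 9-bit
 * number split across two registers, the low 8 bits in DSPARB and the 9th
 * bit in DSPARB2.  E.g. with dsparb = 0x00004000 and dsparb2 = 0x00000001,
 * VLV_FIFO_START(dsparb, dsparb2, 8, 4) yields
 *
 *	((0x4000 >> 8) & 0xff) | (((0x1 >> 4) & 0x1) << 8) = 0x40
 *
 * i.e. that plane's FIFO region starts at entry 64.
 */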

static int vlv_get_fifo_size(struct drm_device *dev,
			      enum i915_pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past
 * the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
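
/*
 * Worked example (illustrative numbers, not taken from real hardware): for
 * a 135000 kHz pixel clock, 4 bytes per pixel, a 64-byte cacheline, 5000 ns
 * of latency, a guard size of 2 and a FIFO of 96 lines:
 *
 *	entries = (135000 / 1000) * 4 * 5000 / 1000 = 2700 bytes
 *	entries = DIV_ROUND_UP(2700, 64)            = 43 cachelines
 *	wm_size = 96 - (43 + 2)                     = 51
 *
 * so the plane starts refilling once the FIFO drains down to 51 lines.
 */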

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
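
/*
 * Sketch of the two estimates above (illustrative numbers): the "small
 * buffer" method sizes the watermark from the bytes consumed during the
 * latency window, e.g. a 200000 kHz clock at 4 cpp with 5000 ns latency
 * gives
 *
 *	entries = (200000 * 4 / 1000) * 5000 / 1000 = 4000 bytes
 *
 * while the "large buffer" method used for the cursor works in whole
 * scanlines: line_time_us = htotal * 1000 / clock, and line_count is the
 * number of lines that fit in the latency window, rounded up.
 */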

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
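
/*
 * Worked example (illustrative): with latency = 30 (3 us in 0.1 us units),
 * pixel_rate = 200000 kHz, htotal = 2200, 1920 visible pixels and 4 cpp:
 *
 *	ret = (30 * 200000) / (2200 * 10000) = 0 full lines
 *	ret = (0 + 1) * 1920 * 4             = 7680 bytes
 *	ret = DIV_ROUND_UP(7680, 64)         = 120 FIFO entries
 *
 * i.e. the latency is always rounded up to at least one full line of
 * pixels before converting to 64-byte FIFO entries.
 */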

static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
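
/*
 * Example of the split above (illustrative): with a visible primary plane
 * at 4 cpp and one visible sprite at 2 cpp, total_rate = 6 and the 511
 * usable entries divide proportionally as
 *
 *	primary: 511 * 4 / 6 = 340    sprite: 511 * 2 / 6 = 170
 *
 * leaving 1 entry of remainder to be spread across the active planes by
 * the loop above; the cursor always keeps its fixed 63 entries.
 */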

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite, level;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum i915_pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
					  i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
1666 
1667 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
1668 {
1669 	uint32_t pixel_rate;
1670 
1671 	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
1672 
1673 	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
1674 	 * adjust the pixel_rate here. */
1675 
1676 	if (pipe_config->pch_pfit.enabled) {
1677 		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
1678 		uint32_t pfit_size = pipe_config->pch_pfit.size;
1679 
1680 		pipe_w = pipe_config->pipe_src_w;
1681 		pipe_h = pipe_config->pipe_src_h;
1682 
1683 		pfit_w = (pfit_size >> 16) & 0xFFFF;
1684 		pfit_h = pfit_size & 0xFFFF;
1685 		if (pipe_w < pfit_w)
1686 			pipe_w = pfit_w;
1687 		if (pipe_h < pfit_h)
1688 			pipe_h = pfit_h;
1689 
1690 		if (WARN_ON(!pfit_w || !pfit_h))
1691 			return pixel_rate;
1692 
1693 		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
1694 				     pfit_w * pfit_h);
1695 	}
1696 
1697 	return pixel_rate;
1698 }
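
/*
 * Illustrative example for ilk_pipe_pixel_rate() above, with assumed
 * (not bspec-derived) numbers: a 1920x1200 pipe source downscaled by
 * the PCH panel fitter into a 1920x1080 window at crtc_clock =
 * 148500 kHz gives
 *   148500 * (1920 * 1200) / (1920 * 1080) = 165000 kHz,
 * i.e. the effective pixel rate grows by the downscale factor.  For
 * upscaling the clamps above make the ratio 1 and the rate is
 * unchanged.
 */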
1699 
1700 /* latency must be in 0.1us units. */
1701 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
1702 {
1703 	uint64_t ret;
1704 
1705 	if (WARN(latency == 0, "Latency value missing\n"))
1706 		return UINT_MAX;
1707 
1708 	ret = (uint64_t) pixel_rate * cpp * latency;
1709 	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
1710 
1711 	return ret;
1712 }
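
/*
 * Illustrative example for ilk_wm_method1() above, with assumed values
 * (pixel_rate = 148500 kHz, cpp = 4, latency = 70, i.e. 7.0 us):
 *   148500 * 4 * 70 = 41580000
 *   DIV_ROUND_UP_ULL(41580000, 64 * 10000) + 2 = 65 + 2 = 67
 * i.e. the ~4158 bytes fetched during the latency window are rounded
 * up to 65 64-byte blocks, plus 2 blocks of slack.
 */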
1713 
1714 /* latency must be in 0.1us units. */
1715 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
1716 			       uint32_t horiz_pixels, uint8_t cpp,
1717 			       uint32_t latency)
1718 {
1719 	uint32_t ret;
1720 
1721 	if (WARN(latency == 0, "Latency value missing\n"))
1722 		return UINT_MAX;
1723 	if (WARN_ON(!pipe_htotal))
1724 		return UINT_MAX;
1725 
1726 	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
1727 	ret = (ret + 1) * horiz_pixels * cpp;
1728 	ret = DIV_ROUND_UP(ret, 64) + 2;
1729 	return ret;
1730 }
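
/*
 * Illustrative example for ilk_wm_method2() above, with assumed values
 * (latency = 70 (7.0 us), pixel_rate = 148500 kHz, pipe_htotal = 2200,
 * horiz_pixels = 1920, cpp = 4):
 *   lines elapsed = 70 * 148500 / (2200 * 10000) = 0 (0.47 lines)
 *   (0 + 1) * 1920 * 4 = 7680 bytes
 *   DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122
 * i.e. the latency is rounded up to whole lines and the watermark
 * covers that many lines' worth of 64-byte blocks.
 */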
1731 
1732 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1733 			   uint8_t cpp)
1734 {
1735 	/*
1736 	 * Neither of these should be possible since this function shouldn't be
1737 	 * called if the CRTC is off or the plane is invisible.  But let's be
1738 	 * extra paranoid to avoid a potential divide-by-zero if we screw up
1739 	 * elsewhere in the driver.
1740 	 */
1741 	if (WARN_ON(!cpp))
1742 		return 0;
1743 	if (WARN_ON(!horiz_pixels))
1744 		return 0;
1745 
1746 	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
1747 }
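
/*
 * Illustrative example for ilk_wm_fbc() above: with an assumed
 * pri_val = 122 blocks, horiz_pixels = 1920 and cpp = 4,
 *   DIV_ROUND_UP(122 * 64, 1920 * 4) + 2 = 2 + 2 = 4
 * i.e. the primary watermark is re-expressed in whole lines
 * (7680 bytes per line here) instead of 64-byte blocks.
 */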
1748 
1749 struct ilk_wm_maximums {
1750 	uint16_t pri;
1751 	uint16_t spr;
1752 	uint16_t cur;
1753 	uint16_t fbc;
1754 };
1755 
1756 /*
1757  * For both WM_PIPE and WM_LP.
1758  * mem_value must be in 0.1us units.
1759  */
1760 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
1761 				   const struct intel_plane_state *pstate,
1762 				   uint32_t mem_value,
1763 				   bool is_lp)
1764 {
1765 	int cpp = pstate->base.fb ?
1766 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
1767 	uint32_t method1, method2;
1768 
1769 	if (!cstate->base.active || !pstate->visible)
1770 		return 0;
1771 
1772 	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
1773 
1774 	if (!is_lp)
1775 		return method1;
1776 
1777 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1778 				 cstate->base.adjusted_mode.crtc_htotal,
1779 				 drm_rect_width(&pstate->dst),
1780 				 cpp, mem_value);
1781 
1782 	return min(method1, method2);
1783 }
1784 
1785 /*
1786  * For both WM_PIPE and WM_LP.
1787  * mem_value must be in 0.1us units.
1788  */
1789 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
1790 				   const struct intel_plane_state *pstate,
1791 				   uint32_t mem_value)
1792 {
1793 	int cpp = pstate->base.fb ?
1794 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
1795 	uint32_t method1, method2;
1796 
1797 	if (!cstate->base.active || !pstate->visible)
1798 		return 0;
1799 
1800 	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
1801 	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1802 				 cstate->base.adjusted_mode.crtc_htotal,
1803 				 drm_rect_width(&pstate->dst),
1804 				 cpp, mem_value);
1805 	return min(method1, method2);
1806 }
1807 
1808 /*
1809  * For both WM_PIPE and WM_LP.
1810  * mem_value must be in 0.1us units.
1811  */
1812 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
1813 				   const struct intel_plane_state *pstate,
1814 				   uint32_t mem_value)
1815 {
1816 	/*
1817 	 * We treat the cursor plane as always-on for the purposes of watermark
1818 	 * calculation.  Until we have two-stage watermark programming merged,
1819 	 * this is necessary to avoid flickering.
1820 	 */
1821 	int cpp = 4;
1822 	int width = pstate->visible ? pstate->base.crtc_w : 64;
1823 
1824 	if (!cstate->base.active)
1825 		return 0;
1826 
1827 	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1828 			      cstate->base.adjusted_mode.crtc_htotal,
1829 			      width, cpp, mem_value);
1830 }
1831 
1832 /* Only for WM_LP. */
1833 static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
1834 				   const struct intel_plane_state *pstate,
1835 				   uint32_t pri_val)
1836 {
1837 	int cpp = pstate->base.fb ?
1838 		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
1839 
1840 	if (!cstate->base.active || !pstate->visible)
1841 		return 0;
1842 
1843 	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
1844 }
1845 
1846 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
1847 {
1848 	if (INTEL_INFO(dev)->gen >= 8)
1849 		return 3072;
1850 	else if (INTEL_INFO(dev)->gen >= 7)
1851 		return 768;
1852 	else
1853 		return 512;
1854 }
1855 
1856 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
1857 					 int level, bool is_sprite)
1858 {
1859 	if (INTEL_INFO(dev)->gen >= 8)
1860 		/* BDW primary/sprite plane watermarks */
1861 		return level == 0 ? 255 : 2047;
1862 	else if (INTEL_INFO(dev)->gen >= 7)
1863 		/* IVB/HSW primary/sprite plane watermarks */
1864 		return level == 0 ? 127 : 1023;
1865 	else if (!is_sprite)
1866 		/* ILK/SNB primary plane watermarks */
1867 		return level == 0 ? 127 : 511;
1868 	else
1869 		/* ILK/SNB sprite plane watermarks */
1870 		return level == 0 ? 63 : 255;
1871 }
1872 
1873 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
1874 					  int level)
1875 {
1876 	if (INTEL_INFO(dev)->gen >= 7)
1877 		return level == 0 ? 63 : 255;
1878 	else
1879 		return level == 0 ? 31 : 63;
1880 }
1881 
1882 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
1883 {
1884 	if (INTEL_INFO(dev)->gen >= 8)
1885 		return 31;
1886 	else
1887 		return 15;
1888 }
1889 
1890 /* Calculate the maximum primary/sprite plane watermark */
1891 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
1892 				     int level,
1893 				     const struct intel_wm_config *config,
1894 				     enum intel_ddb_partitioning ddb_partitioning,
1895 				     bool is_sprite)
1896 {
1897 	unsigned int fifo_size = ilk_display_fifo_size(dev);
1898 
1899 	/* if sprites aren't enabled, sprites get nothing */
1900 	if (is_sprite && !config->sprites_enabled)
1901 		return 0;
1902 
1903 	/* HSW allows LP1+ watermarks even with multiple pipes */
1904 	if (level == 0 || config->num_pipes_active > 1) {
1905 		fifo_size /= INTEL_INFO(dev)->num_pipes;
1906 
1907 		/*
1908 		 * For some reason the non-self-refresh
1909 		 * FIFO size is only half of the self-refresh
1910 		 * FIFO size on ILK/SNB.
1911 		 */
1912 		if (INTEL_INFO(dev)->gen <= 6)
1913 			fifo_size /= 2;
1914 	}
1915 
1916 	if (config->sprites_enabled) {
1917 		/* level 0 is always calculated with 1:1 split */
1918 		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
1919 			if (is_sprite)
1920 				fifo_size *= 5;
1921 			fifo_size /= 6;
1922 		} else {
1923 			fifo_size /= 2;
1924 		}
1925 	}
1926 
1927 	/* clamp to max that the registers can hold */
1928 	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
1929 }
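
/*
 * Illustrative example for ilk_plane_wm_max() above, with assumed
 * values: on IVB/HSW (gen7, 768 block FIFO) with one active pipe and
 * sprites enabled, an LP1+ plane under the default 1:1 split gets
 * 768 / 2 = 384 blocks; under the 5:6 split the sprite would get
 * 768 * 5 / 6 = 640 blocks and the primary 768 / 6 = 128, all below
 * the 1023 block register limit.
 */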
1930 
1931 /* Calculate the maximum cursor plane watermark */
1932 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
1933 				      int level,
1934 				      const struct intel_wm_config *config)
1935 {
1936 	/* HSW LP1+ watermarks w/ multiple pipes */
1937 	if (level > 0 && config->num_pipes_active > 1)
1938 		return 64;
1939 
1940 	/* otherwise just report max that registers can hold */
1941 	return ilk_cursor_wm_reg_max(dev, level);
1942 }
1943 
1944 static void ilk_compute_wm_maximums(const struct drm_device *dev,
1945 				    int level,
1946 				    const struct intel_wm_config *config,
1947 				    enum intel_ddb_partitioning ddb_partitioning,
1948 				    struct ilk_wm_maximums *max)
1949 {
1950 	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
1951 	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
1952 	max->cur = ilk_cursor_wm_max(dev, level, config);
1953 	max->fbc = ilk_fbc_wm_reg_max(dev);
1954 }
1955 
1956 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
1957 					int level,
1958 					struct ilk_wm_maximums *max)
1959 {
1960 	max->pri = ilk_plane_wm_reg_max(dev, level, false);
1961 	max->spr = ilk_plane_wm_reg_max(dev, level, true);
1962 	max->cur = ilk_cursor_wm_reg_max(dev, level);
1963 	max->fbc = ilk_fbc_wm_reg_max(dev);
1964 }
1965 
1966 static bool ilk_validate_wm_level(int level,
1967 				  const struct ilk_wm_maximums *max,
1968 				  struct intel_wm_level *result)
1969 {
1970 	bool ret;
1971 
1972 	/* already determined to be invalid? */
1973 	if (!result->enable)
1974 		return false;
1975 
1976 	result->enable = result->pri_val <= max->pri &&
1977 			 result->spr_val <= max->spr &&
1978 			 result->cur_val <= max->cur;
1979 
1980 	ret = result->enable;
1981 
1982 	/*
1983 	 * HACK until we can pre-compute everything,
1984 	 * and thus fail gracefully if LP0 watermarks
1985 	 * are exceeded...
1986 	 */
1987 	if (level == 0 && !result->enable) {
1988 		if (result->pri_val > max->pri)
1989 			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
1990 				      level, result->pri_val, max->pri);
1991 		if (result->spr_val > max->spr)
1992 			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
1993 				      level, result->spr_val, max->spr);
1994 		if (result->cur_val > max->cur)
1995 			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
1996 				      level, result->cur_val, max->cur);
1997 
1998 		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
1999 		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2000 		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2001 		result->enable = true;
2002 	}
2003 
2004 	return ret;
2005 }
2006 
2007 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2008 				 const struct intel_crtc *intel_crtc,
2009 				 int level,
2010 				 struct intel_crtc_state *cstate,
2011 				 struct intel_plane_state *pristate,
2012 				 struct intel_plane_state *sprstate,
2013 				 struct intel_plane_state *curstate,
2014 				 struct intel_wm_level *result)
2015 {
2016 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2017 	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2018 	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2019 
2020 	/* WM1+ latency values stored in 0.5us units */
2021 	if (level > 0) {
2022 		pri_latency *= 5;
2023 		spr_latency *= 5;
2024 		cur_latency *= 5;
2025 	}
2026 
2027 	if (pristate) {
2028 		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
2029 						     pri_latency, level);
2030 		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
2031 	}
2032 
2033 	if (sprstate)
2034 		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
2035 
2036 	if (curstate)
2037 		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
2038 
2039 	result->enable = true;
2040 }
2041 
2042 static uint32_t
2043 hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2044 {
2045 	const struct intel_atomic_state *intel_state =
2046 		to_intel_atomic_state(cstate->base.state);
2047 	const struct drm_display_mode *adjusted_mode =
2048 		&cstate->base.adjusted_mode;
2049 	u32 linetime, ips_linetime;
2050 
2051 	if (!cstate->base.active)
2052 		return 0;
2053 	if (WARN_ON(adjusted_mode->crtc_clock == 0))
2054 		return 0;
2055 	if (WARN_ON(intel_state->cdclk == 0))
2056 		return 0;
2057 
2058 	/* The WMs are computed based on how long it takes to fill a single
2059 	 * row at the given clock rate, multiplied by 8.
2060 	 */
2061 	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2062 				     adjusted_mode->crtc_clock);
2063 	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2064 					 intel_state->cdclk);
2065 
2066 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2067 	       PIPE_WM_LINETIME_TIME(linetime);
2068 }
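
/*
 * Illustrative example for hsw_compute_linetime_wm() above, with
 * assumed values (htotal = 2200, crtc_clock = 148500 kHz,
 * cdclk = 450000 kHz):
 *   linetime     = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *   ips_linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 450000) = 39
 * i.e. one line takes ~14.8 us, encoded in 0.125 us units.
 */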
2069 
2070 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2071 {
2072 	struct drm_i915_private *dev_priv = to_i915(dev);
2073 
2074 	if (IS_GEN9(dev)) {
2075 		uint32_t val;
2076 		int ret, i;
2077 		int level, max_level = ilk_wm_max_level(dev);
2078 
2079 		/* read the first set of memory latencies[0:3] */
2080 		val = 0; /* data0 to be programmed to 0 for first set */
2081 		mutex_lock(&dev_priv->rps.hw_lock);
2082 		ret = sandybridge_pcode_read(dev_priv,
2083 					     GEN9_PCODE_READ_MEM_LATENCY,
2084 					     &val);
2085 		mutex_unlock(&dev_priv->rps.hw_lock);
2086 
2087 		if (ret) {
2088 			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2089 			return;
2090 		}
2091 
2092 		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2093 		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2094 				GEN9_MEM_LATENCY_LEVEL_MASK;
2095 		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2096 				GEN9_MEM_LATENCY_LEVEL_MASK;
2097 		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2098 				GEN9_MEM_LATENCY_LEVEL_MASK;
2099 
2100 		/* read the second set of memory latencies[4:7] */
2101 		val = 1; /* data0 to be programmed to 1 for second set */
2102 		mutex_lock(&dev_priv->rps.hw_lock);
2103 		ret = sandybridge_pcode_read(dev_priv,
2104 					     GEN9_PCODE_READ_MEM_LATENCY,
2105 					     &val);
2106 		mutex_unlock(&dev_priv->rps.hw_lock);
2107 		if (ret) {
2108 			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2109 			return;
2110 		}
2111 
2112 		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2113 		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2114 				GEN9_MEM_LATENCY_LEVEL_MASK;
2115 		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2116 				GEN9_MEM_LATENCY_LEVEL_MASK;
2117 		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2118 				GEN9_MEM_LATENCY_LEVEL_MASK;
2119 
2120 		/*
2121 		 * WaWmMemoryReadLatency:skl
2122 		 *
2123 		 * punit doesn't take into account the read latency so we need
2124 		 * to add 2us to the various latency levels we retrieve from
2125 		 * the punit.
2126 		 *   - WM0 is a bit special in that it's the only level that
2127 		 *   can't be disabled if we want to have a working display, so
2128 		 *   we always add 2us there.
2129 		 *   - For levels >= 1, the punit returns 0us latency when they
2130 		 *   are disabled, so we respect that and don't add 2us then.
2131 		 *
2132 		 * Additionally, if a level n (n >= 1) has a 0us latency, all
2133 		 * levels m (m >= n) need to be disabled. We make sure to
2134 		 * sanitize the values out of the punit to satisfy this
2135 		 * requirement.
2136 		 */
2137 		wm[0] += 2;
2138 		for (level = 1; level <= max_level; level++)
2139 			if (wm[level] != 0)
2140 				wm[level] += 2;
2141 			else {
2142 				for (i = level + 1; i <= max_level; i++)
2143 					wm[i] = 0;
2144 
2145 				break;
2146 			}
2147 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2148 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
2149 
2150 		wm[0] = (sskpd >> 56) & 0xFF;
2151 		if (wm[0] == 0)
2152 			wm[0] = sskpd & 0xF;
2153 		wm[1] = (sskpd >> 4) & 0xFF;
2154 		wm[2] = (sskpd >> 12) & 0xFF;
2155 		wm[3] = (sskpd >> 20) & 0x1FF;
2156 		wm[4] = (sskpd >> 32) & 0x1FF;
2157 	} else if (INTEL_INFO(dev)->gen >= 6) {
2158 		uint32_t sskpd = I915_READ(MCH_SSKPD);
2159 
2160 		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2161 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2162 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2163 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2164 	} else if (INTEL_INFO(dev)->gen >= 5) {
2165 		uint32_t mltr = I915_READ(MLTR_ILK);
2166 
2167 		/* ILK primary LP0 latency is 700 ns */
2168 		wm[0] = 7;
2169 		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2170 		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2171 	}
2172 }
2173 
2174 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2175 {
2176 	/* ILK sprite LP0 latency is 1300 ns */
2177 	if (IS_GEN5(dev))
2178 		wm[0] = 13;
2179 }
2180 
2181 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2182 {
2183 	/* ILK cursor LP0 latency is 1300 ns */
2184 	if (IS_GEN5(dev))
2185 		wm[0] = 13;
2186 
2187 	/* WaDoubleCursorLP3Latency:ivb */
2188 	if (IS_IVYBRIDGE(dev))
2189 		wm[3] *= 2;
2190 }
2191 
2192 int ilk_wm_max_level(const struct drm_device *dev)
2193 {
2194 	/* how many WM levels are we expecting */
2195 	if (INTEL_INFO(dev)->gen >= 9)
2196 		return 7;
2197 	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2198 		return 4;
2199 	else if (INTEL_INFO(dev)->gen >= 6)
2200 		return 3;
2201 	else
2202 		return 2;
2203 }
2204 
2205 static void intel_print_wm_latency(struct drm_device *dev,
2206 				   const char *name,
2207 				   const uint16_t wm[8])
2208 {
2209 	int level, max_level = ilk_wm_max_level(dev);
2210 
2211 	for (level = 0; level <= max_level; level++) {
2212 		unsigned int latency = wm[level];
2213 
2214 		if (latency == 0) {
2215 			DRM_ERROR("%s WM%d latency not provided\n",
2216 				  name, level);
2217 			continue;
2218 		}
2219 
2220 		/*
2221 		 * - latencies are in us on gen9.
2222 		 * - before then, WM1+ latency values are in 0.5us units
2223 		 */
2224 		if (IS_GEN9(dev))
2225 			latency *= 10;
2226 		else if (level > 0)
2227 			latency *= 5;
2228 
2229 		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2230 			      name, level, wm[level],
2231 			      latency / 10, latency % 10);
2232 	}
2233 }
2234 
2235 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2236 				    uint16_t wm[5], uint16_t min)
2237 {
2238 	int level, max_level = ilk_wm_max_level(&dev_priv->drm);
2239 
2240 	if (wm[0] >= min)
2241 		return false;
2242 
2243 	wm[0] = max(wm[0], min);
2244 	for (level = 1; level <= max_level; level++)
2245 		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2246 
2247 	return true;
2248 }
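
/*
 * Illustrative example for ilk_increase_wm_latency() above: with
 * min = 12 (WM0 latency is kept in 0.1us units, so 1.2 us), WM0 is
 * raised to at least 12 while WM1+ (stored in 0.5us units) are raised
 * to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.
 */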
2249 
2250 static void snb_wm_latency_quirk(struct drm_device *dev)
2251 {
2252 	struct drm_i915_private *dev_priv = to_i915(dev);
2253 	bool changed;
2254 
2255 	/*
2256 	 * The BIOS-provided WM memory latency values are often
2257 	 * inadequate for high resolution displays. Adjust them.
2258 	 */
2259 	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2260 		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2261 		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2262 
2263 	if (!changed)
2264 		return;
2265 
2266 	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2267 	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2268 	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2269 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2270 }
2271 
2272 static void ilk_setup_wm_latency(struct drm_device *dev)
2273 {
2274 	struct drm_i915_private *dev_priv = to_i915(dev);
2275 
2276 	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2277 
2278 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2279 	       sizeof(dev_priv->wm.pri_latency));
2280 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2281 	       sizeof(dev_priv->wm.pri_latency));
2282 
2283 	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2284 	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2285 
2286 	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2287 	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2288 	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2289 
2290 	if (IS_GEN6(dev))
2291 		snb_wm_latency_quirk(dev);
2292 }
2293 
2294 static void skl_setup_wm_latency(struct drm_device *dev)
2295 {
2296 	struct drm_i915_private *dev_priv = to_i915(dev);
2297 
2298 	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2299 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2300 }
2301 
2302 static bool ilk_validate_pipe_wm(struct drm_device *dev,
2303 				 struct intel_pipe_wm *pipe_wm)
2304 {
2305 	/* LP0 watermark maximums depend on this pipe alone */
2306 	const struct intel_wm_config config = {
2307 		.num_pipes_active = 1,
2308 		.sprites_enabled = pipe_wm->sprites_enabled,
2309 		.sprites_scaled = pipe_wm->sprites_scaled,
2310 	};
2311 	struct ilk_wm_maximums max;
2312 
2313 	/* LP0 watermarks always use 1/2 DDB partitioning */
2314 	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2315 
2316 	/* At least LP0 must be valid */
2317 	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
2318 		DRM_DEBUG_KMS("LP0 watermark invalid\n");
2319 		return false;
2320 	}
2321 
2322 	return true;
2323 }
2324 
2325 /* Compute new watermarks for the pipe */
2326 static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2327 {
2328 	struct drm_atomic_state *state = cstate->base.state;
2329 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2330 	struct intel_pipe_wm *pipe_wm;
2331 	struct drm_device *dev = state->dev;
2332 	const struct drm_i915_private *dev_priv = to_i915(dev);
2333 	struct intel_plane *intel_plane;
2334 	struct intel_plane_state *pristate = NULL;
2335 	struct intel_plane_state *sprstate = NULL;
2336 	struct intel_plane_state *curstate = NULL;
2337 	int level, max_level = ilk_wm_max_level(dev), usable_level;
2338 	struct ilk_wm_maximums max;
2339 
2340 	pipe_wm = &cstate->wm.ilk.optimal;
2341 
2342 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2343 		struct intel_plane_state *ps;
2344 
2345 		ps = intel_atomic_get_existing_plane_state(state,
2346 							   intel_plane);
2347 		if (!ps)
2348 			continue;
2349 
2350 		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2351 			pristate = ps;
2352 		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2353 			sprstate = ps;
2354 		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
2355 			curstate = ps;
2356 	}
2357 
2358 	pipe_wm->pipe_enabled = cstate->base.active;
2359 	if (sprstate) {
2360 		pipe_wm->sprites_enabled = sprstate->visible;
2361 		pipe_wm->sprites_scaled = sprstate->visible &&
2362 			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
2363 			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
2364 	}
2365 
2366 	usable_level = max_level;
2367 
2368 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
2369 	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
2370 		usable_level = 1;
2371 
2372 	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2373 	if (pipe_wm->sprites_scaled)
2374 		usable_level = 0;
2375 
2376 	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
2377 			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
2378 
2379 	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2380 	pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2381 
2382 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2383 		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2384 
2385 	if (!ilk_validate_pipe_wm(dev, pipe_wm))
2386 		return -EINVAL;
2387 
2388 	ilk_compute_wm_reg_maximums(dev, 1, &max);
2389 
2390 	for (level = 1; level <= max_level; level++) {
2391 		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
2392 
2393 		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
2394 				     pristate, sprstate, curstate, wm);
2395 
2396 		/*
2397 		 * Disable any watermark level that exceeds the
2398 		 * register maximums since such watermarks are
2399 		 * always invalid.
2400 		 */
2401 		if (level > usable_level)
2402 			continue;
2403 
2404 		if (ilk_validate_wm_level(level, &max, wm))
2405 			pipe_wm->wm[level] = *wm;
2406 		else
2407 			usable_level = level;
2408 	}
2409 
2410 	return 0;
2411 }
2412 
2413 /*
2414  * Build a set of 'intermediate' watermark values that satisfy both the old
2415  * state and the new state.  These can be programmed to the hardware
2416  * immediately.
2417  */
2418 static int ilk_compute_intermediate_wm(struct drm_device *dev,
2419 				       struct intel_crtc *intel_crtc,
2420 				       struct intel_crtc_state *newstate)
2421 {
2422 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2423 	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2424 	int level, max_level = ilk_wm_max_level(dev);
2425 
2426 	/*
2427 	 * Start with the final, target watermarks, then combine with the
2428 	 * currently active watermarks to get values that are safe both before
2429 	 * and after the vblank.
2430 	 */
2431 	*a = newstate->wm.ilk.optimal;
2432 	a->pipe_enabled |= b->pipe_enabled;
2433 	a->sprites_enabled |= b->sprites_enabled;
2434 	a->sprites_scaled |= b->sprites_scaled;
2435 
2436 	for (level = 0; level <= max_level; level++) {
2437 		struct intel_wm_level *a_wm = &a->wm[level];
2438 		const struct intel_wm_level *b_wm = &b->wm[level];
2439 
2440 		a_wm->enable &= b_wm->enable;
2441 		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
2442 		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
2443 		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
2444 		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
2445 	}
2446 
2447 	/*
2448 	 * We need to make sure that these merged watermark values are
2449 	 * actually a valid configuration themselves.  If they're not,
2450 	 * there's no safe way to transition from the old state to
2451 	 * the new state, so we need to fail the atomic transaction.
2452 	 */
2453 	if (!ilk_validate_pipe_wm(dev, a))
2454 		return -EINVAL;
2455 
2456 	/*
2457 	 * If our intermediate WM are identical to the final WM, then we can
2458 	 * omit the post-vblank programming; only update if it's different.
2459 	 */
2460 	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2461 		newstate->wm.need_postvbl_update = false;
2462 
2463 	return 0;
2464 }
2465 
2466 /*
2467  * Merge the watermarks from all active pipes for a specific level.
2468  */
2469 static void ilk_merge_wm_level(struct drm_device *dev,
2470 			       int level,
2471 			       struct intel_wm_level *ret_wm)
2472 {
2473 	const struct intel_crtc *intel_crtc;
2474 
2475 	ret_wm->enable = true;
2476 
2477 	for_each_intel_crtc(dev, intel_crtc) {
2478 		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
2479 		const struct intel_wm_level *wm = &active->wm[level];
2480 
2481 		if (!active->pipe_enabled)
2482 			continue;
2483 
2484 		/*
2485 		 * The watermark values may have been used in the past,
2486 		 * so we must maintain them in the registers for some
2487 		 * time even if the level is now disabled.
2488 		 */
2489 		if (!wm->enable)
2490 			ret_wm->enable = false;
2491 
2492 		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2493 		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2494 		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2495 		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2496 	}
2497 }
2498 
2499 /*
2500  * Merge all low power watermarks for all active pipes.
2501  */
2502 static void ilk_wm_merge(struct drm_device *dev,
2503 			 const struct intel_wm_config *config,
2504 			 const struct ilk_wm_maximums *max,
2505 			 struct intel_pipe_wm *merged)
2506 {
2507 	struct drm_i915_private *dev_priv = to_i915(dev);
2508 	int level, max_level = ilk_wm_max_level(dev);
2509 	int last_enabled_level = max_level;
2510 
2511 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2512 	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2513 	    config->num_pipes_active > 1)
2514 		last_enabled_level = 0;
2515 
2516 	/* ILK: FBC WM must be disabled always */
2517 	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2518 
2519 	/* merge each WM1+ level */
2520 	for (level = 1; level <= max_level; level++) {
2521 		struct intel_wm_level *wm = &merged->wm[level];
2522 
2523 		ilk_merge_wm_level(dev, level, wm);
2524 
2525 		if (level > last_enabled_level)
2526 			wm->enable = false;
2527 		else if (!ilk_validate_wm_level(level, max, wm))
2528 			/* make sure all following levels get disabled */
2529 			last_enabled_level = level - 1;
2530 
2531 		/*
2532 		 * The spec says it is preferred to disable
2533 		 * FBC WMs instead of disabling a WM level.
2534 		 */
2535 		if (wm->fbc_val > max->fbc) {
2536 			if (wm->enable)
2537 				merged->fbc_wm_enabled = false;
2538 			wm->fbc_val = 0;
2539 		}
2540 	}
2541 
2542 	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2543 	/*
2544 	 * FIXME this is racy. FBC might get enabled later.
2545 	 * What we should check here is whether FBC can be
2546 	 * enabled sometime later.
2547 	 */
2548 	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
2549 	    intel_fbc_is_active(dev_priv)) {
2550 		for (level = 2; level <= max_level; level++) {
2551 			struct intel_wm_level *wm = &merged->wm[level];
2552 
2553 			wm->enable = false;
2554 		}
2555 	}
2556 }
2557 
2558 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2559 {
2560 	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2561 	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2562 }
2563 
2564 /* The value we need to program into the WM_LPx latency field */
2565 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2566 {
2567 	struct drm_i915_private *dev_priv = to_i915(dev);
2568 
2569 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2570 		return 2 * level;
2571 	else
2572 		return dev_priv->wm.pri_latency[level];
2573 }
2574 
2575 static void ilk_compute_wm_results(struct drm_device *dev,
2576 				   const struct intel_pipe_wm *merged,
2577 				   enum intel_ddb_partitioning partitioning,
2578 				   struct ilk_wm_values *results)
2579 {
2580 	struct intel_crtc *intel_crtc;
2581 	int level, wm_lp;
2582 
2583 	results->enable_fbc_wm = merged->fbc_wm_enabled;
2584 	results->partitioning = partitioning;
2585 
2586 	/* LP1+ register values */
2587 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2588 		const struct intel_wm_level *r;
2589 
2590 		level = ilk_wm_lp_to_level(wm_lp, merged);
2591 
2592 		r = &merged->wm[level];
2593 
2594 		/*
2595 		 * Maintain the watermark values even if the level is
2596 		 * disabled. Doing otherwise could cause underruns.
2597 		 */
2598 		results->wm_lp[wm_lp - 1] =
2599 			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2600 			(r->pri_val << WM1_LP_SR_SHIFT) |
2601 			r->cur_val;
2602 
2603 		if (r->enable)
2604 			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2605 
2606 		if (INTEL_INFO(dev)->gen >= 8)
2607 			results->wm_lp[wm_lp - 1] |=
2608 				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2609 		else
2610 			results->wm_lp[wm_lp - 1] |=
2611 				r->fbc_val << WM1_LP_FBC_SHIFT;
2612 
2613 		/*
2614 		 * Always set WM1S_LP_EN when spr_val != 0, even if the
2615 		 * level is disabled. Doing otherwise could cause underruns.
2616 		 */
2617 		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2618 			WARN_ON(wm_lp != 1);
2619 			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2620 		} else
2621 			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2622 	}
2623 
2624 	/* LP0 register values */
2625 	for_each_intel_crtc(dev, intel_crtc) {
2626 		enum i915_pipe pipe = intel_crtc->pipe;
2627 		const struct intel_wm_level *r =
2628 			&intel_crtc->wm.active.ilk.wm[0];
2629 
2630 		if (WARN_ON(!r->enable))
2631 			continue;
2632 
2633 		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
2634 
2635 		results->wm_pipe[pipe] =
2636 			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2637 			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2638 			r->cur_val;
2639 	}
2640 }
2641 
2642 /* Find the result with the highest level enabled. If both peak at the same
2643  * level, prefer the one with enable_fbc_wm set; if still tied, prefer r1. */
2644 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2645 						  struct intel_pipe_wm *r1,
2646 						  struct intel_pipe_wm *r2)
2647 {
2648 	int level, max_level = ilk_wm_max_level(dev);
2649 	int level1 = 0, level2 = 0;
2650 
2651 	for (level = 1; level <= max_level; level++) {
2652 		if (r1->wm[level].enable)
2653 			level1 = level;
2654 		if (r2->wm[level].enable)
2655 			level2 = level;
2656 	}
2657 
2658 	if (level1 == level2) {
2659 		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2660 			return r2;
2661 		else
2662 			return r1;
2663 	} else if (level1 > level2) {
2664 		return r1;
2665 	} else {
2666 		return r2;
2667 	}
2668 }
2669 
2670 /* dirty bits used to track which watermarks need changes */
2671 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2672 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2673 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2674 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2675 #define WM_DIRTY_FBC (1 << 24)
2676 #define WM_DIRTY_DDB (1 << 25)
2677 
2678 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2679 					 const struct ilk_wm_values *old,
2680 					 const struct ilk_wm_values *new)
2681 {
2682 	unsigned int dirty = 0;
2683 	enum i915_pipe pipe;
2684 	int wm_lp;
2685 
2686 	for_each_pipe(dev_priv, pipe) {
2687 		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2688 			dirty |= WM_DIRTY_LINETIME(pipe);
2689 			/* Must disable LP1+ watermarks too */
2690 			dirty |= WM_DIRTY_LP_ALL;
2691 		}
2692 
2693 		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2694 			dirty |= WM_DIRTY_PIPE(pipe);
2695 			/* Must disable LP1+ watermarks too */
2696 			dirty |= WM_DIRTY_LP_ALL;
2697 		}
2698 	}
2699 
2700 	if (old->enable_fbc_wm != new->enable_fbc_wm) {
2701 		dirty |= WM_DIRTY_FBC;
2702 		/* Must disable LP1+ watermarks too */
2703 		dirty |= WM_DIRTY_LP_ALL;
2704 	}
2705 
2706 	if (old->partitioning != new->partitioning) {
2707 		dirty |= WM_DIRTY_DDB;
2708 		/* Must disable LP1+ watermarks too */
2709 		dirty |= WM_DIRTY_LP_ALL;
2710 	}
2711 
2712 	/* LP1+ watermarks already deemed dirty, no need to continue */
2713 	if (dirty & WM_DIRTY_LP_ALL)
2714 		return dirty;
2715 
2716 	/* Find the lowest numbered LP1+ watermark in need of an update... */
2717 	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2718 		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2719 		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2720 			break;
2721 	}
2722 
2723 	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2724 	for (; wm_lp <= 3; wm_lp++)
2725 		dirty |= WM_DIRTY_LP(wm_lp);
2726 
2727 	return dirty;
2728 }
2729 
2730 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2731 			       unsigned int dirty)
2732 {
2733 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
2734 	bool changed = false;
2735 
2736 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2737 		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2738 		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2739 		changed = true;
2740 	}
2741 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2742 		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2743 		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2744 		changed = true;
2745 	}
2746 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2747 		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2748 		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2749 		changed = true;
2750 	}
2751 
2752 	/*
2753 	 * Don't touch WM1S_LP_EN here.
2754 	 * Doing so could cause underruns.
2755 	 */
2756 
2757 	return changed;
2758 }
2759 
2760 /*
2761  * The spec says we shouldn't write when we don't need, because every write
2762  * causes WMs to be re-evaluated, expending some power.
2763  */
2764 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2765 				struct ilk_wm_values *results)
2766 {
2767 	struct drm_device *dev = &dev_priv->drm;
2768 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
2769 	unsigned int dirty;
2770 	uint32_t val;
2771 
2772 	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2773 	if (!dirty)
2774 		return;
2775 
2776 	_ilk_disable_lp_wm(dev_priv, dirty);
2777 
2778 	if (dirty & WM_DIRTY_PIPE(PIPE_A))
2779 		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2780 	if (dirty & WM_DIRTY_PIPE(PIPE_B))
2781 		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2782 	if (dirty & WM_DIRTY_PIPE(PIPE_C))
2783 		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2784 
2785 	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2786 		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2787 	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2788 		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2789 	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2790 		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2791 
2792 	if (dirty & WM_DIRTY_DDB) {
2793 		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2794 			val = I915_READ(WM_MISC);
2795 			if (results->partitioning == INTEL_DDB_PART_1_2)
2796 				val &= ~WM_MISC_DATA_PARTITION_5_6;
2797 			else
2798 				val |= WM_MISC_DATA_PARTITION_5_6;
2799 			I915_WRITE(WM_MISC, val);
2800 		} else {
2801 			val = I915_READ(DISP_ARB_CTL2);
2802 			if (results->partitioning == INTEL_DDB_PART_1_2)
2803 				val &= ~DISP_DATA_PARTITION_5_6;
2804 			else
2805 				val |= DISP_DATA_PARTITION_5_6;
2806 			I915_WRITE(DISP_ARB_CTL2, val);
2807 		}
2808 	}
2809 
2810 	if (dirty & WM_DIRTY_FBC) {
2811 		val = I915_READ(DISP_ARB_CTL);
2812 		if (results->enable_fbc_wm)
2813 			val &= ~DISP_FBC_WM_DIS;
2814 		else
2815 			val |= DISP_FBC_WM_DIS;
2816 		I915_WRITE(DISP_ARB_CTL, val);
2817 	}
2818 
2819 	if (dirty & WM_DIRTY_LP(1) &&
2820 	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2821 		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2822 
2823 	if (INTEL_INFO(dev)->gen >= 7) {
2824 		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2825 			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2826 		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2827 			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2828 	}
2829 
2830 	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2831 		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2832 	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2833 		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2834 	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2835 		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2836 
2837 	dev_priv->wm.hw = *results;
2838 }
2839 
2840 bool ilk_disable_lp_wm(struct drm_device *dev)
2841 {
2842 	struct drm_i915_private *dev_priv = to_i915(dev);
2843 
2844 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2845 }
2846 
2847 /*
2848  * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
2849  * different active planes.
2850  */
2851 
2852 #define SKL_DDB_SIZE		896	/* in blocks */
2853 #define BXT_DDB_SIZE		512
2854 
2855 /*
2856  * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
2857  * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
2858  * other universal planes are in indices 1..n.  Note that this may leave unused
2859  * indices between the top "sprite" plane and the cursor.
2860  */
2861 static int
2862 skl_wm_plane_id(const struct intel_plane *plane)
2863 {
2864 	switch (plane->base.type) {
2865 	case DRM_PLANE_TYPE_PRIMARY:
2866 		return 0;
2867 	case DRM_PLANE_TYPE_CURSOR:
2868 		return PLANE_CURSOR;
2869 	case DRM_PLANE_TYPE_OVERLAY:
2870 		return plane->plane + 1;
2871 	default:
2872 		MISSING_CASE(plane->base.type);
2873 		return plane->plane;
2874 	}
2875 }
2876 
2877 static void
2878 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2879 				   const struct intel_crtc_state *cstate,
2880 				   struct skl_ddb_entry *alloc, /* out */
2881 				   int *num_active /* out */)
2882 {
2883 	struct drm_atomic_state *state = cstate->base.state;
2884 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2885 	struct drm_i915_private *dev_priv = to_i915(dev);
2886 	struct drm_crtc *for_crtc = cstate->base.crtc;
2887 	unsigned int pipe_size, ddb_size;
2888 	int nth_active_pipe;
2889 	int pipe = to_intel_crtc(for_crtc)->pipe;
2890 
2891 	if (WARN_ON(!state) || !cstate->base.active) {
2892 		alloc->start = 0;
2893 		alloc->end = 0;
2894 		*num_active = hweight32(dev_priv->active_crtcs);
2895 		return;
2896 	}
2897 
2898 	if (intel_state->active_pipe_changes)
2899 		*num_active = hweight32(intel_state->active_crtcs);
2900 	else
2901 		*num_active = hweight32(dev_priv->active_crtcs);
2902 
2903 	if (IS_BROXTON(dev))
2904 		ddb_size = BXT_DDB_SIZE;
2905 	else
2906 		ddb_size = SKL_DDB_SIZE;
2907 
2908 	ddb_size -= 4; /* 4 blocks for bypass path allocation */
2909 
2910 	/*
2911 	 * If the state doesn't change the active CRTCs, then there's
2912 	 * no need to recalculate; the existing pipe allocation limits
2913 	 * should remain unchanged.  Note that we're safe from racing
2914 	 * commits since any racing commit that changes the active CRTC
2915 	 * list would need to grab _all_ crtc locks, including the one
2916 	 * we currently hold.
2917 	 */
2918 	if (!intel_state->active_pipe_changes) {
2919 		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
2920 		return;
2921 	}
2922 
2923 	nth_active_pipe = hweight32(intel_state->active_crtcs &
2924 				    (drm_crtc_mask(for_crtc) - 1));
2925 	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2926 	alloc->start = nth_active_pipe * ddb_size / *num_active;
2927 	alloc->end = alloc->start + pipe_size;
2928 }
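
/*
 * Illustrative example for skl_ddb_get_pipe_allocation_limits() above:
 * on SKL with two active pipes the usable DDB is 896 - 4 = 892 blocks,
 * so each pipe gets 892 / 2 = 446 blocks and the second active pipe
 * (nth_active_pipe = 1) is assigned [446, 892).
 */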
2929 
2930 static unsigned int skl_cursor_allocation(int num_active)
2931 {
2932 	if (num_active == 1)
2933 		return 32;
2934 
2935 	return 8;
2936 }
2937 
2938 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
2939 {
2940 	entry->start = reg & 0x3ff;
2941 	entry->end = (reg >> 16) & 0x3ff;
2942 	if (entry->end)
2943 		entry->end += 1;
2944 }
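
/*
 * Illustrative example for skl_ddb_entry_init_from_hw() above: an
 * assumed register value of 0x00ef0020 decodes to start = 0x20 = 32
 * and end = 0xef = 239, stored as the half-open range [32, 240).
 */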
2945 
2946 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2947 			  struct skl_ddb_allocation *ddb /* out */)
2948 {
2949 	enum i915_pipe pipe;
2950 	int plane;
2951 	u32 val;
2952 
2953 	memset(ddb, 0, sizeof(*ddb));
2954 
2955 	for_each_pipe(dev_priv, pipe) {
2956 		enum intel_display_power_domain power_domain;
2957 
2958 		power_domain = POWER_DOMAIN_PIPE(pipe);
2959 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2960 			continue;
2961 
2962 		for_each_plane(dev_priv, pipe, plane) {
2963 			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
2964 			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
2965 						   val);
2966 		}
2967 
2968 		val = I915_READ(CUR_BUF_CFG(pipe));
2969 		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2970 					   val);
2971 
2972 		intel_display_power_put(dev_priv, power_domain);
2973 	}
2974 }
2975 
2976 /*
2977  * Determines the downscale amount of a plane for the purposes of watermark calculations.
2978  * The bspec defines downscale amount as:
2979  *
2980  * """
2981  * Horizontal down scale amount = maximum[1, Horizontal source size /
2982  *                                           Horizontal destination size]
2983  * Vertical down scale amount = maximum[1, Vertical source size /
2984  *                                         Vertical destination size]
2985  * Total down scale amount = Horizontal down scale amount *
2986  *                           Vertical down scale amount
2987  * """
2988  *
2989  * Return value is provided in 16.16 fixed point form to retain fractional part.
2990  * Caller should take care of dividing & rounding off the value.
2991  */
2992 static uint32_t
2993 skl_plane_downscale_amount(const struct intel_plane_state *pstate)
2994 {
2995 	uint32_t downscale_h, downscale_w;
2996 	uint32_t src_w, src_h, dst_w, dst_h;
2997 
2998 	if (WARN_ON(!pstate->visible))
2999 		return DRM_PLANE_HELPER_NO_SCALING;
3000 
3001 	/* n.b., src is 16.16 fixed point, dst is whole integer */
3002 	src_w = drm_rect_width(&pstate->src);
3003 	src_h = drm_rect_height(&pstate->src);
3004 	dst_w = drm_rect_width(&pstate->dst);
3005 	dst_h = drm_rect_height(&pstate->dst);
3006 	if (intel_rotation_90_or_270(pstate->base.rotation))
3007 		swap(dst_w, dst_h);
3008 
3009 	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3010 	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3011 
3012 	/* Provide result in 16.16 fixed point */
3013 	return (uint64_t)downscale_w * downscale_h >> 16;
3014 }
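
/*
 * Illustrative example for skl_plane_downscale_amount() above: an
 * assumed 3840x2160 source (16.16 fixed point in pstate->src) scanned
 * out into a 1920x1080 destination gives
 *   downscale_w = downscale_h = 2 << 16,
 * so the total amount is ((2 << 16) * (2 << 16)) >> 16 = 4 << 16,
 * i.e. 4.0 in 16.16 fixed point.
 */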
3015 
3016 static unsigned int
3017 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
3018 			     struct drm_plane_state *pstate,
3019 			     int y)
3020 {
3021 	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3022 	struct drm_framebuffer *fb = pstate->fb;
3023 	uint32_t down_scale_amount, data_rate;
3024 	uint32_t width = 0, height = 0;
3025 	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3026 
3027 	if (!intel_pstate->visible)
3028 		return 0;
3029 	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3030 		return 0;
3031 	if (y && format != DRM_FORMAT_NV12)
3032 		return 0;
3033 
3034 	width = drm_rect_width(&intel_pstate->src) >> 16;
3035 	height = drm_rect_height(&intel_pstate->src) >> 16;
3036 
3037 	if (intel_rotation_90_or_270(pstate->rotation))
3038 		swap(width, height);
3039 
3040 	/* for planar format */
3041 	if (format == DRM_FORMAT_NV12) {
3042 		if (y)  /* y-plane data rate */
3043 			data_rate = width * height *
3044 				drm_format_plane_cpp(format, 0);
3045 		else    /* uv-plane data rate */
3046 			data_rate = (width / 2) * (height / 2) *
3047 				drm_format_plane_cpp(format, 1);
3048 	} else {
3049 		/* for packed formats */
3050 		data_rate = width * height * drm_format_plane_cpp(format, 0);
3051 	}
3052 
3053 	down_scale_amount = skl_plane_downscale_amount(intel_pstate);
3054 
3055 	return (uint64_t)data_rate * down_scale_amount >> 16;
3056 }
3057 
3058 /*
3059  * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3060  * an 8192x4096@32bpp framebuffer:
3061  *   3 * 4096 * 8192 * 4 < 2^32
3062  */
3063 static unsigned int
3064 skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
3065 {
3066 	struct drm_crtc_state *cstate = &intel_cstate->base;
3067 	struct drm_atomic_state *state = cstate->state;
3068 	struct drm_crtc *crtc = cstate->crtc;
3069 	struct drm_device *dev = crtc->dev;
3070 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3071 	struct drm_plane *plane;
3072 	const struct intel_plane *intel_plane;
3073 	struct drm_plane_state *pstate;
3074 	unsigned int rate, total_data_rate = 0;
3075 	int id;
3076 	int i;
3077 
3078 	if (WARN_ON(!state))
3079 		return 0;
3080 
3081 	/* Calculate and cache data rate for each plane */
3082 	for_each_plane_in_state(state, plane, pstate, i) {
3083 		id = skl_wm_plane_id(to_intel_plane(plane));
3084 		intel_plane = to_intel_plane(plane);
3085 
3086 		if (intel_plane->pipe != intel_crtc->pipe)
3087 			continue;
3088 
3089 		/* packed/uv */
3090 		rate = skl_plane_relative_data_rate(intel_cstate,
3091 						    pstate, 0);
3092 		intel_cstate->wm.skl.plane_data_rate[id] = rate;
3093 
3094 		/* y-plane */
3095 		rate = skl_plane_relative_data_rate(intel_cstate,
3096 						    pstate, 1);
3097 		intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3098 	}
3099 
3100 	/* Calculate CRTC's total data rate from cached values */
3101 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3102 		int id = skl_wm_plane_id(intel_plane);
3103 
3104 		/* packed/uv */
3105 		total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3106 		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3107 	}
3108 
3109 	WARN_ON(cstate->plane_mask && total_data_rate == 0);
3110 
3111 	return total_data_rate;
3112 }
3113 
3114 static uint16_t
3115 skl_ddb_min_alloc(struct drm_plane_state *pstate,
3116 		  const int y)
3117 {
3118 	struct drm_framebuffer *fb = pstate->fb;
3119 	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3120 	uint32_t src_w, src_h;
3121 	uint32_t min_scanlines = 8;
3122 	uint8_t plane_bpp;
3123 
3124 	if (WARN_ON(!fb))
3125 		return 0;
3126 
3127 	/* For packed formats, no y-plane, return 0 */
3128 	if (y && fb->pixel_format != DRM_FORMAT_NV12)
3129 		return 0;
3130 
3131 	/* For non-Y-tiled formats, return 8 blocks */
3132 	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3133 	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3134 		return 8;
3135 
3136 	src_w = drm_rect_width(&intel_pstate->src) >> 16;
3137 	src_h = drm_rect_height(&intel_pstate->src) >> 16;
3138 
3139 	if (intel_rotation_90_or_270(pstate->rotation))
3140 		swap(src_w, src_h);
3141 
3142 	/* Halve UV plane width and height for NV12 */
3143 	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3144 		src_w /= 2;
3145 		src_h /= 2;
3146 	}
3147 
3148 	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3149 		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3150 	else
3151 		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3152 
3153 	if (intel_rotation_90_or_270(pstate->rotation)) {
3154 		switch (plane_bpp) {
3155 		case 1:
3156 			min_scanlines = 32;
3157 			break;
3158 		case 2:
3159 			min_scanlines = 16;
3160 			break;
3161 		case 4:
3162 			min_scanlines = 8;
3163 			break;
3164 		case 8:
3165 			min_scanlines = 4;
3166 			break;
3167 		default:
3168 			WARN(1, "Unsupported pixel depth %u for rotation",
3169 			     plane_bpp);
3170 			min_scanlines = 32;
3171 		}
3172 	}
3173 
3174 	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3175 }
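
/*
 * Illustrative example for skl_ddb_min_alloc() above: an assumed
 * Y-tiled, unrotated (min_scanlines = 8), 1920-wide XRGB8888 plane
 * (plane_bpp = 4) needs
 *   DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123
 * blocks at minimum.
 */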
3176 
3177 static int
3178 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3179 		      struct skl_ddb_allocation *ddb /* out */)
3180 {
3181 	struct drm_atomic_state *state = cstate->base.state;
3182 	struct drm_crtc *crtc = cstate->base.crtc;
3183 	struct drm_device *dev = crtc->dev;
3184 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3185 	struct intel_plane *intel_plane;
3186 	struct drm_plane *plane;
3187 	struct drm_plane_state *pstate;
3188 	enum i915_pipe pipe = intel_crtc->pipe;
3189 	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3190 	uint16_t alloc_size, start, cursor_blocks;
3191 	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3192 	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3193 	unsigned int total_data_rate;
3194 	int num_active;
3195 	int id, i;
3196 
3197 	if (WARN_ON(!state))
3198 		return 0;
3199 
3200 	if (!cstate->base.active) {
3201 		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3202 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3203 		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3204 		return 0;
3205 	}
3206 
3207 	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3208 	alloc_size = skl_ddb_entry_size(alloc);
3209 	if (alloc_size == 0) {
3210 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3211 		return 0;
3212 	}
3213 
3214 	cursor_blocks = skl_cursor_allocation(num_active);
3215 	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3216 	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3217 
3218 	alloc_size -= cursor_blocks;
3219 
3220 	/* 1. Allocate the minimum required blocks for each active plane */
3221 	for_each_plane_in_state(state, plane, pstate, i) {
3222 		intel_plane = to_intel_plane(plane);
3223 		id = skl_wm_plane_id(intel_plane);
3224 
3225 		if (intel_plane->pipe != pipe)
3226 			continue;
3227 
3228 		if (!to_intel_plane_state(pstate)->visible) {
3229 			minimum[id] = 0;
3230 			y_minimum[id] = 0;
3231 			continue;
3232 		}
3233 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3234 			minimum[id] = 0;
3235 			y_minimum[id] = 0;
3236 			continue;
3237 		}
3238 
3239 		minimum[id] = skl_ddb_min_alloc(pstate, 0);
3240 		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3241 	}
3242 
3243 	for (i = 0; i < PLANE_CURSOR; i++) {
3244 		alloc_size -= minimum[i];
3245 		alloc_size -= y_minimum[i];
3246 	}
3247 
3248 	/*
3249 	 * 2. Distribute the remaining space in proportion to the amount of
3250 	 * data each plane needs to fetch from memory.
3251 	 *
3252 	 * FIXME: we may not allocate every single block here.
3253 	 */
3254 	total_data_rate = skl_get_total_relative_data_rate(cstate);
3255 	if (total_data_rate == 0)
3256 		return 0;
3257 
3258 	start = alloc->start;
3259 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3260 		unsigned int data_rate, y_data_rate;
3261 		uint16_t plane_blocks, y_plane_blocks = 0;
3262 		int id = skl_wm_plane_id(intel_plane);
3263 
3264 		data_rate = cstate->wm.skl.plane_data_rate[id];
3265 
3266 		/*
3267 		 * Allocation for packed formats (or the uv-plane part of a planar
3268 		 * format): promote the expression to 64 bits to avoid overflow; the
3269 		 * result is below the available space since data_rate/total_data_rate < 1.
3270 		 */
3271 		plane_blocks = minimum[id];
3272 		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3273 					total_data_rate);
3274 
3275 		/* Leave disabled planes at (0,0) */
3276 		if (data_rate) {
3277 			ddb->plane[pipe][id].start = start;
3278 			ddb->plane[pipe][id].end = start + plane_blocks;
3279 		}
3280 
3281 		start += plane_blocks;
3282 
3283 		/*
3284 		 * Allocation for the y-plane part of a planar format:
3285 		 */
3286 		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3287 
3288 		y_plane_blocks = y_minimum[id];
3289 		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3290 					total_data_rate);
3291 
3292 		if (y_data_rate) {
3293 			ddb->y_plane[pipe][id].start = start;
3294 			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3295 		}
3296 
3297 		start += y_plane_blocks;
3298 	}
3299 
3300 	return 0;
3301 }
3302 
3303 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
3304 {
3305 	/* TODO: Take into account the scalers once we support them */
3306 	return config->base.adjusted_mode.crtc_clock;
3307 }
3308 
3309 /*
3310  * The max latency should be 257 (max the punit can code is 255 and we add 2us
3311  * for the read latency) and cpp should always be <= 8, so that
3312  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3313  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3314  */
3315 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
3316 {
3317 	uint32_t wm_intermediate_val, ret;
3318 
3319 	if (latency == 0)
3320 		return UINT_MAX;
3321 
3322 	wm_intermediate_val = latency * pixel_rate * cpp / 512;
3323 	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3324 
3325 	return ret;
3326 }
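
/*
 * Illustrative example for skl_wm_method1() above, with assumed values
 * (latency = 7 us, pixel_rate = 148500 kHz, cpp = 4):
 *   7 * 148500 * 4 / 512 = 8121
 *   DIV_ROUND_UP(8121, 1000) = 9
 * i.e. the ~4158 bytes fetched during the latency window round up to
 * nine 512-byte blocks.
 */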
3327 
3328 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3329 			       uint32_t horiz_pixels, uint8_t cpp,
3330 			       uint64_t tiling, uint32_t latency)
3331 {
3332 	uint32_t ret;
3333 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
3334 	uint32_t wm_intermediate_val;
3335 
3336 	if (latency == 0)
3337 		return UINT_MAX;
3338 
3339 	plane_bytes_per_line = horiz_pixels * cpp;
3340 
3341 	if (tiling == I915_FORMAT_MOD_Y_TILED ||
3342 	    tiling == I915_FORMAT_MOD_Yf_TILED) {
3343 		plane_bytes_per_line *= 4;
3344 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3345 		plane_blocks_per_line /= 4;
3346 	} else {
3347 		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3348 	}
3349 
3350 	wm_intermediate_val = latency * pixel_rate;
3351 	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3352 				plane_blocks_per_line;
3353 
3354 	return ret;
3355 }
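
/*
 * Illustrative example for skl_wm_method2() above, with assumed values
 * (latency = 7 us, pixel_rate = 148500 kHz, pipe_htotal = 2200,
 * horiz_pixels = 1920, cpp = 4, X-tiled):
 *   plane_blocks_per_line = DIV_ROUND_UP(1920 * 4, 512) = 15
 *   DIV_ROUND_UP(7 * 148500, 2200 * 1000) * 15 = 1 * 15 = 15
 * i.e. the 0.47 lines elapsed during the latency round up to one full
 * line of 15 blocks.
 */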
3356 
3357 static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3358 					      struct intel_plane_state *pstate)
3359 {
3360 	uint64_t adjusted_pixel_rate;
3361 	uint64_t downscale_amount;
3362 	uint64_t pixel_rate;
3363 
3364 	/* Shouldn't reach here on disabled planes... */
3365 	if (WARN_ON(!pstate->visible))
3366 		return 0;
3367 
3368 	/*
3369 	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3370 	 * with additional adjustments for plane-specific scaling.
3371 	 */
3372 	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
3373 	downscale_amount = skl_plane_downscale_amount(pstate);
3374 
3375 	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3376 	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3377 
3378 	return pixel_rate;
3379 }
3380 
3381 static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3382 				struct intel_crtc_state *cstate,
3383 				struct intel_plane_state *intel_pstate,
3384 				uint16_t ddb_allocation,
3385 				int level,
3386 				uint16_t *out_blocks, /* out */
3387 				uint8_t *out_lines, /* out */
3388 				bool *enabled /* out */)
3389 {
3390 	struct drm_plane_state *pstate = &intel_pstate->base;
3391 	struct drm_framebuffer *fb = pstate->fb;
3392 	uint32_t latency = dev_priv->wm.skl_latency[level];
3393 	uint32_t method1, method2;
3394 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
3395 	uint32_t res_blocks, res_lines;
3396 	uint32_t selected_result;
3397 	uint8_t cpp;
3398 	uint32_t width = 0, height = 0;
3399 	uint32_t plane_pixel_rate;
3400 
3401 	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
3402 		*enabled = false;
3403 		return 0;
3404 	}
3405 
3406 	width = drm_rect_width(&intel_pstate->src) >> 16;
3407 	height = drm_rect_height(&intel_pstate->src) >> 16;
3408 
3409 	if (intel_rotation_90_or_270(pstate->rotation))
3410 		swap(width, height);
3411 
3412 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3413 	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3414 
3415 	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3416 	method2 = skl_wm_method2(plane_pixel_rate,
3417 				 cstate->base.adjusted_mode.crtc_htotal,
3418 				 width,
3419 				 cpp,
3420 				 fb->modifier[0],
3421 				 latency);
3422 
3423 	plane_bytes_per_line = width * cpp;
3424 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3425 
3426 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3427 	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3428 		uint32_t min_scanlines = 4;
3429 		uint32_t y_tile_minimum;
3430 		if (intel_rotation_90_or_270(pstate->rotation)) {
3431 			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3432 				drm_format_plane_cpp(fb->pixel_format, 1) :
3433 				drm_format_plane_cpp(fb->pixel_format, 0);
3434 
3435 			switch (cpp) {
3436 			case 1:
3437 				min_scanlines = 16;
3438 				break;
3439 			case 2:
3440 				min_scanlines = 8;
3441 				break;
3442 			case 8:
3443 				WARN(1, "Unsupported pixel depth for rotation");
3444 			}
3445 		}
3446 		y_tile_minimum = plane_blocks_per_line * min_scanlines;
3447 		selected_result = max(method2, y_tile_minimum);
3448 	} else {
3449 		if ((ddb_allocation / plane_blocks_per_line) >= 1)
3450 			selected_result = min(method1, method2);
3451 		else
3452 			selected_result = method1;
3453 	}
3454 
3455 	res_blocks = selected_result + 1;
3456 	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
3457 
3458 	if (level >= 1 && level <= 7) {
3459 		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
3460 		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
3461 			res_lines += 4;
3462 		else
3463 			res_blocks++;
3464 	}
3465 
3466 	if (res_blocks >= ddb_allocation || res_lines > 31) {
3467 		*enabled = false;
3468 
3469 		/*
3470 		 * If there are no valid level 0 watermarks, then we can't
3471 		 * support this display configuration.
3472 		 */
3473 		if (level) {
3474 			return 0;
3475 		} else {
3476 			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3477 			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3478 				      to_intel_crtc(cstate->base.crtc)->pipe,
3479 				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
3480 				      res_blocks, ddb_allocation, res_lines);
3481 
3482 			return -EINVAL;
3483 		}
3484 	}
3485 
3486 	*out_blocks = res_blocks;
3487 	*out_lines = res_lines;
3488 	*enabled = true;
3489 
3490 	return 0;
3491 }
3492 
3493 static int
3494 skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3495 		     struct skl_ddb_allocation *ddb,
3496 		     struct intel_crtc_state *cstate,
3497 		     int level,
3498 		     struct skl_wm_level *result)
3499 {
3500 	struct drm_atomic_state *state = cstate->base.state;
3501 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3502 	struct drm_plane *plane;
3503 	struct intel_plane *intel_plane;
3504 	struct intel_plane_state *intel_pstate;
3505 	uint16_t ddb_blocks;
3506 	enum i915_pipe pipe = intel_crtc->pipe;
3507 	int ret;
3508 
3509 	/*
3510 	 * We'll only calculate watermarks for planes that are actually
3511 	 * enabled, so make sure all other planes are set as disabled.
3512 	 */
3513 	memset(result, 0, sizeof(*result));
3514 
3515 	for_each_intel_plane_mask(&dev_priv->drm,
3516 				  intel_plane,
3517 				  cstate->base.plane_mask) {
3518 		int i = skl_wm_plane_id(intel_plane);
3519 
3520 		plane = &intel_plane->base;
3521 		intel_pstate = NULL;
3522 		if (state)
3523 			intel_pstate =
3524 				intel_atomic_get_existing_plane_state(state,
3525 								      intel_plane);
3526 
3527 		/*
3528 		 * Note: If we start supporting multiple pending atomic commits
3529 		 * against the same planes/CRTC's in the future, plane->state
3530 		 * will no longer be the correct pre-state to use for the
3531 		 * calculations here and we'll need to change where we get the
3532 		 * 'unchanged' plane data from.
3533 		 *
3534 		 * For now this is fine because we only allow one queued commit
3535 		 * against a CRTC.  Even if the plane isn't modified by this
3536 		 * transaction and we don't have a plane lock, we still have
3537 		 * the CRTC's lock, so we know that no other transactions are
3538 		 * racing with us to update it.
3539 		 */
3540 		if (!intel_pstate)
3541 			intel_pstate = to_intel_plane_state(plane->state);
3542 
3543 		WARN_ON(!intel_pstate->base.fb);
3544 
3545 		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3546 
3547 		ret = skl_compute_plane_wm(dev_priv,
3548 					   cstate,
3549 					   intel_pstate,
3550 					   ddb_blocks,
3551 					   level,
3552 					   &result->plane_res_b[i],
3553 					   &result->plane_res_l[i],
3554 					   &result->plane_en[i]);
3555 		if (ret)
3556 			return ret;
3557 	}
3558 
3559 	return 0;
3560 }
3561 
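/*
 * Linetime is programmed in 0.125 us units: for example (assumed mode
 * values), htotal = 2200 and a pixel rate of 148500 kHz give
 * DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. ~14.8 us per line.
 */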
3562 static uint32_t
3563 skl_compute_linetime_wm(struct intel_crtc_state *cstate)
3564 {
3565 	if (!cstate->base.active)
3566 		return 0;
3567 
3568 	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
3569 		return 0;
3570 
3571 	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
3572 			    skl_pipe_pixel_rate(cstate));
3573 }
3574 
3575 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3576 				      struct skl_wm_level *trans_wm /* out */)
3577 {
3578 	struct drm_crtc *crtc = cstate->base.crtc;
3579 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3580 	struct intel_plane *intel_plane;
3581 
3582 	if (!cstate->base.active)
3583 		return;
3584 
3585 	/* Until we know more, just disable transition WMs */
3586 	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
3587 		int i = skl_wm_plane_id(intel_plane);
3588 
3589 		trans_wm->plane_en[i] = false;
3590 	}
3591 }
3592 
3593 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3594 			     struct skl_ddb_allocation *ddb,
3595 			     struct skl_pipe_wm *pipe_wm)
3596 {
3597 	struct drm_device *dev = cstate->base.crtc->dev;
3598 	const struct drm_i915_private *dev_priv = to_i915(dev);
3599 	int level, max_level = ilk_wm_max_level(dev);
3600 	int ret;
3601 
3602 	for (level = 0; level <= max_level; level++) {
3603 		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3604 					   level, &pipe_wm->wm[level]);
3605 		if (ret)
3606 			return ret;
3607 	}
3608 	pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3609 
3610 	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3611 
3612 	return 0;
3613 }
3614 
3615 static void skl_compute_wm_results(struct drm_device *dev,
3616 				   struct skl_pipe_wm *p_wm,
3617 				   struct skl_wm_values *r,
3618 				   struct intel_crtc *intel_crtc)
3619 {
3620 	int level, max_level = ilk_wm_max_level(dev);
3621 	enum i915_pipe pipe = intel_crtc->pipe;
3622 	uint32_t temp;
3623 	int i;
3624 
3625 	for (level = 0; level <= max_level; level++) {
3626 		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3627 			temp = 0;
3628 
3629 			temp |= p_wm->wm[level].plane_res_l[i] <<
3630 					PLANE_WM_LINES_SHIFT;
3631 			temp |= p_wm->wm[level].plane_res_b[i];
3632 			if (p_wm->wm[level].plane_en[i])
3633 				temp |= PLANE_WM_EN;
3634 
3635 			r->plane[pipe][i][level] = temp;
3636 		}
3637 
3638 		temp = 0;
3639 
3640 		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3641 		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
3642 
3643 		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
3644 			temp |= PLANE_WM_EN;
3645 
3646 		r->plane[pipe][PLANE_CURSOR][level] = temp;
3647 
3648 	}
3649 
3650 	/* transition WMs */
3651 	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3652 		temp = 0;
3653 		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3654 		temp |= p_wm->trans_wm.plane_res_b[i];
3655 		if (p_wm->trans_wm.plane_en[i])
3656 			temp |= PLANE_WM_EN;
3657 
3658 		r->plane_trans[pipe][i] = temp;
3659 	}
3660 
3661 	temp = 0;
3662 	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
3663 	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
3664 	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
3665 		temp |= PLANE_WM_EN;
3666 
3667 	r->plane_trans[pipe][PLANE_CURSOR] = temp;
3668 
3669 	r->wm_linetime[pipe] = p_wm->linetime;
3670 }
3671 
3672 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3673 				i915_reg_t reg,
3674 				const struct skl_ddb_entry *entry)
3675 {
3676 	if (entry->end)
3677 		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3678 	else
3679 		I915_WRITE(reg, 0);
3680 }
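
/*
 * For illustration: an entry with start = 0, end = 160 (blocks 0..159)
 * is written as (159 << 16) | 0; the hardware end field is inclusive
 * while skl_ddb_entry.end is exclusive.
 */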
3681 
3682 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3683 				const struct skl_wm_values *new)
3684 {
3685 	struct drm_device *dev = &dev_priv->drm;
3686 	struct intel_crtc *crtc;
3687 
3688 	for_each_intel_crtc(dev, crtc) {
3689 		int i, level, max_level = ilk_wm_max_level(dev);
3690 		enum i915_pipe pipe = crtc->pipe;
3691 
3692 		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
3693 			continue;
3694 		if (!crtc->active)
3695 			continue;
3696 
3697 		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3698 
3699 		for (level = 0; level <= max_level; level++) {
3700 			for (i = 0; i < intel_num_planes(crtc); i++)
3701 				I915_WRITE(PLANE_WM(pipe, i, level),
3702 					   new->plane[pipe][i][level]);
3703 			I915_WRITE(CUR_WM(pipe, level),
3704 				   new->plane[pipe][PLANE_CURSOR][level]);
3705 		}
3706 		for (i = 0; i < intel_num_planes(crtc); i++)
3707 			I915_WRITE(PLANE_WM_TRANS(pipe, i),
3708 				   new->plane_trans[pipe][i]);
3709 		I915_WRITE(CUR_WM_TRANS(pipe),
3710 			   new->plane_trans[pipe][PLANE_CURSOR]);
3711 
3712 		for (i = 0; i < intel_num_planes(crtc); i++) {
3713 			skl_ddb_entry_write(dev_priv,
3714 					    PLANE_BUF_CFG(pipe, i),
3715 					    &new->ddb.plane[pipe][i]);
3716 			skl_ddb_entry_write(dev_priv,
3717 					    PLANE_NV12_BUF_CFG(pipe, i),
3718 					    &new->ddb.y_plane[pipe][i]);
3719 		}
3720 
3721 		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3722 				    &new->ddb.plane[pipe][PLANE_CURSOR]);
3723 	}
3724 }
3725 
3726 /*
3727  * When setting up a new DDB allocation arrangement, we need to correctly
3728  * sequence the times at which the new allocations for the pipes are taken into
3729  * account or we'll have pipes fetching from space previously allocated to
3730  * another pipe.
3731  *
3732  * Roughly the sequence looks like:
3733  *  1. re-allocate the pipe(s) with the allocation being reduced and not
3734  *     overlapping with a previously lit-up pipe (another way to put it is:
3735  *     pipes with their new allocation strictly included in their old ones).
3736  *  2. re-allocate the other pipes that get their allocation reduced
3737  *  3. allocate the pipes having their allocation increased
3738  *
3739  * Steps 1. and 2. are here to take care of the following case:
3740  * - Initially DDB looks like this:
3741  *     |   B    |   C    |
3742  * - enable pipe A.
3743  * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3744  *   allocation
3745  *     |  A  |  B  |  C  |
3746  *
3747  * We need to sequence the re-allocation: C, B, A (and not B, C, A).
3748  */
3749 
3750 static void
3751 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int pass)
3752 {
3753 	int plane;
3754 
3755 	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3756 
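	/*
	 * Dummy writes of the current surface base addresses re-arm the
	 * double-buffered plane registers, so the new DDB/WM values are
	 * latched at the next vblank.
	 */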
3757 	for_each_plane(dev_priv, pipe, plane) {
3758 		I915_WRITE(PLANE_SURF(pipe, plane),
3759 			   I915_READ(PLANE_SURF(pipe, plane)));
3760 	}
3761 	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3762 }
3763 
3764 static bool
3765 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3766 			    const struct skl_ddb_allocation *new,
3767 			    enum i915_pipe pipe)
3768 {
3769 	uint16_t old_size, new_size;
3770 
3771 	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3772 	new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3773 
3774 	return old_size != new_size &&
3775 	       new->pipe[pipe].start >= old->pipe[pipe].start &&
3776 	       new->pipe[pipe].end <= old->pipe[pipe].end;
3777 }
3778 
3779 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3780 				struct skl_wm_values *new_values)
3781 {
3782 	struct drm_device *dev = &dev_priv->drm;
3783 	struct skl_ddb_allocation *cur_ddb, *new_ddb;
3784 	bool reallocated[I915_MAX_PIPES] = {};
3785 	struct intel_crtc *crtc;
3786 	enum i915_pipe pipe;
3787 
3788 	new_ddb = &new_values->ddb;
3789 	cur_ddb = &dev_priv->wm.skl_hw.ddb;
3790 
3791 	/*
3792 	 * First pass: flush the pipes with the new allocation contained into
3793 	 * the old space.
3794 	 *
3795 	 * We'll wait for the vblank on those pipes to ensure we can safely
3796 	 * re-allocate the freed space without this pipe fetching from it.
3797 	 */
3798 	for_each_intel_crtc(dev, crtc) {
3799 		if (!crtc->active)
3800 			continue;
3801 
3802 		pipe = crtc->pipe;
3803 
3804 		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3805 			continue;
3806 
3807 		skl_wm_flush_pipe(dev_priv, pipe, 1);
3808 		intel_wait_for_vblank(dev, pipe);
3809 
3810 		reallocated[pipe] = true;
3811 	}
3812 
3813 
3814 	/*
3815 	 * Second pass: flush the pipes that are having their allocation
3816 	 * reduced, but overlapping with a previous allocation.
3817 	 *
3818 	 * Here as well we need to wait for the vblank to make sure the freed
3819 	 * space is not used anymore.
3820 	 */
3821 	for_each_intel_crtc(dev, crtc) {
3822 		if (!crtc->active)
3823 			continue;
3824 
3825 		pipe = crtc->pipe;
3826 
3827 		if (reallocated[pipe])
3828 			continue;
3829 
3830 		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3831 		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3832 			skl_wm_flush_pipe(dev_priv, pipe, 2);
3833 			intel_wait_for_vblank(dev, pipe);
3834 			reallocated[pipe] = true;
3835 		}
3836 	}
3837 
3838 	/*
3839 	 * Third pass: flush the pipes that got more space allocated.
3840 	 *
3841 	 * We don't need to actively wait for the update here, next vblank
3842 	 * will just get more DDB space with the correct WM values.
3843 	 */
3844 	for_each_intel_crtc(dev, crtc) {
3845 		if (!crtc->active)
3846 			continue;
3847 
3848 		pipe = crtc->pipe;
3849 
3850 		/*
3851 		 * At this point, only the pipes that got more space than
3852 		 * before are left to re-allocate.
3853 		 */
3854 		if (reallocated[pipe])
3855 			continue;
3856 
3857 		skl_wm_flush_pipe(dev_priv, pipe, 3);
3858 	}
3859 }
3860 
3861 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3862 			      struct skl_ddb_allocation *ddb, /* out */
3863 			      struct skl_pipe_wm *pipe_wm, /* out */
3864 			      bool *changed /* out */)
3865 {
3866 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3867 	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3868 	int ret;
3869 
3870 	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3871 	if (ret)
3872 		return ret;
3873 
3874 	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3875 		*changed = false;
3876 	else
3877 		*changed = true;
3878 
3879 	return 0;
3880 }
3881 
3882 static uint32_t
3883 pipes_modified(struct drm_atomic_state *state)
3884 {
3885 	struct drm_crtc *crtc;
3886 	struct drm_crtc_state *cstate;
3887 	uint32_t i, ret = 0;
3888 
3889 	for_each_crtc_in_state(state, crtc, cstate, i)
3890 		ret |= drm_crtc_mask(crtc);
3891 
3892 	return ret;
3893 }
3894 
3895 static int
3896 skl_compute_ddb(struct drm_atomic_state *state)
3897 {
3898 	struct drm_device *dev = state->dev;
3899 	struct drm_i915_private *dev_priv = to_i915(dev);
3900 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3901 	struct intel_crtc *intel_crtc;
3902 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3903 	uint32_t realloc_pipes = pipes_modified(state);
3904 	int ret;
3905 
3906 	/*
3907 	 * If this is our first atomic update following hardware readout,
3908 	 * we can't trust the DDB that the BIOS programmed for us.  Let's
3909 	 * pretend that all pipes switched active status so that we'll
3910 	 * ensure a full DDB recompute.
3911 	 */
3912 	if (dev_priv->wm.distrust_bios_wm)
3913 		intel_state->active_pipe_changes = ~0;
3914 
3915 	/*
3916 	 * If the modeset changes which CRTC's are active, we need to
3917 	 * recompute the DDB allocation for *all* active pipes, even
3918 	 * those that weren't otherwise being modified in any way by this
3919 	 * atomic commit.  Due to the shrinking of the per-pipe allocations
3920 	 * when new active CRTC's are added, it's possible for a pipe that
3921 	 * we were already using and aren't changing at all here to suddenly
3922 	 * become invalid if its DDB requirements exceed its new allocation.
3923 	 *
3924 	 * Note that if we wind up doing a full DDB recompute, we can't let
3925 	 * any other display updates race with this transaction, so we need
3926 	 * to grab the lock on *all* CRTC's.
3927 	 */
3928 	if (intel_state->active_pipe_changes) {
3929 		realloc_pipes = ~0;
3930 		intel_state->wm_results.dirty_pipes = ~0;
3931 	}
3932 
3933 	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3934 		struct intel_crtc_state *cstate;
3935 
3936 		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
3937 		if (IS_ERR(cstate))
3938 			return PTR_ERR(cstate);
3939 
3940 		ret = skl_allocate_pipe_ddb(cstate, ddb);
3941 		if (ret)
3942 			return ret;
3943 	}
3944 
3945 	return 0;
3946 }
3947 
3948 static int
3949 skl_compute_wm(struct drm_atomic_state *state)
3950 {
3951 	struct drm_crtc *crtc;
3952 	struct drm_crtc_state *cstate;
3953 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3954 	struct skl_wm_values *results = &intel_state->wm_results;
3955 	struct skl_pipe_wm *pipe_wm;
3956 	bool changed = false;
3957 	int ret, i;
3958 
3959 	/*
3960 	 * If this transaction isn't actually touching any CRTC's, don't
3961 	 * bother with watermark calculation.  Note that if we pass this
3962 	 * test, we're guaranteed to hold at least one CRTC state mutex,
3963 	 * which means we can safely use values like dev_priv->active_crtcs
3964 	 * since any racing commits that want to update them would need to
3965 	 * hold _all_ CRTC state mutexes.
3966 	 */
3967 	for_each_crtc_in_state(state, crtc, cstate, i)
3968 		changed = true;
3969 	if (!changed)
3970 		return 0;
3971 
3972 	/* Clear all dirty flags */
3973 	results->dirty_pipes = 0;
3974 
3975 	ret = skl_compute_ddb(state);
3976 	if (ret)
3977 		return ret;
3978 
3979 	/*
3980 	 * Calculate WM's for all pipes that are part of this transaction.
3981 	 * Note that the DDB allocation above may have added more CRTC's that
3982 	 * weren't otherwise being modified (and set bits in dirty_pipes) if
3983 	 * pipe allocations had to change.
3984 	 *
3985 	 * FIXME:  Now that we're doing this in the atomic check phase, we
3986 	 * should allow skl_update_pipe_wm() to return failure in cases where
3987 	 * no suitable watermark values can be found.
3988 	 */
3989 	for_each_crtc_in_state(state, crtc, cstate, i) {
3990 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3991 		struct intel_crtc_state *intel_cstate =
3992 			to_intel_crtc_state(cstate);
3993 
3994 		pipe_wm = &intel_cstate->wm.skl.optimal;
3995 		ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
3996 					 &changed);
3997 		if (ret)
3998 			return ret;
3999 
4000 		if (changed)
4001 			results->dirty_pipes |= drm_crtc_mask(crtc);
4002 
4003 		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4004 			/* This pipe's WM's did not change */
4005 			continue;
4006 
4007 		intel_cstate->update_wm_pre = true;
4008 		skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
4009 	}
4010 
4011 	return 0;
4012 }
4013 
4014 static void skl_update_wm(struct drm_crtc *crtc)
4015 {
4016 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4017 	struct drm_device *dev = crtc->dev;
4018 	struct drm_i915_private *dev_priv = to_i915(dev);
4019 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
4020 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4021 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4022 
4023 	if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4024 		return;
4025 
4026 	intel_crtc->wm.active.skl = *pipe_wm;
4027 
4028 	mutex_lock(&dev_priv->wm.wm_mutex);
4029 
4030 	skl_write_wm_values(dev_priv, results);
4031 	skl_flush_wm_values(dev_priv, results);
4032 
4033 	/* store the new configuration */
4034 	dev_priv->wm.skl_hw = *results;
4035 
4036 	mutex_unlock(&dev_priv->wm.wm_mutex);
4037 }
4038 
4039 static void ilk_compute_wm_config(struct drm_device *dev,
4040 				  struct intel_wm_config *config)
4041 {
4042 	struct intel_crtc *crtc;
4043 
4044 	/* Compute the currently _active_ config */
4045 	for_each_intel_crtc(dev, crtc) {
4046 		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
4047 
4048 		if (!wm->pipe_enabled)
4049 			continue;
4050 
4051 		config->sprites_enabled |= wm->sprites_enabled;
4052 		config->sprites_scaled |= wm->sprites_scaled;
4053 		config->num_pipes_active++;
4054 	}
4055 }
4056 
4057 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
4058 {
4059 	struct drm_device *dev = &dev_priv->drm;
4060 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
4061 	struct ilk_wm_maximums max;
4062 	struct intel_wm_config config = {};
4063 	struct ilk_wm_values results = {};
4064 	enum intel_ddb_partitioning partitioning;
4065 
4066 	ilk_compute_wm_config(dev, &config);
4067 
4068 	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
4069 	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
4070 
4071 	/* 5/6 split only in single pipe config on IVB+ */
4072 	if (INTEL_INFO(dev)->gen >= 7 &&
4073 	    config.num_pipes_active == 1 && config.sprites_enabled) {
4074 		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
4075 		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
4076 
4077 		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
4078 	} else {
4079 		best_lp_wm = &lp_wm_1_2;
4080 	}
4081 
4082 	partitioning = (best_lp_wm == &lp_wm_1_2) ?
4083 		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
4084 
4085 	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
4086 
4087 	ilk_write_wm_values(dev_priv, &results);
4088 }
4089 
4090 static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
4091 {
4092 	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4093 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4094 
4095 	mutex_lock(&dev_priv->wm.wm_mutex);
4096 	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
4097 	ilk_program_watermarks(dev_priv);
4098 	mutex_unlock(&dev_priv->wm.wm_mutex);
4099 }
4100 
4101 static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
4102 {
4103 	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
4104 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
4105 
4106 	mutex_lock(&dev_priv->wm.wm_mutex);
4107 	if (cstate->wm.need_postvbl_update) {
4108 		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
4109 		ilk_program_watermarks(dev_priv);
4110 	}
4111 	mutex_unlock(&dev_priv->wm.wm_mutex);
4112 }
4113 
4114 static void skl_pipe_wm_active_state(uint32_t val,
4115 				     struct skl_pipe_wm *active,
4116 				     bool is_transwm,
4117 				     bool is_cursor,
4118 				     int i,
4119 				     int level)
4120 {
4121 	bool is_enabled = (val & PLANE_WM_EN) != 0;
4122 
4123 	if (!is_transwm) {
4124 		if (!is_cursor) {
4125 			active->wm[level].plane_en[i] = is_enabled;
4126 			active->wm[level].plane_res_b[i] =
4127 					val & PLANE_WM_BLOCKS_MASK;
4128 			active->wm[level].plane_res_l[i] =
4129 					(val >> PLANE_WM_LINES_SHIFT) &
4130 						PLANE_WM_LINES_MASK;
4131 		} else {
4132 			active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
4133 			active->wm[level].plane_res_b[PLANE_CURSOR] =
4134 					val & PLANE_WM_BLOCKS_MASK;
4135 			active->wm[level].plane_res_l[PLANE_CURSOR] =
4136 					(val >> PLANE_WM_LINES_SHIFT) &
4137 						PLANE_WM_LINES_MASK;
4138 		}
4139 	} else {
4140 		if (!is_cursor) {
4141 			active->trans_wm.plane_en[i] = is_enabled;
4142 			active->trans_wm.plane_res_b[i] =
4143 					val & PLANE_WM_BLOCKS_MASK;
4144 			active->trans_wm.plane_res_l[i] =
4145 					(val >> PLANE_WM_LINES_SHIFT) &
4146 						PLANE_WM_LINES_MASK;
4147 		} else {
4148 			active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
4149 			active->trans_wm.plane_res_b[PLANE_CURSOR] =
4150 					val & PLANE_WM_BLOCKS_MASK;
4151 			active->trans_wm.plane_res_l[PLANE_CURSOR] =
4152 					(val >> PLANE_WM_LINES_SHIFT) &
4153 						PLANE_WM_LINES_MASK;
4154 		}
4155 	}
4156 }
4157 
4158 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4159 {
4160 	struct drm_device *dev = crtc->dev;
4161 	struct drm_i915_private *dev_priv = to_i915(dev);
4162 	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
4163 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4164 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4165 	struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
4166 	enum i915_pipe pipe = intel_crtc->pipe;
4167 	int level, i, max_level;
4168 	uint32_t temp;
4169 
4170 	max_level = ilk_wm_max_level(dev);
4171 
4172 	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4173 
4174 	for (level = 0; level <= max_level; level++) {
4175 		for (i = 0; i < intel_num_planes(intel_crtc); i++)
4176 			hw->plane[pipe][i][level] =
4177 					I915_READ(PLANE_WM(pipe, i, level));
4178 		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
4179 	}
4180 
4181 	for (i = 0; i < intel_num_planes(intel_crtc); i++)
4182 		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
4183 	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
4184 
4185 	if (!intel_crtc->active)
4186 		return;
4187 
4188 	hw->dirty_pipes |= drm_crtc_mask(crtc);
4189 
4190 	active->linetime = hw->wm_linetime[pipe];
4191 
4192 	for (level = 0; level <= max_level; level++) {
4193 		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4194 			temp = hw->plane[pipe][i][level];
4195 			skl_pipe_wm_active_state(temp, active, false,
4196 						false, i, level);
4197 		}
4198 		temp = hw->plane[pipe][PLANE_CURSOR][level];
4199 		skl_pipe_wm_active_state(temp, active, false, true, i, level);
4200 	}
4201 
4202 	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
4203 		temp = hw->plane_trans[pipe][i];
4204 		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
4205 	}
4206 
4207 	temp = hw->plane_trans[pipe][PLANE_CURSOR];
4208 	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
4209 
4210 	intel_crtc->wm.active.skl = *active;
4211 }
4212 
4213 void skl_wm_get_hw_state(struct drm_device *dev)
4214 {
4215 	struct drm_i915_private *dev_priv = to_i915(dev);
4216 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
4217 	struct drm_crtc *crtc;
4218 
4219 	skl_ddb_get_hw_state(dev_priv, ddb);
4220 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
4221 		skl_pipe_wm_get_hw_state(crtc);
4222 
4223 	if (dev_priv->active_crtcs) {
4224 		/* Fully recompute DDB on first atomic commit */
4225 		dev_priv->wm.distrust_bios_wm = true;
4226 	} else {
4227 		/* Easy/common case; just sanitize the DDB now if everything is off */
4228 		memset(ddb, 0, sizeof(*ddb));
4229 	}
4230 }
4231 
4232 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
4233 {
4234 	struct drm_device *dev = crtc->dev;
4235 	struct drm_i915_private *dev_priv = to_i915(dev);
4236 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
4237 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4238 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4239 	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
4240 	enum i915_pipe pipe = intel_crtc->pipe;
4241 	static const i915_reg_t wm0_pipe_reg[] = {
4242 		[PIPE_A] = WM0_PIPEA_ILK,
4243 		[PIPE_B] = WM0_PIPEB_ILK,
4244 		[PIPE_C] = WM0_PIPEC_IVB,
4245 	};
4246 
4247 	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
4248 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4249 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
4250 
4251 	memset(active, 0, sizeof(*active));
4252 
4253 	active->pipe_enabled = intel_crtc->active;
4254 
4255 	if (active->pipe_enabled) {
4256 		u32 tmp = hw->wm_pipe[pipe];
4257 
4258 		/*
4259 		 * For active pipes LP0 watermark is marked as
4260 		 * enabled, and LP1+ watermarks as disabled since
4261 		 * we can't really reverse compute them in case
4262 		 * multiple pipes are active.
4263 		 */
4264 		active->wm[0].enable = true;
4265 		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
4266 		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
4267 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
4268 		active->linetime = hw->wm_linetime[pipe];
4269 	} else {
4270 		int level, max_level = ilk_wm_max_level(dev);
4271 
4272 		/*
4273 		 * For inactive pipes, all watermark levels
4274 		 * should be marked as enabled but zeroed,
4275 		 * which is what we'd compute them to.
4276 		 */
4277 		for (level = 0; level <= max_level; level++)
4278 			active->wm[level].enable = true;
4279 	}
4280 
4281 	intel_crtc->wm.active.ilk = *active;
4282 }
4283 
4284 #define _FW_WM(value, plane) \
4285 	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
4286 #define _FW_WM_VLV(value, plane) \
4287 	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
4288 
4289 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
4290 			       struct vlv_wm_values *wm)
4291 {
4292 	enum i915_pipe pipe;
4293 	uint32_t tmp;
4294 
4295 	for_each_pipe(dev_priv, pipe) {
4296 		tmp = I915_READ(VLV_DDL(pipe));
4297 
4298 		wm->ddl[pipe].primary =
4299 			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4300 		wm->ddl[pipe].cursor =
4301 			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4302 		wm->ddl[pipe].sprite[0] =
4303 			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4304 		wm->ddl[pipe].sprite[1] =
4305 			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
4306 	}
4307 
4308 	tmp = I915_READ(DSPFW1);
4309 	wm->sr.plane = _FW_WM(tmp, SR);
4310 	wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
4311 	wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
4312 	wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
4313 
4314 	tmp = I915_READ(DSPFW2);
4315 	wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
4316 	wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
4317 	wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
4318 
4319 	tmp = I915_READ(DSPFW3);
4320 	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
4321 
4322 	if (IS_CHERRYVIEW(dev_priv)) {
4323 		tmp = I915_READ(DSPFW7_CHV);
4324 		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4325 		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4326 
4327 		tmp = I915_READ(DSPFW8_CHV);
4328 		wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
4329 		wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
4330 
4331 		tmp = I915_READ(DSPFW9_CHV);
4332 		wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
4333 		wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
4334 
4335 		tmp = I915_READ(DSPHOWM);
4336 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4337 		wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
4338 		wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
4339 		wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
4340 		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4341 		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4342 		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4343 		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4344 		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4345 		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4346 	} else {
4347 		tmp = I915_READ(DSPFW7);
4348 		wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
4349 		wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
4350 
4351 		tmp = I915_READ(DSPHOWM);
4352 		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
4353 		wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
4354 		wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
4355 		wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
4356 		wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
4357 		wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
4358 		wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
4359 	}
4360 }
4361 
4362 #undef _FW_WM
4363 #undef _FW_WM_VLV
4364 
4365 void vlv_wm_get_hw_state(struct drm_device *dev)
4366 {
4367 	struct drm_i915_private *dev_priv = to_i915(dev);
4368 	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
4369 	struct intel_plane *plane;
4370 	enum i915_pipe pipe;
4371 	u32 val;
4372 
4373 	vlv_read_wm_values(dev_priv, wm);
4374 
4375 	for_each_intel_plane(dev, plane) {
4376 		switch (plane->base.type) {
4377 			int sprite;
4378 		case DRM_PLANE_TYPE_CURSOR:
4379 			plane->wm.fifo_size = 63;
4380 			break;
4381 		case DRM_PLANE_TYPE_PRIMARY:
4382 			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
4383 			break;
4384 		case DRM_PLANE_TYPE_OVERLAY:
4385 			sprite = plane->plane;
4386 			plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
4387 			break;
4388 		}
4389 	}
4390 
4391 	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
4392 	wm->level = VLV_WM_LEVEL_PM2;
4393 
4394 	if (IS_CHERRYVIEW(dev_priv)) {
4395 		mutex_lock(&dev_priv->rps.hw_lock);
4396 
4397 		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4398 		if (val & DSP_MAXFIFO_PM5_ENABLE)
4399 			wm->level = VLV_WM_LEVEL_PM5;
4400 
4401 		/*
4402 		 * If DDR DVFS is disabled in the BIOS, Punit
4403 		 * will never ack the request. So if that happens
4404 		 * assume we don't have to enable/disable DDR DVFS
4405 		 * dynamically. To test that just set the REQ_ACK
4406 		 * bit to poke the Punit, but don't change the
4407 		 * HIGH/LOW bits so that we don't actually change
4408 		 * the current state.
4409 		 */
4410 		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4411 		val |= FORCE_DDR_FREQ_REQ_ACK;
4412 		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
4413 
4414 		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
4415 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
4416 			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
4417 				      "assuming DDR DVFS is disabled\n");
4418 			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
4419 		} else {
4420 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
4421 			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
4422 				wm->level = VLV_WM_LEVEL_DDR_DVFS;
4423 		}
4424 
4425 		mutex_unlock(&dev_priv->rps.hw_lock);
4426 	}
4427 
4428 	for_each_pipe(dev_priv, pipe)
4429 		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
4430 			      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
4431 			      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
4432 
4433 	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4434 		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4435 }
4436 
4437 void ilk_wm_get_hw_state(struct drm_device *dev)
4438 {
4439 	struct drm_i915_private *dev_priv = to_i915(dev);
4440 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
4441 	struct drm_crtc *crtc;
4442 
4443 	for_each_crtc(dev, crtc)
4444 		ilk_pipe_wm_get_hw_state(crtc);
4445 
4446 	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4447 	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4448 	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4449 
4450 	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4451 	if (INTEL_INFO(dev)->gen >= 7) {
4452 		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4453 		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4454 	}
4455 
4456 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4457 		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4458 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4459 	else if (IS_IVYBRIDGE(dev))
4460 		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4461 			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4462 
4463 	hw->enable_fbc_wm =
4464 		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4465 }
4466 
4467 /**
4468  * intel_update_watermarks - update FIFO watermark values based on current modes
4469  *
4470  * Calculate watermark values for the various WM regs based on current mode
4471  * and plane configuration.
4472  *
4473  * There are several cases to deal with here:
4474  *   - normal (i.e. non-self-refresh)
4475  *   - self-refresh (SR) mode
4476  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4477  *   - lines are small relative to FIFO size (buffer can hold more than 2
4478  *     lines), so need to account for TLB latency
4479  *
4480  *   The normal calculation is:
4481  *     watermark = dotclock * bytes per pixel * latency
4482  *   where latency is platform & configuration dependent (we assume pessimal
4483  *   values here).
4484  *
4485  *   The SR calculation is:
4486  *     watermark = (trunc(latency/line time)+1) * surface width *
4487  *       bytes per pixel
4488  *   where
4489  *     line time = htotal / dotclock
4490  *     surface width = hdisplay for normal plane and 64 for cursor
4491  *   and latency is assumed to be high, as above.
4492  *
4493  * The final value programmed to the register should always be rounded up,
4494  * and include an extra 2 entries to account for clock crossings.
4495  *
4496  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4497  * to set the non-SR watermarks to 8.
4498  */
4499 void intel_update_watermarks(struct drm_crtc *crtc)
4500 {
4501 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4502 
4503 	if (dev_priv->display.update_wm)
4504 		dev_priv->display.update_wm(crtc);
4505 }
4506 
4507 /*
4508  * Lock protecting IPS related data structures
4509  */
4510 DEFINE_SPINLOCK(mchdev_lock);
4511 
4512 /* Global for IPS driver to get at the current i915 device. Protected by
4513  * mchdev_lock. */
4514 static struct drm_i915_private *i915_mch_dev;
4515 
4516 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4517 {
4518 	u16 rgvswctl;
4519 
4520 	assert_spin_locked(&mchdev_lock);
4521 
4522 	rgvswctl = I915_READ16(MEMSWCTL);
4523 	if (rgvswctl & MEMCTL_CMD_STS) {
4524 		DRM_DEBUG("gpu busy, RCS change rejected\n");
4525 		return false; /* still busy with another command */
4526 	}
4527 
4528 	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4529 		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4530 	I915_WRITE16(MEMSWCTL, rgvswctl);
4531 	POSTING_READ16(MEMSWCTL);
4532 
4533 	rgvswctl |= MEMCTL_CMD_STS;
4534 	I915_WRITE16(MEMSWCTL, rgvswctl);
4535 
4536 	return true;
4537 }
4538 
4539 static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4540 {
4541 	u32 rgvmodectl;
4542 	u8 fmax, fmin, fstart, vstart;
4543 
4544 	spin_lock_irq(&mchdev_lock);
4545 
4546 	rgvmodectl = I915_READ(MEMMODECTL);
4547 
4548 	/* Enable temp reporting */
4549 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4550 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4551 
4552 	/* 100ms RC evaluation intervals */
4553 	I915_WRITE(RCUPEI, 100000);
4554 	I915_WRITE(RCDNEI, 100000);
4555 
4556 	/* Set max/min thresholds to 90ms and 80ms respectively */
4557 	I915_WRITE(RCBMAXAVG, 90000);
4558 	I915_WRITE(RCBMINAVG, 80000);
4559 
4560 	I915_WRITE(MEMIHYST, 1);
4561 
4562 	/* Set up min, max, and cur for interrupt handling */
4563 	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4564 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4565 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4566 		MEMMODE_FSTART_SHIFT;
4567 
4568 	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
4569 		PXVFREQ_PX_SHIFT;
4570 
4571 	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4572 	dev_priv->ips.fstart = fstart;
4573 
4574 	dev_priv->ips.max_delay = fstart;
4575 	dev_priv->ips.min_delay = fmin;
4576 	dev_priv->ips.cur_delay = fstart;
4577 
4578 	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4579 			 fmax, fmin, fstart);
4580 
4581 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4582 
4583 	/*
4584 	 * Interrupts will be enabled in ironlake_irq_postinstall
4585 	 */
4586 
4587 	I915_WRITE(VIDSTART, vstart);
4588 	POSTING_READ(VIDSTART);
4589 
4590 	rgvmodectl |= MEMMODE_SWMODE_EN;
4591 	I915_WRITE(MEMMODECTL, rgvmodectl);
4592 
4593 	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4594 		DRM_ERROR("stuck trying to change perf mode\n");
4595 	mdelay(1);
4596 
4597 	ironlake_set_drps(dev_priv, fstart);
4598 
4599 	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4600 		I915_READ(DDREC) + I915_READ(CSIEC);
4601 	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4602 	dev_priv->ips.last_count2 = I915_READ(GFXEC);
4603 	dev_priv->ips.last_time2 = ktime_get_raw_ns();
4604 
4605 	spin_unlock_irq(&mchdev_lock);
4606 }
4607 
4608 static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4609 {
4610 	u16 rgvswctl;
4611 
4612 	spin_lock_irq(&mchdev_lock);
4613 
4614 	rgvswctl = I915_READ16(MEMSWCTL);
4615 
4616 	/* Ack interrupts, disable EFC interrupt */
4617 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4618 	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4619 	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4620 	I915_WRITE(DEIIR, DE_PCU_EVENT);
4621 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4622 
4623 	/* Go back to the starting frequency */
4624 	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4625 	mdelay(1);
4626 	rgvswctl |= MEMCTL_CMD_STS;
4627 	I915_WRITE(MEMSWCTL, rgvswctl);
4628 	mdelay(1);
4629 
4630 	spin_unlock_irq(&mchdev_lock);
4631 }
4632 
4633 /* There's a funny hw issue where the hw returns all 0 when reading from
4634  * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4635  * ourselves, instead of doing an RMW cycle (which might result in us clearing
4636  * all limits and the GPU getting stuck at whatever frequency it is at).
4637  */
4638 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4639 {
4640 	u32 limits;
4641 
4642 	/* Only set the down limit when we've reached the lowest level to avoid
4643 	 * getting more interrupts, otherwise leave this clear. This prevents a
4644 	 * race in the hw when coming out of rc6: There's a tiny window where
4645 	 * the hw runs at the minimal clock before selecting the desired
4646 	 * frequency, if the down threshold expires in that window we will not
4647 	 * receive a down interrupt. */
4648 	if (IS_GEN9(dev_priv)) {
4649 		limits = (dev_priv->rps.max_freq_softlimit) << 23;
4650 		if (val <= dev_priv->rps.min_freq_softlimit)
4651 			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
4652 	} else {
4653 		limits = dev_priv->rps.max_freq_softlimit << 24;
4654 		if (val <= dev_priv->rps.min_freq_softlimit)
4655 			limits |= dev_priv->rps.min_freq_softlimit << 16;
4656 	}
4657 
4658 	return limits;
4659 }
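
/*
 * For illustration (assumed softlimits): on gen9 with max = 0x16 and
 * min = 0x04, a request of val <= 0x04 programs (0x16 << 23) | (0x04 << 14);
 * any higher request programs only the up limit, 0x16 << 23.
 */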
4660 
4661 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4662 {
4663 	int new_power;
4664 	u32 threshold_up = 0, threshold_down = 0; /* in % */
4665 	u32 ei_up = 0, ei_down = 0;
4666 
4667 	new_power = dev_priv->rps.power;
4668 	switch (dev_priv->rps.power) {
4669 	case LOW_POWER:
4670 		if (val > dev_priv->rps.efficient_freq + 1 &&
4671 		    val > dev_priv->rps.cur_freq)
4672 			new_power = BETWEEN;
4673 		break;
4674 
4675 	case BETWEEN:
4676 		if (val <= dev_priv->rps.efficient_freq &&
4677 		    val < dev_priv->rps.cur_freq)
4678 			new_power = LOW_POWER;
4679 		else if (val >= dev_priv->rps.rp0_freq &&
4680 			 val > dev_priv->rps.cur_freq)
4681 			new_power = HIGH_POWER;
4682 		break;
4683 
4684 	case HIGH_POWER:
4685 		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4686 		    val < dev_priv->rps.cur_freq)
4687 			new_power = BETWEEN;
4688 		break;
4689 	}
4690 	/* Max/min bins are special */
4691 	if (val <= dev_priv->rps.min_freq_softlimit)
4692 		new_power = LOW_POWER;
4693 	if (val >= dev_priv->rps.max_freq_softlimit)
4694 		new_power = HIGH_POWER;
4695 	if (new_power == dev_priv->rps.power)
4696 		return;
4697 
4698 	/* Note the units here are not exactly 1us, but 1280ns. */
4699 	switch (new_power) {
4700 	case LOW_POWER:
4701 		/* Upclock if more than 95% busy over 16ms */
4702 		ei_up = 16000;
4703 		threshold_up = 95;
4704 
4705 		/* Downclock if less than 85% busy over 32ms */
4706 		ei_down = 32000;
4707 		threshold_down = 85;
4708 		break;
4709 
4710 	case BETWEEN:
4711 		/* Upclock if more than 90% busy over 13ms */
4712 		ei_up = 13000;
4713 		threshold_up = 90;
4714 
4715 		/* Downclock if less than 75% busy over 32ms */
4716 		ei_down = 32000;
4717 		threshold_down = 75;
4718 		break;
4719 
4720 	case HIGH_POWER:
4721 		/* Upclock if more than 85% busy over 10ms */
4722 		ei_up = 10000;
4723 		threshold_up = 85;
4724 
4725 		/* Downclock if less than 60% busy over 32ms */
4726 		ei_down = 32000;
4727 		threshold_down = 60;
4728 		break;
4729 	}
4730 
4731 	I915_WRITE(GEN6_RP_UP_EI,
4732 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
4733 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
4734 		   GT_INTERVAL_FROM_US(dev_priv,
4735 				       ei_up * threshold_up / 100));
4736 
4737 	I915_WRITE(GEN6_RP_DOWN_EI,
4738 		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
4739 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4740 		   GT_INTERVAL_FROM_US(dev_priv,
4741 				       ei_down * threshold_down / 100));
4742 
4743 	I915_WRITE(GEN6_RP_CONTROL,
4744 		   GEN6_RP_MEDIA_TURBO |
4745 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
4746 		   GEN6_RP_MEDIA_IS_GFX |
4747 		   GEN6_RP_ENABLE |
4748 		   GEN6_RP_UP_BUSY_AVG |
4749 		   GEN6_RP_DOWN_IDLE_AVG);
4750 
4751 	dev_priv->rps.power = new_power;
4752 	dev_priv->rps.up_threshold = threshold_up;
4753 	dev_priv->rps.down_threshold = threshold_down;
4754 	dev_priv->rps.last_adj = 0;
4755 }
4756 
4757 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4758 {
4759 	u32 mask = 0;
4760 
4761 	if (val > dev_priv->rps.min_freq_softlimit)
4762 		mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4763 	if (val < dev_priv->rps.max_freq_softlimit)
4764 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
4765 
4766 	mask &= dev_priv->pm_rps_events;
4767 
4768 	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
4769 }
4770 
4771 /* gen6_set_rps is called to update the frequency request, but should also be
4772  * called when the range (min_delay and max_delay) is modified so that we can
4773  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4774 static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4775 {
4776 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4777 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4778 		return;
4779 
4780 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4781 	WARN_ON(val > dev_priv->rps.max_freq);
4782 	WARN_ON(val < dev_priv->rps.min_freq);
4783 
4784 	/* min/max delay may still have been modified so be sure to
4785 	 * write the limits value.
4786 	 */
4787 	if (val != dev_priv->rps.cur_freq) {
4788 		gen6_set_rps_thresholds(dev_priv, val);
4789 
4790 		if (IS_GEN9(dev_priv))
4791 			I915_WRITE(GEN6_RPNSWREQ,
4792 				   GEN9_FREQUENCY(val));
4793 		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4794 			I915_WRITE(GEN6_RPNSWREQ,
4795 				   HSW_FREQUENCY(val));
4796 		else
4797 			I915_WRITE(GEN6_RPNSWREQ,
4798 				   GEN6_FREQUENCY(val) |
4799 				   GEN6_OFFSET(0) |
4800 				   GEN6_AGGRESSIVE_TURBO);
4801 	}
4802 
4803 	/* Make sure we continue to get interrupts
4804 	 * until we hit the minimum or maximum frequencies.
4805 	 */
4806 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
4807 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4808 
4809 	POSTING_READ(GEN6_RPNSWREQ);
4810 
4811 	dev_priv->rps.cur_freq = val;
4812 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4813 }
4814 
4815 static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4816 {
4817 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4818 	WARN_ON(val > dev_priv->rps.max_freq);
4819 	WARN_ON(val < dev_priv->rps.min_freq);
4820 
4821 	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4822 		      "Odd GPU freq value\n"))
4823 		val &= ~1;
4824 
4825 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4826 
4827 	if (val != dev_priv->rps.cur_freq) {
4828 		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4829 		if (!IS_CHERRYVIEW(dev_priv))
4830 			gen6_set_rps_thresholds(dev_priv, val);
4831 	}
4832 
4833 	dev_priv->rps.cur_freq = val;
4834 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4835 }
4836 
4837 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
4838  *
4839  * If Gfx is idle, then:
4840  * 1. Forcewake the Media well.
4841  * 2. Request the idle frequency.
4842  * 3. Release forcewake of the Media well.
4843  */
4844 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4845 {
4846 	u32 val = dev_priv->rps.idle_freq;
4847 
4848 	if (dev_priv->rps.cur_freq <= val)
4849 		return;
4850 
4851 	/* Wake up the media well, as that takes a lot less
4852 	 * power than the Render well. */
4853 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4854 	valleyview_set_rps(dev_priv, val);
4855 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4856 }
4857 
4858 void gen6_rps_busy(struct drm_i915_private *dev_priv)
4859 {
4860 	mutex_lock(&dev_priv->rps.hw_lock);
4861 	if (dev_priv->rps.enabled) {
4862 		if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
4863 			gen6_rps_reset_ei(dev_priv);
4864 		I915_WRITE(GEN6_PMINTRMSK,
4865 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4866 
4867 		gen6_enable_rps_interrupts(dev_priv);
4868 
4869 		/* Ensure we start at the user's desired frequency */
4870 		intel_set_rps(dev_priv,
4871 			      clamp(dev_priv->rps.cur_freq,
4872 				    dev_priv->rps.min_freq_softlimit,
4873 				    dev_priv->rps.max_freq_softlimit));
4874 	}
4875 	mutex_unlock(&dev_priv->rps.hw_lock);
4876 }
4877 
4878 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4879 {
4880 	/* Flush our bottom-half so that it does not race with us
4881 	 * setting the idle frequency and so that it is bounded by
4882 	 * our rpm wakeref. And then disable the interrupts to stop any
4883 	 * further RPS reclocking whilst we are asleep.
4884 	 */
4885 	gen6_disable_rps_interrupts(dev_priv);
4886 
4887 	mutex_lock(&dev_priv->rps.hw_lock);
4888 	if (dev_priv->rps.enabled) {
4889 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4890 			vlv_set_rps_idle(dev_priv);
4891 		else
4892 			gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4893 		dev_priv->rps.last_adj = 0;
4894 		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4895 	}
4896 	mutex_unlock(&dev_priv->rps.hw_lock);
4897 
4898 	lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE);
4899 	while (!list_empty(&dev_priv->rps.clients))
4900 		list_del_init(dev_priv->rps.clients.next);
4901 	lockmgr(&dev_priv->rps.client_lock, LK_RELEASE);
4902 }
4903 
4904 void gen6_rps_boost(struct drm_i915_private *dev_priv,
4905 		    struct intel_rps_client *rps,
4906 		    unsigned long submitted)
4907 {
4908 	/* This is intentionally racy! We peek at the state here, then
4909 	 * validate inside the RPS worker.
4910 	 */
4911 	if (!(dev_priv->gt.awake &&
4912 	      dev_priv->rps.enabled &&
4913 	      dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
4914 		return;
4915 
4916 	/* Force an RPS boost (and don't count it against the client) if
4917 	 * the GPU is severely congested.
4918 	 */
4919 	if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
4920 		rps = NULL;
4921 
4922 	lockmgr(&dev_priv->rps.client_lock, LK_EXCLUSIVE);
4923 	if (rps == NULL || list_empty(&rps->link)) {
4924 		spin_lock_irq(&dev_priv->irq_lock);
4925 		if (dev_priv->rps.interrupts_enabled) {
4926 			dev_priv->rps.client_boost = true;
4927 			schedule_work(&dev_priv->rps.work);
4928 		}
4929 		spin_unlock_irq(&dev_priv->irq_lock);
4930 
4931 		if (rps != NULL) {
4932 			list_add(&rps->link, &dev_priv->rps.clients);
4933 			rps->boosts++;
4934 		} else
4935 			dev_priv->rps.boosts++;
4936 	}
4937 	lockmgr(&dev_priv->rps.client_lock, LK_RELEASE);
4938 }
4939 
4940 void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
4941 {
4942 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4943 		valleyview_set_rps(dev_priv, val);
4944 	else
4945 		gen6_set_rps(dev_priv, val);
4946 }
4947 
4948 static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
4949 {
4950 	I915_WRITE(GEN6_RC_CONTROL, 0);
4951 	I915_WRITE(GEN9_PG_ENABLE, 0);
4952 }
4953 
4954 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
4955 {
4956 	I915_WRITE(GEN6_RP_CONTROL, 0);
4957 }
4958 
4959 static void gen6_disable_rps(struct drm_i915_private *dev_priv)
4960 {
4961 	I915_WRITE(GEN6_RC_CONTROL, 0);
4962 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4963 	I915_WRITE(GEN6_RP_CONTROL, 0);
4964 }
4965 
4966 static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
4967 {
4968 	I915_WRITE(GEN6_RC_CONTROL, 0);
4969 }
4970 
4971 static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
4972 {
4973 	/* We take forcewake before disabling RC6;
4974 	 * this is what the BIOS expects when going into suspend */
4975 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4976 
4977 	I915_WRITE(GEN6_RC_CONTROL, 0);
4978 
4979 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4980 }
4981 
4982 static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
4983 {
4984 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4985 		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4986 			mode = GEN6_RC_CTL_RC6_ENABLE;
4987 		else
4988 			mode = 0;
4989 	}
4990 	if (HAS_RC6p(dev_priv))
4991 		DRM_DEBUG_DRIVER("Enabling RC6 states: "
4992 				 "RC6 %s RC6p %s RC6pp %s\n",
4993 				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4994 				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
4995 				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
4996 
4997 	else
4998 		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
4999 				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
5000 }
5001 
5002 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
5003 {
5004 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
5005 	bool enable_rc6 = true;
5006 	unsigned long rc6_ctx_base;
5007 	u32 rc_ctl;
5008 	int rc_sw_target;
5009 
5010 	rc_ctl = I915_READ(GEN6_RC_CONTROL);
5011 	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5012 		       RC_SW_TARGET_STATE_SHIFT;
5013 	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5014 			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5015 			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5016 			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5017 			 rc_sw_target);
5018 
5019 	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
5020 		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
5021 		enable_rc6 = false;
5022 	}
5023 
5024 	/*
5025 	 * The exact context size is not known for BXT, so assume a page size
5026 	 * for this check.
5027 	 */
5028 	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
5029 	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
5030 	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
5031 					ggtt->stolen_reserved_size))) {
5032 		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
5033 		enable_rc6 = false;
5034 	}
5035 
5036 	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
5037 	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
5038 	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
5039 	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
5040 		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5041 		enable_rc6 = false;
5042 	}
5043 
5044 	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5045 	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5046 	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5047 		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5048 		enable_rc6 = false;
5049 	}
5050 
5051 	if (!I915_READ(GEN6_GFXPAUSE)) {
5052 		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
5053 		enable_rc6 = false;
5054 	}
5055 
5056 	if (!I915_READ(GEN8_MISC_CTRL0)) {
5057 		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
5058 		enable_rc6 = false;
5059 	}
5060 
5061 	return enable_rc6;
5062 }
5063 
5064 int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5065 {
5066 	/* No RC6 before Ironlake, and the Ironlake RC6 code is gone. */
5067 	if (INTEL_INFO(dev_priv)->gen < 6)
5068 		return 0;
5069 
5070 	if (!enable_rc6)
5071 		return 0;
5072 
5073 	if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
5074 		DRM_INFO("RC6 disabled by BIOS\n");
5075 		return 0;
5076 	}
5077 
5078 	/* Respect the kernel parameter if it is set */
5079 	if (enable_rc6 >= 0) {
5080 		int mask;
5081 
5082 		if (HAS_RC6p(dev_priv))
5083 			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
5084 			       INTEL_RC6pp_ENABLE;
5085 		else
5086 			mask = INTEL_RC6_ENABLE;
5087 
5088 		if ((enable_rc6 & mask) != enable_rc6)
5089 			DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5090 					 "(requested %d, valid %d)\n",
5091 					 enable_rc6 & mask, enable_rc6, mask);
5092 
5093 		return enable_rc6 & mask;
5094 	}
5095 
5096 	if (IS_IVYBRIDGE(dev_priv))
5097 		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
5098 
5099 	return INTEL_RC6_ENABLE;
5100 }
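
/*
 * Added illustrative note (not in the original source): with the module
 * parameter enable_rc6=7 on hardware lacking RC6p/RC6pp, the mask above is
 * INTEL_RC6_ENABLE only, so the function returns 7 & 1 == 1 and logs the
 * adjustment via DRM_DEBUG_DRIVER.
 */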
5101 
5102 static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
5103 {
5104 	/* All of these values are in units of 50MHz */
5105 
5106 	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
5107 	if (IS_BROXTON(dev_priv)) {
5108 		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
5109 		dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
5110 		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
5111 		dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
5112 	} else {
5113 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
5114 		dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
5115 		dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
5116 		dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
5117 	}
5118 	/* hw_max = RP0 until we check for overclocking */
5119 	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
5120 
5121 	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
5122 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
5123 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5124 		u32 ddcc_status = 0;
5125 
5126 		if (sandybridge_pcode_read(dev_priv,
5127 					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
5128 					   &ddcc_status) == 0)
5129 			dev_priv->rps.efficient_freq =
5130 				clamp_t(u8,
5131 					((ddcc_status >> 8) & 0xff),
5132 					dev_priv->rps.min_freq,
5133 					dev_priv->rps.max_freq);
5134 	}
5135 
5136 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5137 		/* Store the frequency values in 16.66 MHz units, which is
5138 		 * the natural hardware unit for SKL
5139 		 */
5140 		dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
5141 		dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
5142 		dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
5143 		dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
5144 		dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
5145 	}
5146 }
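
/*
 * Worked example with assumed fuse values: a raw RP0 field of 0x16 (22)
 * means 22 * 50 MHz = 1100 MHz.  On SKL/KBL the same limit is rescaled to
 * 22 * GEN9_FREQ_SCALER = 66 units of 16.66 MHz, still ~1100 MHz;
 * intel_gpu_freq() undoes the scaling when frequencies are reported.
 */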
5147 
5148 static void reset_rps(struct drm_i915_private *dev_priv,
5149 		      void (*set)(struct drm_i915_private *, u8))
5150 {
5151 	u8 freq = dev_priv->rps.cur_freq;
5152 
5153 	/* force a reset */
5154 	dev_priv->rps.power = -1;
5155 	dev_priv->rps.cur_freq = -1;
5156 
5157 	set(dev_priv, freq);
5158 }
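
/*
 * Added commentary: the -1 sentinels above guarantee that set() sees both a
 * power-state and a frequency "change", so the full set of threshold and
 * request registers is reprogrammed instead of being short-circuited by the
 * no-change checks in gen6_set_rps()/valleyview_set_rps().
 */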
5159 
5160 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
5161 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
5162 {
5163 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5164 
5165 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
5166 	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5167 		/*
5168 		 * BIOS could leave the HW Turbo enabled, so we need to explicitly
5169 		 * clear out the Control register just to avoid inconsistency
5170 		 * with the debugfs interface, which would otherwise show Turbo as
5171 		 * enabled; that is not what the user expects after adding the
5172 		 * WaGsvDisableTurbo. Apart from this there is no problem even
5173 		 * if the Turbo is left enabled in the Control register, as the
5174 		 * Up/Down interrupts would remain masked.
5175 		 */
5176 		gen9_disable_rps(dev_priv);
5177 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5178 		return;
5179 	}
5180 
5181 	/* Program defaults and thresholds for RPS */
5182 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
5183 		GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
5184 
5185 	/* 1 second timeout */
5186 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
5187 		GT_INTERVAL_FROM_US(dev_priv, 1000000));
5188 
5189 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
5190 
5191 	/* Leaning on the below call to gen6_set_rps to program/setup the
5192 	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
5193 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
5194 	reset_rps(dev_priv, gen6_set_rps);
5195 
5196 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5197 }
5198 
5199 static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
5200 {
5201 	struct intel_engine_cs *engine;
5202 	uint32_t rc6_mask = 0;
5203 
5204 	/* 1a: Software RC state - RC0 */
5205 	I915_WRITE(GEN6_RC_STATE, 0);
5206 
5207 	/* 1b: Get forcewake during program sequence. Although the driver
5208 	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
5209 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5210 
5211 	/* 2a: Disable RC states. */
5212 	I915_WRITE(GEN6_RC_CONTROL, 0);
5213 
5214 	/* 2b: Program RC6 thresholds. */
5215 
5216 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
5217 	if (IS_SKYLAKE(dev_priv))
5218 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
5219 	else
5220 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
5221 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5222 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5223 	for_each_engine(engine, dev_priv)
5224 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5225 
5226 	if (HAS_GUC(dev_priv))
5227 		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
5228 
5229 	I915_WRITE(GEN6_RC_SLEEP, 0);
5230 
5231 	/* 2c: Program Coarse Power Gating Policies. */
5232 	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
5233 	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
5234 
5235 	/* 3a: Enable RC6 */
5236 	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5237 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5238 	DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
5239 	/* WaRsUseTimeoutMode */
5240 	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
5241 	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
5242 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
5243 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5244 			   GEN7_RC_CTL_TO_MODE |
5245 			   rc6_mask);
5246 	} else {
5247 		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
5248 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5249 			   GEN6_RC_CTL_EI_MODE(1) |
5250 			   rc6_mask);
5251 	}
5252 
5253 	/*
5254 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
5255 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
5256 	 */
5257 	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
5258 		I915_WRITE(GEN9_PG_ENABLE, 0);
5259 	else
5260 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
5261 				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
5262 
5263 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5264 }
5265 
5266 static void gen8_enable_rps(struct drm_i915_private *dev_priv)
5267 {
5268 	struct intel_engine_cs *engine;
5269 	uint32_t rc6_mask = 0;
5270 
5271 	/* 1a: Software RC state - RC0 */
5272 	I915_WRITE(GEN6_RC_STATE, 0);
5273 
5274 	/* 1c & 1d: Get forcewake during program sequence. Although the driver
5275 	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
5276 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5277 
5278 	/* 2a: Disable RC states. */
5279 	I915_WRITE(GEN6_RC_CONTROL, 0);
5280 
5281 	/* 2b: Program RC6 thresholds. */
5282 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5283 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5284 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5285 	for_each_engine(engine, dev_priv)
5286 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5287 	I915_WRITE(GEN6_RC_SLEEP, 0);
5288 	if (IS_BROADWELL(dev_priv))
5289 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
5290 	else
5291 		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5292 
5293 	/* 3: Enable RC6 */
5294 	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5295 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
5296 	intel_print_rc6_info(dev_priv, rc6_mask);
5297 	if (IS_BROADWELL(dev_priv))
5298 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5299 				GEN7_RC_CTL_TO_MODE |
5300 				rc6_mask);
5301 	else
5302 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
5303 				GEN6_RC_CTL_EI_MODE(1) |
5304 				rc6_mask);
5305 
5306 	/* 4: Program defaults and thresholds for RPS */
5307 	I915_WRITE(GEN6_RPNSWREQ,
5308 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5309 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
5310 		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
5311 	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
5312 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
5313 
5314 	/* Docs recommend 900 MHz and 300 MHz respectively */
5315 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
5316 		   dev_priv->rps.max_freq_softlimit << 24 |
5317 		   dev_priv->rps.min_freq_softlimit << 16);
5318 
5319 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
5320 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
5321 	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
5322 	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
5323 
5324 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5325 
5326 	/* 5: Enable RPS */
5327 	I915_WRITE(GEN6_RP_CONTROL,
5328 		   GEN6_RP_MEDIA_TURBO |
5329 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
5330 		   GEN6_RP_MEDIA_IS_GFX |
5331 		   GEN6_RP_ENABLE |
5332 		   GEN6_RP_UP_BUSY_AVG |
5333 		   GEN6_RP_DOWN_IDLE_AVG);
5334 
5335 	/* 6: Ring frequency + overclocking (our driver does this later) */
5336 
5337 	reset_rps(dev_priv, gen6_set_rps);
5338 
5339 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5340 }
5341 
5342 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5343 {
5344 	struct intel_engine_cs *engine;
5345 	u32 rc6vids, rc6_mask = 0;
5346 	u32 gtfifodbg;
5347 	int rc6_mode;
5348 	int ret;
5349 
5350 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5351 
5352 	/* Here begins a magic sequence of register writes to enable
5353 	 * auto-downclocking.
5354 	 *
5355 	 * There might be some value in exposing these to
5356 	 * userspace...
5357 	 */
5358 	I915_WRITE(GEN6_RC_STATE, 0);
5359 
5360 	/* Clear the DBG now so we don't get confused by earlier errors */
5361 	gtfifodbg = I915_READ(GTFIFODBG);
5362 	if (gtfifodbg) {
5363 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
5364 		I915_WRITE(GTFIFODBG, gtfifodbg);
5365 	}
5366 
5367 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5368 
5369 	/* disable the counters and set deterministic thresholds */
5370 	I915_WRITE(GEN6_RC_CONTROL, 0);
5371 
5372 	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
5373 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
5374 	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
5375 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5376 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5377 
5378 	for_each_engine(engine, dev_priv)
5379 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5380 
5381 	I915_WRITE(GEN6_RC_SLEEP, 0);
5382 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5383 	if (IS_IVYBRIDGE(dev_priv))
5384 		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5385 	else
5386 		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
5387 	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
5388 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5389 
5390 	/* Check if we are enabling RC6 */
5391 	rc6_mode = intel_enable_rc6();
5392 	if (rc6_mode & INTEL_RC6_ENABLE)
5393 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5394 
5395 	/* We don't use those on Haswell */
5396 	if (!IS_HASWELL(dev_priv)) {
5397 		if (rc6_mode & INTEL_RC6p_ENABLE)
5398 			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5399 
5400 		if (rc6_mode & INTEL_RC6pp_ENABLE)
5401 			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5402 	}
5403 
5404 	intel_print_rc6_info(dev_priv, rc6_mask);
5405 
5406 	I915_WRITE(GEN6_RC_CONTROL,
5407 		   rc6_mask |
5408 		   GEN6_RC_CTL_EI_MODE(1) |
5409 		   GEN6_RC_CTL_HW_ENABLE);
5410 
5411 	/* Power down if completely idle for over 50ms */
5412 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
5413 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5414 
5415 	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
5416 	if (ret)
5417 		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
5418 
5419 	reset_rps(dev_priv, gen6_set_rps);
5420 
5421 	rc6vids = 0;
5422 	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5423 	if (IS_GEN6(dev_priv) && ret) {
5424 		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5425 	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5426 		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5427 			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5428 		rc6vids &= 0xffff00;
5429 		rc6vids |= GEN6_ENCODE_RC6_VID(450);
5430 		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
5431 		if (ret)
5432 			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
5433 	}
5434 
5435 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5436 }
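
/*
 * Added note on the rc6vids fixup above: per the GEN6_ENCODE_RC6_VID()/
 * GEN6_DECODE_RC6_VID() definitions the low byte encodes the RC6 voltage
 * in 5 mV steps above 245 mV, so the corrected 450 mV minimum corresponds
 * to GEN6_ENCODE_RC6_VID(450) == (450 - 245) / 5 == 41.
 */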
5437 
5438 static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5439 {
5440 	int min_freq = 15;
5441 	unsigned int gpu_freq;
5442 	unsigned int max_ia_freq, min_ring_freq;
5443 	unsigned int max_gpu_freq, min_gpu_freq;
5444 	int scaling_factor = 180;
5445 
5446 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5447 
5448 #if 0
5449 	policy = cpufreq_cpu_get(0);
5450 	if (policy) {
5451 		max_ia_freq = policy->cpuinfo.max_freq;
5452 		cpufreq_cpu_put(policy);
5453 	} else {
5454 		/*
5455 		 * Default to measured freq if none found, PCU will ensure we
5456 		 * don't go over
5457 		 */
5458 		max_ia_freq = tsc_khz;
5459 	}
5460 #else
5461 	max_ia_freq = tsc_frequency / 1000;
5462 #endif
5463 
5464 	/* Convert from kHz to MHz */
5465 	max_ia_freq /= 1000;
5466 
5467 	min_ring_freq = I915_READ(DCLK) & 0xf;
5468 	/* convert DDR frequency from units of 266.6MHz to bandwidth */
5469 	min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5470 
5471 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5472 		/* Convert GT frequency to 50 MHz units */
5473 		min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5474 		max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
5475 	} else {
5476 		min_gpu_freq = dev_priv->rps.min_freq;
5477 		max_gpu_freq = dev_priv->rps.max_freq;
5478 	}
5479 
5480 	/*
5481 	 * For each potential GPU frequency, load a ring frequency we'd like
5482 	 * to use for memory access.  We do this by specifying the IA frequency
5483 	 * the PCU should use as a reference to determine the ring frequency.
5484 	 */
5485 	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
5486 		int diff = max_gpu_freq - gpu_freq;
5487 		unsigned int ia_freq = 0, ring_freq = 0;
5488 
5489 		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5490 			/*
5491 			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5492 			 * No floor required for ring frequency on SKL.
5493 			 */
5494 			ring_freq = gpu_freq;
5495 		} else if (INTEL_INFO(dev_priv)->gen >= 8) {
5496 			/* max(2 * GT, DDR). NB: GT is 50MHz units */
5497 			ring_freq = max(min_ring_freq, gpu_freq);
5498 		} else if (IS_HASWELL(dev_priv)) {
5499 			ring_freq = mult_frac(gpu_freq, 5, 4);
5500 			ring_freq = max(min_ring_freq, ring_freq);
5501 			/* leave ia_freq as the default, chosen by cpufreq */
5502 		} else {
5503 			/* On older processors, there is no separate ring
5504 			 * clock domain, so in order to boost the bandwidth
5505 			 * of the ring, we need to upclock the CPU (ia_freq).
5506 			 *
5507 			 * For GPU frequencies less than 750MHz,
5508 			 * just use the lowest ring freq.
5509 			 */
5510 			if (gpu_freq < min_freq)
5511 				ia_freq = 800;
5512 			else
5513 				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
5514 			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
5515 		}
5516 
5517 		sandybridge_pcode_write(dev_priv,
5518 					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
5519 					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
5520 					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
5521 					gpu_freq);
5522 	}
5523 }
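
/*
 * Worked example for the legacy (pre-HSW) branch above, with assumed
 * numbers: max_ia_freq = 3400 MHz, scaling_factor = 180 and a GPU bin
 * 4 below max gives ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz, which
 * DIV_ROUND_CLOSEST(ia_freq, 100) turns into 30 before it is packed into
 * the pcode word together with ring_freq and gpu_freq.
 */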
5524 
5525 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5526 {
5527 	u32 val, rp0;
5528 
5529 	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5530 
5531 	switch (INTEL_INFO(dev_priv)->eu_total) {
5532 	case 8:
5533 		/* (2 * 4) config */
5534 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
5535 		break;
5536 	case 12:
5537 		/* (2 * 6) config */
5538 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
5539 		break;
5540 	case 16:
5541 		/* (2 * 8) config */
5542 	default:
5543 		/* Setting (2 * 8) Min RP0 for any other combination */
5544 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
5545 		break;
5546 	}
5547 
5548 	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
5549 
5550 	return rp0;
5551 }
5552 
5553 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5554 {
5555 	u32 val, rpe;
5556 
5557 	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
5558 	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
5559 
5560 	return rpe;
5561 }
5562 
5563 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
5564 {
5565 	u32 val, rp1;
5566 
5567 	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5568 	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
5569 
5570 	return rp1;
5571 }
5572 
5573 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5574 {
5575 	u32 val, rp1;
5576 
5577 	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5578 
5579 	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5580 
5581 	return rp1;
5582 }
5583 
5584 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5585 {
5586 	u32 val, rp0;
5587 
5588 	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5589 
5590 	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5591 	/* Clamp to max */
5592 	rp0 = min_t(u32, rp0, 0xea);
5593 
5594 	return rp0;
5595 }
5596 
5597 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5598 {
5599 	u32 val, rpe;
5600 
5601 	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5602 	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5603 	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5604 	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5605 
5606 	return rpe;
5607 }
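
/*
 * Illustrative example: the RPe fuse is split across the LO/HI registers
 * read above; if the LO field yields the low five bits (say 0x1d) and the
 * HI field contributes 0x2, the combined value is 0x1d | (0x2 << 5) = 0x5d.
 */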
5608 
5609 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5610 {
5611 	u32 val;
5612 
5613 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5614 	/*
5615 	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
5616 	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
5617 	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
5618 	 * a frequency, Punit will not allow values below 0xc0. Clamp it to 0xc0
5619 	 * to make sure it matches what Punit accepts.
5620 	 */
5621 	return max_t(u32, val, 0xc0);
5622 }
5623 
5624 /* Check that the pctx buffer wasn't moved under us. */
5625 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5626 {
5627 	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5628 
5629 	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5630 			     dev_priv->vlv_pctx->stolen->start);
5631 }
5632 
5633 
5634 /* Check that the pcbr address is not empty. */
5635 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5636 {
5637 	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5638 
5639 	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5640 }
5641 
5642 static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5643 {
5644 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
5645 	unsigned long pctx_paddr, paddr;
5646 	u32 pcbr;
5647 	int pctx_size = 32*1024;
5648 
5649 	pcbr = I915_READ(VLV_PCBR);
5650 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5651 		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5652 		paddr = (dev_priv->mm.stolen_base +
5653 			 (ggtt->stolen_size - pctx_size));
5654 
5655 		pctx_paddr = (paddr & (~4095));
5656 		I915_WRITE(VLV_PCBR, pctx_paddr);
5657 	}
5658 
5659 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5660 }
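
/*
 * Added note: the fixup above carves the 32 KiB power context out of the
 * top of stolen memory and rounds the address down to a 4 KiB boundary
 * (paddr & ~4095); e.g. a raw 0x7ffff800 becomes 0x7ffff000 before being
 * written to VLV_PCBR.
 */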
5661 
5662 static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5663 {
5664 	struct drm_i915_gem_object *pctx;
5665 	unsigned long pctx_paddr;
5666 	u32 pcbr;
5667 	int pctx_size = 24*1024;
5668 
5669 	mutex_lock(&dev_priv->drm.struct_mutex);
5670 
5671 	pcbr = I915_READ(VLV_PCBR);
5672 	if (pcbr) {
5673 		/* BIOS set it up already, grab the pre-alloc'd space */
5674 		int pcbr_offset;
5675 
5676 		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5677 		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
5678 								      pcbr_offset,
5679 								      I915_GTT_OFFSET_NONE,
5680 								      pctx_size);
5681 		goto out;
5682 	}
5683 
5684 	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5685 
5686 	/*
5687 	 * From the Gunit register HAS:
5688 	 * The Gfx driver is expected to program this register and ensure
5689 	 * proper allocation within Gfx stolen memory.  For example, this
5690 	 * register should be programmed such that the PCBR range does not
5691 	 * overlap with other ranges, such as the frame buffer, protected
5692 	 * memory, or any other relevant ranges.
5693 	 */
5694 	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
5695 	if (!pctx) {
5696 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5697 		goto out;
5698 	}
5699 
5700 	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5701 	I915_WRITE(VLV_PCBR, pctx_paddr);
5702 
5703 out:
5704 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5705 	dev_priv->vlv_pctx = pctx;
5706 	mutex_unlock(&dev_priv->drm.struct_mutex);
5707 }
5708 
5709 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5710 {
5711 	if (WARN_ON(!dev_priv->vlv_pctx))
5712 		return;
5713 
5714 	i915_gem_object_put_unlocked(dev_priv->vlv_pctx);
5715 	dev_priv->vlv_pctx = NULL;
5716 }
5717 
5718 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5719 {
5720 	dev_priv->rps.gpll_ref_freq =
5721 		vlv_get_cck_clock(dev_priv, "GPLL ref",
5722 				  CCK_GPLL_CLOCK_CONTROL,
5723 				  dev_priv->czclk_freq);
5724 
5725 	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
5726 			 dev_priv->rps.gpll_ref_freq);
5727 }
5728 
5729 static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5730 {
5731 	u32 val;
5732 
5733 	valleyview_setup_pctx(dev_priv);
5734 
5735 	vlv_init_gpll_ref_freq(dev_priv);
5736 
5737 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5738 	switch ((val >> 6) & 3) {
5739 	case 0:
5740 	case 1:
5741 		dev_priv->mem_freq = 800;
5742 		break;
5743 	case 2:
5744 		dev_priv->mem_freq = 1066;
5745 		break;
5746 	case 3:
5747 		dev_priv->mem_freq = 1333;
5748 		break;
5749 	}
5750 	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5751 
5752 	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5753 	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5754 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5755 			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5756 			 dev_priv->rps.max_freq);
5757 
5758 	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5759 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5760 			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5761 			 dev_priv->rps.efficient_freq);
5762 
5763 	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5764 	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5765 			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5766 			 dev_priv->rps.rp1_freq);
5767 
5768 	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5769 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5770 			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5771 			 dev_priv->rps.min_freq);
5772 }
5773 
5774 static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5775 {
5776 	u32 val;
5777 
5778 	cherryview_setup_pctx(dev_priv);
5779 
5780 	vlv_init_gpll_ref_freq(dev_priv);
5781 
5782 	mutex_lock(&dev_priv->sb_lock);
5783 	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5784 	mutex_unlock(&dev_priv->sb_lock);
5785 
5786 	switch ((val >> 2) & 0x7) {
5787 	case 3:
5788 		dev_priv->mem_freq = 2000;
5789 		break;
5790 	default:
5791 		dev_priv->mem_freq = 1600;
5792 		break;
5793 	}
5794 	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5795 
5796 	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5797 	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5798 	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5799 			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5800 			 dev_priv->rps.max_freq);
5801 
5802 	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5803 	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5804 			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5805 			 dev_priv->rps.efficient_freq);
5806 
5807 	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5808 	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5809 			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5810 			 dev_priv->rps.rp1_freq);
5811 
5812 	/* PUnit validated range is only [RPe, RP0] */
5813 	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
5814 	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5815 			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5816 			 dev_priv->rps.min_freq);
5817 
5818 	WARN_ONCE((dev_priv->rps.max_freq |
5819 		   dev_priv->rps.efficient_freq |
5820 		   dev_priv->rps.rp1_freq |
5821 		   dev_priv->rps.min_freq) & 1,
5822 		  "Odd GPU freq values\n");
5823 }
5824 
5825 static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5826 {
5827 	valleyview_cleanup_pctx(dev_priv);
5828 }
5829 
5830 static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5831 {
5832 	struct intel_engine_cs *engine;
5833 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5834 
5835 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5836 
5837 	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
5838 					     GT_FIFO_FREE_ENTRIES_CHV);
5839 	if (gtfifodbg) {
5840 		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5841 				 gtfifodbg);
5842 		I915_WRITE(GTFIFODBG, gtfifodbg);
5843 	}
5844 
5845 	cherryview_check_pctx(dev_priv);
5846 
5847 	/* 1a & 1b: Get forcewake during program sequence. Although the driver
5848 	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
5849 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5850 
5851 	/*  Disable RC states. */
5852 	I915_WRITE(GEN6_RC_CONTROL, 0);
5853 
5854 	/* 2a: Program RC6 thresholds. */
5855 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5856 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5857 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5858 
5859 	for_each_engine(engine, dev_priv)
5860 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5861 	I915_WRITE(GEN6_RC_SLEEP, 0);
5862 
5863 	/* TO threshold set to 500 us (0x186 * 1.28 us) */
5864 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
5865 
5866 	/* allows RC6 residency counter to work */
5867 	I915_WRITE(VLV_COUNTER_CONTROL,
5868 		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5869 				      VLV_MEDIA_RC6_COUNT_EN |
5870 				      VLV_RENDER_RC6_COUNT_EN));
5871 
5872 	/* For now we assume BIOS is allocating and populating the PCBR */
5873 	pcbr = I915_READ(VLV_PCBR);
5874 
5875 	/* 3: Enable RC6 */
5876 	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5877 	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
5878 		rc6_mode = GEN7_RC_CTL_TO_MODE;
5879 
5880 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5881 
5882 	/* 4: Program defaults and thresholds for RPS */
5883 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5884 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5885 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5886 	I915_WRITE(GEN6_RP_UP_EI, 66000);
5887 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5888 
5889 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5890 
5891 	/* 5: Enable RPS */
5892 	I915_WRITE(GEN6_RP_CONTROL,
5893 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
5894 		   GEN6_RP_MEDIA_IS_GFX |
5895 		   GEN6_RP_ENABLE |
5896 		   GEN6_RP_UP_BUSY_AVG |
5897 		   GEN6_RP_DOWN_IDLE_AVG);
5898 
5899 	/* Setting Fixed Bias */
5900 	val = VLV_OVERRIDE_EN |
5901 		  VLV_SOC_TDP_EN |
5902 		  CHV_BIAS_CPU_50_SOC_50;
5903 	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5904 
5905 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5906 
5907 	/* RPS code assumes GPLL is used */
5908 	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5909 
5910 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5911 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5912 
5913 	reset_rps(dev_priv, valleyview_set_rps);
5914 
5915 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5916 }
5917 
5918 static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
5919 {
5920 	struct intel_engine_cs *engine;
5921 	u32 gtfifodbg, val, rc6_mode = 0;
5922 
5923 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5924 
5925 	valleyview_check_pctx(dev_priv);
5926 
5927 	gtfifodbg = I915_READ(GTFIFODBG);
5928 	if (gtfifodbg) {
5929 		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5930 				 gtfifodbg);
5931 		I915_WRITE(GTFIFODBG, gtfifodbg);
5932 	}
5933 
5934 	/* If VLV, forcewake all wells, else redirect to the regular path */
5935 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5936 
5937 	/*  Disable RC states. */
5938 	I915_WRITE(GEN6_RC_CONTROL, 0);
5939 
5940 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
5941 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5942 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5943 	I915_WRITE(GEN6_RP_UP_EI, 66000);
5944 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5945 
5946 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5947 
5948 	I915_WRITE(GEN6_RP_CONTROL,
5949 		   GEN6_RP_MEDIA_TURBO |
5950 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
5951 		   GEN6_RP_MEDIA_IS_GFX |
5952 		   GEN6_RP_ENABLE |
5953 		   GEN6_RP_UP_BUSY_AVG |
5954 		   GEN6_RP_DOWN_IDLE_CONT);
5955 
5956 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5957 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5958 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5959 
5960 	for_each_engine(engine, dev_priv)
5961 		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
5962 
5963 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5964 
5965 	/* allows RC6 residency counter to work */
5966 	I915_WRITE(VLV_COUNTER_CONTROL,
5967 		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5968 				      VLV_RENDER_RC0_COUNT_EN |
5969 				      VLV_MEDIA_RC6_COUNT_EN |
5970 				      VLV_RENDER_RC6_COUNT_EN));
5971 
5972 	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5973 		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5974 
5975 	intel_print_rc6_info(dev_priv, rc6_mode);
5976 
5977 	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5978 
5979 	/* Setting Fixed Bias */
5980 	val = VLV_OVERRIDE_EN |
5981 		  VLV_SOC_TDP_EN |
5982 		  VLV_BIAS_CPU_125_SOC_875;
5983 	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
5984 
5985 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5986 
5987 	/* RPS code assumes GPLL is used */
5988 	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5989 
5990 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
5991 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5992 
5993 	reset_rps(dev_priv, valleyview_set_rps);
5994 
5995 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5996 }
5997 
5998 static unsigned long intel_pxfreq(u32 vidfreq)
5999 {
6000 	unsigned long freq;
6001 	int div = (vidfreq & 0x3f0000) >> 16;
6002 	int post = (vidfreq & 0x3000) >> 12;
6003 	int pre = (vidfreq & 0x7);
6004 
6005 	if (!pre)
6006 		return 0;
6007 
6008 	freq = ((div * 133333) / ((1<<post) * pre));
6009 
6010 	return freq;
6011 }
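
/*
 * Worked example with assumed field values: div = 16, post = 1, pre = 2
 * gives freq = (16 * 133333) / ((1 << 1) * 2) = 533332, i.e. roughly
 * 533 MHz if the 133333 reference constant is taken to be in kHz.
 */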
6012 
6013 static const struct cparams {
6014 	u16 i;
6015 	u16 t;
6016 	u16 m;
6017 	u16 c;
6018 } cparams[] = {
6019 	{ 1, 1333, 301, 28664 },
6020 	{ 1, 1066, 294, 24460 },
6021 	{ 1, 800, 294, 25192 },
6022 	{ 0, 1333, 276, 27605 },
6023 	{ 0, 1066, 276, 27605 },
6024 	{ 0, 800, 231, 23784 },
6025 };
6026 
6027 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
6028 {
6029 	u64 total_count, diff, ret;
6030 	u32 count1, count2, count3, m = 0, c = 0;
6031 	unsigned long now = jiffies_to_msecs(jiffies), diff1;
6032 	int i;
6033 
6034 	assert_spin_locked(&mchdev_lock);
6035 
6036 	diff1 = now - dev_priv->ips.last_time1;
6037 
6038 	/* Prevent division-by-zero if we are asking too fast.
6039 	 * Also, we don't get interesting results if we are polling
6040 	 * faster than once in 10ms, so just return the saved value
6041 	 * in such cases.
6042 	 */
6043 	if (diff1 <= 10)
6044 		return dev_priv->ips.chipset_power;
6045 
6046 	count1 = I915_READ(DMIEC);
6047 	count2 = I915_READ(DDREC);
6048 	count3 = I915_READ(CSIEC);
6049 
6050 	total_count = count1 + count2 + count3;
6051 
6052 	/* FIXME: handle per-counter overflow */
6053 	if (total_count < dev_priv->ips.last_count1) {
6054 		diff = ~0UL - dev_priv->ips.last_count1;
6055 		diff += total_count;
6056 	} else {
6057 		diff = total_count - dev_priv->ips.last_count1;
6058 	}
6059 
6060 	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
6061 		if (cparams[i].i == dev_priv->ips.c_m &&
6062 		    cparams[i].t == dev_priv->ips.r_t) {
6063 			m = cparams[i].m;
6064 			c = cparams[i].c;
6065 			break;
6066 		}
6067 	}
6068 
6069 	diff = div_u64(diff, diff1);
6070 	ret = ((m * diff) + c);
6071 	ret = div_u64(ret, 10);
6072 
6073 	dev_priv->ips.last_count1 = total_count;
6074 	dev_priv->ips.last_time1 = now;
6075 
6076 	dev_priv->ips.chipset_power = ret;
6077 
6078 	return ret;
6079 }
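
/*
 * Worked example using the first cparams row (m = 301, c = 28664) and an
 * assumed energy delta of 100 counts/ms:
 * ret = (301 * 100 + 28664) / 10 = 5876.  The result is cached in
 * ips.chipset_power and simply returned for polls arriving faster than
 * once per 10 ms.
 */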
6080 
6081 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
6082 {
6083 	unsigned long val;
6084 
6085 	if (INTEL_INFO(dev_priv)->gen != 5)
6086 		return 0;
6087 
6088 	spin_lock_irq(&mchdev_lock);
6089 
6090 	val = __i915_chipset_val(dev_priv);
6091 
6092 	spin_unlock_irq(&mchdev_lock);
6093 
6094 	return val;
6095 }
6096 
6097 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
6098 {
6099 	unsigned long m, x, b;
6100 	u32 tsfs;
6101 
6102 	tsfs = I915_READ(TSFS);
6103 
6104 	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
6105 	x = I915_READ8(TR1);
6106 
6107 	b = tsfs & TSFS_INTR_MASK;
6108 
6109 	return ((m * x) / 127) - b;
6110 }
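
/*
 * Illustrative arithmetic with assumed register values: slope m = 100,
 * TR1 thermal reading x = 80 and intercept b = 5 give
 * ((100 * 80) / 127) - 5 = 62 - 5 = 57.
 */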
6111 
6112 static int _pxvid_to_vd(u8 pxvid)
6113 {
6114 	if (pxvid == 0)
6115 		return 0;
6116 
6117 	if (pxvid >= 8 && pxvid < 31)
6118 		pxvid = 31;
6119 
6120 	return (pxvid + 2) * 125;
6121 }
6122 
6123 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
6124 {
6125 	const int vd = _pxvid_to_vd(pxvid);
6126 	const int vm = vd - 1125;
6127 
6128 	if (INTEL_INFO(dev_priv)->is_mobile)
6129 		return vm > 0 ? vm : 0;
6130 
6131 	return vd;
6132 }
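
/*
 * Worked example: a PXVID of 10 falls in the [8, 31) hole and is first
 * clamped to 31, so _pxvid_to_vd() returns (31 + 2) * 125 = 4125; a mobile
 * part then reports 4125 - 1125 = 3000 from pvid_to_extvid(), while
 * non-mobile parts return the raw 4125.
 */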
6133 
6134 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
6135 {
6136 	u64 now, diff, diffms;
6137 	u32 count;
6138 
6139 	assert_spin_locked(&mchdev_lock);
6140 
6141 	now = ktime_get_raw_ns();
6142 	diffms = now - dev_priv->ips.last_time2;
6143 	do_div(diffms, NSEC_PER_MSEC);
6144 
6145 	/* Don't divide by 0 */
6146 	if (!diffms)
6147 		return;
6148 
6149 	count = I915_READ(GFXEC);
6150 
6151 	if (count < dev_priv->ips.last_count2) {
6152 		diff = ~0UL - dev_priv->ips.last_count2;
6153 		diff += count;
6154 	} else {
6155 		diff = count - dev_priv->ips.last_count2;
6156 	}
6157 
6158 	dev_priv->ips.last_count2 = count;
6159 	dev_priv->ips.last_time2 = now;
6160 
6161 	/* More magic constants... */
6162 	diff = diff * 1181;
6163 	diff = div_u64(diff, diffms * 10);
6164 	dev_priv->ips.gfx_power = diff;
6165 }
6166 
6167 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
6168 {
6169 	if (INTEL_INFO(dev_priv)->gen != 5)
6170 		return;
6171 
6172 	spin_lock_irq(&mchdev_lock);
6173 
6174 	__i915_update_gfx_val(dev_priv);
6175 
6176 	spin_unlock_irq(&mchdev_lock);
6177 }
6178 
6179 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
6180 {
6181 	unsigned long t, corr, state1, corr2, state2;
6182 	u32 pxvid, ext_v;
6183 
6184 	assert_spin_locked(&mchdev_lock);
6185 
6186 	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
6187 	pxvid = (pxvid >> 24) & 0x7f;
6188 	ext_v = pvid_to_extvid(dev_priv, pxvid);
6189 
6190 	state1 = ext_v;
6191 
6192 	t = i915_mch_val(dev_priv);
6193 
6194 	/* Revel in the empirically derived constants */
6195 
6196 	/* Correction factor in 1/100000 units */
6197 	if (t > 80)
6198 		corr = ((t * 2349) + 135940);
6199 	else if (t >= 50)
6200 		corr = ((t * 964) + 29317);
6201 	else /* < 50 */
6202 		corr = ((t * 301) + 1004);
6203 
6204 	corr = corr * ((150142 * state1) / 10000 - 78642);
6205 	corr /= 100000;
6206 	corr2 = (corr * dev_priv->ips.corr);
6207 
6208 	state2 = (corr2 * state1) / 10000;
6209 	state2 /= 100; /* convert to mW */
6210 
6211 	__i915_update_gfx_val(dev_priv);
6212 
6213 	return dev_priv->ips.gfx_power + state2;
6214 }
6215 
6216 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
6217 {
6218 	unsigned long val;
6219 
6220 	if (INTEL_INFO(dev_priv)->gen != 5)
6221 		return 0;
6222 
6223 	spin_lock_irq(&mchdev_lock);
6224 
6225 	val = __i915_gfx_val(dev_priv);
6226 
6227 	spin_unlock_irq(&mchdev_lock);
6228 
6229 	return val;
6230 }
6231 
6232 /**
6233  * i915_read_mch_val - return value for IPS use
6234  *
6235  * Calculate and return a value for the IPS driver to use when deciding whether
6236  * we have thermal and power headroom to increase CPU or GPU power budget.
6237  */
6238 unsigned long i915_read_mch_val(void)
6239 {
6240 	struct drm_i915_private *dev_priv;
6241 	unsigned long chipset_val, graphics_val, ret = 0;
6242 
6243 	spin_lock_irq(&mchdev_lock);
6244 	if (!i915_mch_dev)
6245 		goto out_unlock;
6246 	dev_priv = i915_mch_dev;
6247 
6248 	chipset_val = __i915_chipset_val(dev_priv);
6249 	graphics_val = __i915_gfx_val(dev_priv);
6250 
6251 	ret = chipset_val + graphics_val;
6252 
6253 out_unlock:
6254 	spin_unlock_irq(&mchdev_lock);
6255 
6256 	return ret;
6257 }
6258 EXPORT_SYMBOL_GPL(i915_read_mch_val);
6259 
6260 /**
6261  * i915_gpu_raise - raise GPU frequency limit
6262  *
6263  * Raise the limit; IPS indicates we have thermal headroom.
6264  */
6265 bool i915_gpu_raise(void)
6266 {
6267 	struct drm_i915_private *dev_priv;
6268 	bool ret = true;
6269 
6270 	spin_lock_irq(&mchdev_lock);
6271 	if (!i915_mch_dev) {
6272 		ret = false;
6273 		goto out_unlock;
6274 	}
6275 	dev_priv = i915_mch_dev;
6276 
6277 	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
6278 		dev_priv->ips.max_delay--;
6279 
6280 out_unlock:
6281 	spin_unlock_irq(&mchdev_lock);
6282 
6283 	return ret;
6284 }
6285 EXPORT_SYMBOL_GPL(i915_gpu_raise);
6286 
6287 /**
6288  * i915_gpu_lower - lower GPU frequency limit
6289  *
6290  * IPS indicates we're close to a thermal limit, so throttle back the GPU
6291  * frequency maximum.
6292  */
6293 bool i915_gpu_lower(void)
6294 {
6295 	struct drm_i915_private *dev_priv;
6296 	bool ret = true;
6297 
6298 	spin_lock_irq(&mchdev_lock);
6299 	if (!i915_mch_dev) {
6300 		ret = false;
6301 		goto out_unlock;
6302 	}
6303 	dev_priv = i915_mch_dev;
6304 
6305 	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6306 		dev_priv->ips.max_delay++;
6307 
6308 out_unlock:
6309 	spin_unlock_irq(&mchdev_lock);
6310 
6311 	return ret;
6312 }
6313 EXPORT_SYMBOL_GPL(i915_gpu_lower);
6314 
6315 /**
6316  * i915_gpu_busy - indicate GPU business to IPS
6317  *
6318  * Tell the IPS driver whether or not the GPU is busy.
6319  */
6320 bool i915_gpu_busy(void)
6321 {
6322 	struct drm_i915_private *dev_priv;
6323 	struct intel_engine_cs *engine;
6324 	bool ret = false;
6325 
6326 	spin_lock_irq(&mchdev_lock);
6327 	if (!i915_mch_dev)
6328 		goto out_unlock;
6329 	dev_priv = i915_mch_dev;
6330 
6331 	for_each_engine(engine, dev_priv)
6332 		ret |= !list_empty(&engine->request_list);
6333 
6334 out_unlock:
6335 	spin_unlock_irq(&mchdev_lock);
6336 
6337 	return ret;
6338 }
6339 EXPORT_SYMBOL_GPL(i915_gpu_busy);
6340 
6341 /**
6342  * i915_gpu_turbo_disable - disable graphics turbo
6343  *
6344  * Disable graphics turbo by resetting the max frequency and setting the
6345  * current frequency to the default.
6346  */
6347 bool i915_gpu_turbo_disable(void)
6348 {
6349 	struct drm_i915_private *dev_priv;
6350 	bool ret = true;
6351 
6352 	spin_lock_irq(&mchdev_lock);
6353 	if (!i915_mch_dev) {
6354 		ret = false;
6355 		goto out_unlock;
6356 	}
6357 	dev_priv = i915_mch_dev;
6358 
6359 	dev_priv->ips.max_delay = dev_priv->ips.fstart;
6360 
6361 	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6362 		ret = false;
6363 
6364 out_unlock:
6365 	spin_unlock_irq(&mchdev_lock);
6366 
6367 	return ret;
6368 }
6369 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6370 
6371 /**
6372  * Tells the intel_ips driver that the i915 driver is now loaded, if
6373  * IPS got loaded first.
6374  *
6375  * This awkward dance is so that neither module has to depend on the
6376  * other in order for IPS to do the appropriate communication of
6377  * GPU turbo limits to i915.
6378  */
6379 static void
6380 ips_ping_for_i915_load(void)
6381 {
6382 #if 0
6383 	void (*link)(void);
6384 
6385 	link = symbol_get(ips_link_to_i915_driver);
6386 	if (link) {
6387 		link();
6388 		symbol_put(ips_link_to_i915_driver);
6389 	}
6390 #endif
6391 }
6392 
6393 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6394 {
6395 	/* We only register the i915 ips part with intel-ips once everything is
6396 	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6397 	spin_lock_irq(&mchdev_lock);
6398 	i915_mch_dev = dev_priv;
6399 	spin_unlock_irq(&mchdev_lock);
6400 
6401 	ips_ping_for_i915_load();
6402 }
6403 
6404 void intel_gpu_ips_teardown(void)
6405 {
6406 	spin_lock_irq(&mchdev_lock);
6407 	i915_mch_dev = NULL;
6408 	spin_unlock_irq(&mchdev_lock);
6409 }
6410 
6411 static void intel_init_emon(struct drm_i915_private *dev_priv)
6412 {
6413 	u32 lcfuse;
6414 	u8 pxw[16];
6415 	int i;
6416 
6417 	/* Disable PMON while we program the event weights */
6418 	I915_WRITE(ECR, 0);
6419 	POSTING_READ(ECR);
6420 
6421 	/* Program energy weights for various events */
6422 	I915_WRITE(SDEW, 0x15040d00);
6423 	I915_WRITE(CSIEW0, 0x007f0000);
6424 	I915_WRITE(CSIEW1, 0x1e220004);
6425 	I915_WRITE(CSIEW2, 0x04000004);
6426 
6427 	for (i = 0; i < 5; i++)
6428 		I915_WRITE(PEW(i), 0);
6429 	for (i = 0; i < 3; i++)
6430 		I915_WRITE(DEW(i), 0);
6431 
6432 	/* Program P-state weights to account for frequency power adjustment */
6433 	for (i = 0; i < 16; i++) {
6434 		u32 pxvidfreq = I915_READ(PXVFREQ(i));
6435 		unsigned long freq = intel_pxfreq(pxvidfreq);
6436 		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6437 			PXVFREQ_PX_SHIFT;
6438 		unsigned long val;
6439 
6440 		val = vid * vid;
6441 		val *= (freq / 1000);
6442 		val *= 255;
6443 		val /= (127*127*900);
6444 		if (val > 0xff)
6445 			DRM_ERROR("bad pxval: %ld\n", val);
6446 		pxw[i] = val;
6447 	}
6448 	/* Render standby states get 0 weight */
6449 	pxw[14] = 0;
6450 	pxw[15] = 0;
6451 
6452 	for (i = 0; i < 4; i++) {
6453 		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6454 			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6455 		I915_WRITE(PXW(i), val);
6456 	}
6457 
6458 	/* Adjust magic regs to magic values (more experimental results) */
6459 	I915_WRITE(OGW0, 0);
6460 	I915_WRITE(OGW1, 0);
6461 	I915_WRITE(EG0, 0x00007f00);
6462 	I915_WRITE(EG1, 0x0000000e);
6463 	I915_WRITE(EG2, 0x000e0000);
6464 	I915_WRITE(EG3, 0x68000300);
6465 	I915_WRITE(EG4, 0x42000000);
6466 	I915_WRITE(EG5, 0x00140031);
6467 	I915_WRITE(EG6, 0);
6468 	I915_WRITE(EG7, 0);
6469 
6470 	for (i = 0; i < 8; i++)
6471 		I915_WRITE(PXWL(i), 0);
6472 
6473 	/* Enable PMON + select events */
6474 	I915_WRITE(ECR, 0x80000019);
6475 
6476 	lcfuse = I915_READ(LCFUSE02);
6477 
6478 	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6479 }
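
/*
 * Worked example for the P-state weight formula above, with assumed
 * inputs vid = 63 and freq = 400000:
 * val = 63 * 63 * (400000 / 1000) * 255 / (127 * 127 * 900) ~= 27,
 * comfortably inside the 8-bit pxw field.
 */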
6480 
6481 void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6482 {
6483 	/*
6484 	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6485 	 * requirement.
6486 	 */
6487 	if (!i915.enable_rc6) {
6488 		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6489 		intel_runtime_pm_get(dev_priv);
6490 	}
6491 
6492 	mutex_lock(&dev_priv->rps.hw_lock);
6493 
6494 	/* Initialize RPS limits (for userspace) */
6495 	if (IS_CHERRYVIEW(dev_priv))
6496 		cherryview_init_gt_powersave(dev_priv);
6497 	else if (IS_VALLEYVIEW(dev_priv))
6498 		valleyview_init_gt_powersave(dev_priv);
6499 	else
6500 		gen6_init_rps_frequencies(dev_priv);
6501 
6502 	/* Derive initial user preferences/limits from the hardware limits */
6503 	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
6504 	dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
6505 
6506 	dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
6507 	dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
6508 
6509 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
6510 		dev_priv->rps.min_freq_softlimit =
6511 			max_t(int,
6512 			      dev_priv->rps.efficient_freq,
6513 			      intel_freq_opcode(dev_priv, 450));
6514 
6515 	/* After setting max-softlimit, find the overclock max freq */
6516 	if (IS_GEN6(dev_priv) ||
6517 	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
6518 		u32 params = 0;
6519 
6520 		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
6521 		if (params & BIT(31)) { /* OC supported */
6522 			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
6523 					 (dev_priv->rps.max_freq & 0xff) * 50,
6524 					 (params & 0xff) * 50);
6525 			dev_priv->rps.max_freq = params & 0xff;
6526 		}
6527 	}
6528 
6529 	/* Finally allow us to boost to max by default */
6530 	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
6531 
6532 	mutex_unlock(&dev_priv->rps.hw_lock);
6533 
6534 	intel_autoenable_gt_powersave(dev_priv);
6535 }
6536 
6537 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6538 {
6539 	if (IS_CHERRYVIEW(dev_priv))
6540 		return;
6541 	else if (IS_VALLEYVIEW(dev_priv))
6542 		valleyview_cleanup_gt_powersave(dev_priv);
6543 
6544 	if (!i915.enable_rc6)
6545 		intel_runtime_pm_put(dev_priv);
6546 }
6547 
6548 /**
6549  * intel_suspend_gt_powersave - suspend PM work and helper threads
6550  * @dev_priv: i915 device
6551  *
6552  * We don't want to disable RC6 or other features here, we just want
6553  * to make sure any work we've queued has finished and won't bother
6554  * us while we're suspended.
6555  */
6556 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6557 {
6558 	if (INTEL_GEN(dev_priv) < 6)
6559 		return;
6560 
6561 	if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
6562 		intel_runtime_pm_put(dev_priv);
6563 
6564 	/* gen6_rps_idle() will be called later to disable interrupts */
6565 }
6566 
6567 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
6568 {
6569 	dev_priv->rps.enabled = true; /* force disabling */
6570 	intel_disable_gt_powersave(dev_priv);
6571 
6572 	gen6_reset_rps_interrupts(dev_priv);
6573 }
6574 
6575 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6576 {
6577 	if (!READ_ONCE(dev_priv->rps.enabled))
6578 		return;
6579 
6580 	mutex_lock(&dev_priv->rps.hw_lock);
6581 
6582 	if (INTEL_GEN(dev_priv) >= 9) {
6583 		gen9_disable_rc6(dev_priv);
6584 		gen9_disable_rps(dev_priv);
6585 	} else if (IS_CHERRYVIEW(dev_priv)) {
6586 		cherryview_disable_rps(dev_priv);
6587 	} else if (IS_VALLEYVIEW(dev_priv)) {
6588 		valleyview_disable_rps(dev_priv);
6589 	} else if (INTEL_GEN(dev_priv) >= 6) {
6590 		gen6_disable_rps(dev_priv);
6591 	} else if (IS_IRONLAKE_M(dev_priv)) {
6592 		ironlake_disable_drps(dev_priv);
6593 	}
6594 
6595 	dev_priv->rps.enabled = false;
6596 	mutex_unlock(&dev_priv->rps.hw_lock);
6597 }
6598 
6599 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6600 {
6601 	/* We shouldn't be disabling as we submit, so this should be less
6602 	 * racy than it appears!
6603 	 */
6604 	if (READ_ONCE(dev_priv->rps.enabled))
6605 		return;
6606 
6607 	/* Powersaving is controlled by the host when inside a VM */
6608 	if (intel_vgpu_active(dev_priv))
6609 		return;
6610 
6611 	mutex_lock(&dev_priv->rps.hw_lock);
6612 
6613 	if (IS_CHERRYVIEW(dev_priv)) {
6614 		cherryview_enable_rps(dev_priv);
6615 	} else if (IS_VALLEYVIEW(dev_priv)) {
6616 		valleyview_enable_rps(dev_priv);
6617 	} else if (INTEL_GEN(dev_priv) >= 9) {
6618 		gen9_enable_rc6(dev_priv);
6619 		gen9_enable_rps(dev_priv);
6620 		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6621 			gen6_update_ring_freq(dev_priv);
6622 	} else if (IS_BROADWELL(dev_priv)) {
6623 		gen8_enable_rps(dev_priv);
6624 		gen6_update_ring_freq(dev_priv);
6625 	} else if (INTEL_GEN(dev_priv) >= 6) {
6626 		gen6_enable_rps(dev_priv);
6627 		gen6_update_ring_freq(dev_priv);
6628 	} else if (IS_IRONLAKE_M(dev_priv)) {
6629 		ironlake_enable_drps(dev_priv);
6630 		intel_init_emon(dev_priv);
6631 	}
6632 
6633 	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
6634 	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
6635 
6636 	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
6637 	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
6638 
6639 	dev_priv->rps.enabled = true;
6640 	mutex_unlock(&dev_priv->rps.hw_lock);
6641 }
6642 
6643 static void __intel_autoenable_gt_powersave(struct work_struct *work)
6644 {
6645 	struct drm_i915_private *dev_priv =
6646 		container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
6647 	struct intel_engine_cs *rcs;
6648 	struct drm_i915_gem_request *req;
6649 
6650 	if (READ_ONCE(dev_priv->rps.enabled))
6651 		goto out;
6652 
6653 	rcs = &dev_priv->engine[RCS];
6654 	if (rcs->last_context)
6655 		goto out;
6656 
6657 	if (!rcs->init_context)
6658 		goto out;
6659 
6660 	mutex_lock(&dev_priv->drm.struct_mutex);
6661 
6662 	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
6663 	if (IS_ERR(req))
6664 		goto unlock;
6665 
6666 	if (!i915.enable_execlists && i915_switch_context(req) == 0)
6667 		rcs->init_context(req);
6668 
6669 	/* Mark the device busy, calling intel_enable_gt_powersave() */
6670 	i915_add_request_no_flush(req);
6671 
6672 unlock:
6673 	mutex_unlock(&dev_priv->drm.struct_mutex);
6674 out:
6675 	intel_runtime_pm_put(dev_priv);
6676 }
6677 
6678 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
6679 {
6680 	if (READ_ONCE(dev_priv->rps.enabled))
6681 		return;
6682 
6683 	if (IS_IRONLAKE_M(dev_priv)) {
6684 		ironlake_enable_drps(dev_priv);
6685 		mutex_lock(&dev_priv->drm.struct_mutex);
6686 		intel_init_emon(dev_priv);
6687 		mutex_unlock(&dev_priv->drm.struct_mutex);
6688 	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
6689 		/*
6690 		 * PCU communication is slow and this doesn't need to be
6691 		 * done at any specific time, so do this out of our fast path
6692 		 * to make resume and init faster.
6693 		 *
6694 		 * We depend on the HW RC6 power context save/restore
6695 		 * mechanism when entering D3 through runtime PM suspend. So
6696 		 * disable RPM until RPS/RC6 is properly setup. We can only
6697 		 * get here via the driver load/system resume/runtime resume
6698 		 * paths, so the _noresume version is enough (and in case of
6699 		 * runtime resume it's necessary).
6700 		 */
6701 		if (queue_delayed_work(dev_priv->wq,
6702 				       &dev_priv->rps.autoenable_work,
6703 				       round_jiffies_up_relative(HZ)))
6704 			intel_runtime_pm_get_noresume(dev_priv);
6705 	}
6706 }
6707 
6708 static void ibx_init_clock_gating(struct drm_device *dev)
6709 {
6710 	struct drm_i915_private *dev_priv = to_i915(dev);
6711 
6712 	/*
6713 	 * On Ibex Peak and Cougar Point, we need to disable clock
6714 	 * gating for the panel power sequencer or it will fail to
6715 	 * start up when no ports are active.
6716 	 */
6717 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6718 }
6719 
6720 static void g4x_disable_trickle_feed(struct drm_device *dev)
6721 {
6722 	struct drm_i915_private *dev_priv = to_i915(dev);
6723 	enum i915_pipe pipe;
6724 
6725 	for_each_pipe(dev_priv, pipe) {
6726 		I915_WRITE(DSPCNTR(pipe),
6727 			   I915_READ(DSPCNTR(pipe)) |
6728 			   DISPPLANE_TRICKLE_FEED_DISABLE);
6729 
6730 		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
6731 		POSTING_READ(DSPSURF(pipe));
6732 	}
6733 }
6734 
6735 static void ilk_init_lp_watermarks(struct drm_device *dev)
6736 {
6737 	struct drm_i915_private *dev_priv = to_i915(dev);
6738 
6739 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6740 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6741 	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6742 
6743 	/*
6744 	 * Don't touch WM1S_LP_EN here.
6745 	 * Doing so could cause underruns.
6746 	 */
6747 }
6748 
6749 static void ironlake_init_clock_gating(struct drm_device *dev)
6750 {
6751 	struct drm_i915_private *dev_priv = to_i915(dev);
6752 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6753 
6754 	/*
6755 	 * Required for FBC
6756 	 * WaFbcDisableDpfcClockGating:ilk
6757 	 */
6758 	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
6759 		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
6760 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
6761 
6762 	I915_WRITE(PCH_3DCGDIS0,
6763 		   MARIUNIT_CLOCK_GATE_DISABLE |
6764 		   SVSMUNIT_CLOCK_GATE_DISABLE);
6765 	I915_WRITE(PCH_3DCGDIS1,
6766 		   VFMUNIT_CLOCK_GATE_DISABLE);
6767 
6768 	/*
6769 	 * According to the spec the following bits should be set in
6770 	 * order to enable memory self-refresh
6771 	 * The bit 22/21 of 0x42004
6772 	 * The bit 5 of 0x42020
6773 	 * The bit 15 of 0x45000
6774 	 */
6775 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6776 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
6777 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
6778 	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
6779 	I915_WRITE(DISP_ARB_CTL,
6780 		   (I915_READ(DISP_ARB_CTL) |
6781 		    DISP_FBC_WM_DIS));
6782 
6783 	ilk_init_lp_watermarks(dev);
6784 
6785 	/*
6786 	 * Based on the document from hardware guys the following bits
6787 	 * should be set unconditionally in order to enable FBC.
6788 	 * The bit 22 of 0x42000
6789 	 * The bit 22 of 0x42004
6790 	 * The bit 7,8,9 of 0x42020.
6791 	 */
6792 	if (IS_IRONLAKE_M(dev)) {
6793 		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
6794 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
6795 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
6796 			   ILK_FBCQ_DIS);
6797 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
6798 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
6799 			   ILK_DPARB_GATE);
6800 	}
6801 
6802 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6803 
6804 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6805 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
6806 		   ILK_ELPIN_409_SELECT);
6807 	I915_WRITE(_3D_CHICKEN2,
6808 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6809 		   _3D_CHICKEN2_WM_READ_PIPELINED);
6810 
6811 	/* WaDisableRenderCachePipelinedFlush:ilk */
6812 	I915_WRITE(CACHE_MODE_0,
6813 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
6814 
6815 	/* WaDisable_RenderCache_OperationalFlush:ilk */
6816 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6817 
6818 	g4x_disable_trickle_feed(dev);
6819 
6820 	ibx_init_clock_gating(dev);
6821 }
6822 
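/*
 * Many registers poked in these functions (CACHE_MODE_0, _3D_CHICKEN*,
 * MI_ARB_STATE, ...) are "masked": the upper 16 bits of the written value
 * select which of the lower 16 bits take effect, so no read-modify-write
 * cycle is needed. A minimal model of the _MASKED_* helpers used
 * throughout this file, plus what the hardware does with such a write
 * (an illustrative sketch, not the driver's definitions):
 */
#include <stdint.h>

#define SKETCH_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define SKETCH_MASKED_BIT_ENABLE(a)		SKETCH_MASKED_FIELD((a), (a))
#define SKETCH_MASKED_BIT_DISABLE(a)		SKETCH_MASKED_FIELD((a), 0)

/* Effect of writing 'val' to a masked register whose state is 'old'. */
static uint32_t sketch_masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask & 0xffffu);
}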
6823 static void cpt_init_clock_gating(struct drm_device *dev)
6824 {
6825 	struct drm_i915_private *dev_priv = to_i915(dev);
6826 	int pipe;
6827 	uint32_t val;
6828 
6829 	/*
6830 	 * On Ibex Peak and Cougar Point, we need to disable clock
6831 	 * gating for the panel power sequencer or it will fail to
6832 	 * start up when no ports are active.
6833 	 */
6834 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
6835 		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
6836 		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
6837 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6838 		   DPLS_EDP_PPS_FIX_DIS);
6839 	/* The following fixes display corruption (a few pixels shifted
6840 	 * downward) seen only on the LVDS output of some HP laptops
6841 	 * with Ivy Bridge. */
6842 	for_each_pipe(dev_priv, pipe) {
6843 		val = I915_READ(TRANS_CHICKEN2(pipe));
6844 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6845 		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6846 		if (dev_priv->vbt.fdi_rx_polarity_inverted)
6847 			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6848 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
6849 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6850 		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6851 		I915_WRITE(TRANS_CHICKEN2(pipe), val);
6852 	}
6853 	/* WADP0ClockGatingDisable */
6854 	for_each_pipe(dev_priv, pipe) {
6855 		I915_WRITE(TRANS_CHICKEN1(pipe),
6856 			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6857 	}
6858 }
6859 
6860 static void gen6_check_mch_setup(struct drm_device *dev)
6861 {
6862 	struct drm_i915_private *dev_priv = to_i915(dev);
6863 	uint32_t tmp;
6864 
6865 	tmp = I915_READ(MCH_SSKPD);
6866 	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
6867 		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n",
6868 			      tmp);
6869 }
6870 
6871 static void gen6_init_clock_gating(struct drm_device *dev)
6872 {
6873 	struct drm_i915_private *dev_priv = to_i915(dev);
6874 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6875 
6876 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
6877 
6878 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6879 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
6880 		   ILK_ELPIN_409_SELECT);
6881 
6882 	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6883 	I915_WRITE(_3D_CHICKEN,
6884 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
6885 
6886 	/* WaDisable_RenderCache_OperationalFlush:snb */
6887 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6888 
6889 	/*
6890 	 * BSpec recommends 8x4 when MSAA is used,
6891 	 * however in practice 16x4 seems fastest.
6892 	 *
6893 	 * Note that PS/WM thread counts depend on the WIZ hashing
6894 	 * disable bit, which we don't touch here, but it's good
6895 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6896 	 */
6897 	I915_WRITE(GEN6_GT_MODE,
6898 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
6899 
6900 	ilk_init_lp_watermarks(dev);
6901 
6902 	I915_WRITE(CACHE_MODE_0,
6903 		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
6904 
6905 	I915_WRITE(GEN6_UCGCTL1,
6906 		   I915_READ(GEN6_UCGCTL1) |
6907 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
6908 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
6909 
6910 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
6911 	 * gating disable must be set.  Failure to set it results in
6912 	 * flickering pixels due to Z write ordering failures after
6913 	 * some amount of runtime in the Mesa "fire" demo, and Unigine
6914 	 * Sanctuary and Tropics, and apparently anything else with
6915 	 * alpha test or pixel discard.
6916 	 *
6917 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
6918 	 * but we didn't debug actual testcases to find it out.
6919 	 *
6920 	 * WaDisableRCCUnitClockGating:snb
6921 	 * WaDisableRCPBUnitClockGating:snb
6922 	 */
6923 	I915_WRITE(GEN6_UCGCTL2,
6924 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
6925 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
6926 
6927 	/* WaStripsFansDisableFastClipPerformanceFix:snb */
6928 	I915_WRITE(_3D_CHICKEN3,
6929 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
6930 
6931 	/*
6932 	 * Bspec says:
6933 	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
6934 	 * 3DSTATE_SF number of SF output attributes is more than 16."
6935 	 */
6936 	I915_WRITE(_3D_CHICKEN3,
6937 		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
6938 
6939 	/*
6940 	 * According to the spec the following bits should be
6941 	 * set in order to enable memory self-refresh and FBC:
6942 	 * bits 21 and 22 of 0x42000
6943 	 * bits 21 and 22 of 0x42004
6944 	 * bits 5 and 7 of 0x42020
6945 	 * bit 14 of 0x70180
6946 	 * bit 14 of 0x71180
6947 	 *
6948 	 * WaFbcAsynchFlipDisableFbcQueue:snb
6949 	 */
6950 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
6951 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
6952 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6953 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
6954 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
6955 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6956 	I915_WRITE(ILK_DSPCLK_GATE_D,
6957 		   I915_READ(ILK_DSPCLK_GATE_D) |
6958 		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
6959 		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
6960 
6961 	g4x_disable_trickle_feed(dev);
6962 
6963 	cpt_init_clock_gating(dev);
6964 
6965 	gen6_check_mch_setup(dev);
6966 }
6967 
6968 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6969 {
6970 	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
6971 
6972 	/*
6973 	 * WaVSThreadDispatchOverride:ivb,vlv
6974 	 *
6975 	 * This actually overrides the dispatch
6976 	 * mode for all thread types.
6977 	 */
6978 	reg &= ~GEN7_FF_SCHED_MASK;
6979 	reg |= GEN7_FF_TS_SCHED_HW;
6980 	reg |= GEN7_FF_VS_SCHED_HW;
6981 	reg |= GEN7_FF_DS_SCHED_HW;
6982 
6983 	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
6984 }
6985 
6986 static void lpt_init_clock_gating(struct drm_device *dev)
6987 {
6988 	struct drm_i915_private *dev_priv = to_i915(dev);
6989 
6990 	/*
6991 	 * TODO: this bit should only be enabled when really needed, then
6992 	 * disabled when not needed anymore in order to save power.
6993 	 */
6994 	if (HAS_PCH_LPT_LP(dev))
6995 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
6996 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
6997 			   PCH_LP_PARTITION_LEVEL_DISABLE);
6998 
6999 	/* WADPOClockGatingDisable:hsw */
7000 	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
7001 		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
7002 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
7003 }
7004 
7005 static void lpt_suspend_hw(struct drm_device *dev)
7006 {
7007 	struct drm_i915_private *dev_priv = to_i915(dev);
7008 
7009 	if (HAS_PCH_LPT_LP(dev)) {
7010 		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
7011 
7012 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
7013 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
7014 	}
7015 }
7016 
7017 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7018 				   int general_prio_credits,
7019 				   int high_prio_credits)
7020 {
7021 	u32 misccpctl;
7022 
7023 	/* WaTempDisableDOPClkGating:bdw */
7024 	misccpctl = I915_READ(GEN7_MISCCPCTL);
7025 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7026 
7027 	I915_WRITE(GEN8_L3SQCREG1,
7028 		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7029 		   L3_HIGH_PRIO_CREDITS(high_prio_credits));
7030 
7031 	/*
7032 	 * Wait at least 100 clocks before re-enabling clock gating.
7033 	 * See the definition of L3SQCREG1 in BSpec.
7034 	 */
7035 	POSTING_READ(GEN8_L3SQCREG1);
7036 	udelay(1);
7037 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7038 }
7039 
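/*
 * Shape of the sequence above, reduced to a sketch (the register storage
 * and delay are stand-ins, not the driver's accessors): save the gating
 * register, disable DOP clock gating, program the target register, flush
 * with a read, wait out the required clocks, then restore.
 */
#include <stdint.h>

static void sketch_write_under_gate_disable(volatile uint32_t *gate_reg,
					    uint32_t gate_enable_bit,
					    volatile uint32_t *target_reg,
					    uint32_t value)
{
	uint32_t saved = *gate_reg;

	*gate_reg = saved & ~gate_enable_bit;	/* stop DOP clock gating */
	*target_reg = value;			/* program while ungated */
	(void)*target_reg;			/* posting read */
	/* wait >= 100 clocks here (the udelay(1) above) */
	*gate_reg = saved;			/* restore gating */
}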
7040 static void kabylake_init_clock_gating(struct drm_device *dev)
7041 {
7042 	struct drm_i915_private *dev_priv = to_i915(dev);
7043 
7044 	gen9_init_clock_gating(dev);
7045 
7046 	/* WaDisableSDEUnitClockGating:kbl */
7047 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7048 		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7049 			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7050 
7051 	/* WaDisableGamClockGating:kbl */
7052 	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
7053 		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7054 			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
7055 
7056 	/* WaFbcNukeOnHostModify:kbl */
7057 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7058 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7059 }
7060 
7061 static void skylake_init_clock_gating(struct drm_device *dev)
7062 {
7063 	struct drm_i915_private *dev_priv = to_i915(dev);
7064 
7065 	gen9_init_clock_gating(dev);
7066 
7067 	/* WAC6entrylatency:skl */
7068 	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7069 		   FBC_LLC_FULLY_OPEN);
7070 
7071 	/* WaFbcNukeOnHostModify:skl */
7072 	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
7073 		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
7074 }
7075 
7076 static void broadwell_init_clock_gating(struct drm_device *dev)
7077 {
7078 	struct drm_i915_private *dev_priv = to_i915(dev);
7079 	enum i915_pipe pipe;
7080 
7081 	ilk_init_lp_watermarks(dev);
7082 
7083 	/* WaSwitchSolVfFArbitrationPriority:bdw */
7084 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7085 
7086 	/* WaPsrDPAMaskVBlankInSRD:bdw */
7087 	I915_WRITE(CHICKEN_PAR1_1,
7088 		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
7089 
7090 	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
7091 	for_each_pipe(dev_priv, pipe) {
7092 		I915_WRITE(CHICKEN_PIPESL_1(pipe),
7093 			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
7094 			   BDW_DPRS_MASK_VBLANK_SRD);
7095 	}
7096 
7097 	/* WaVSRefCountFullforceMissDisable:bdw */
7098 	/* WaDSRefCountFullforceMissDisable:bdw */
7099 	I915_WRITE(GEN7_FF_THREAD_MODE,
7100 		   I915_READ(GEN7_FF_THREAD_MODE) &
7101 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7102 
7103 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7104 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7105 
7106 	/* WaDisableSDEUnitClockGating:bdw */
7107 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7108 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7109 
7110 	/* WaProgramL3SqcReg1Default:bdw */
7111 	gen8_set_l3sqc_credits(dev_priv, 30, 2);
7112 
7113 	/*
7114 	 * WaGttCachingOffByDefault:bdw
7115 	 * GTT cache may not work with big pages, so if those
7116 	 * are ever enabled GTT cache may need to be disabled.
7117 	 */
7118 	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7119 
7120 	/* WaKVMNotificationOnConfigChange:bdw */
7121 	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7122 		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7123 
7124 	lpt_init_clock_gating(dev);
7125 }
7126 
7127 static void haswell_init_clock_gating(struct drm_device *dev)
7128 {
7129 	struct drm_i915_private *dev_priv = to_i915(dev);
7130 
7131 	ilk_init_lp_watermarks(dev);
7132 
7133 	/* L3 caching of data atomics doesn't work -- disable it. */
7134 	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
7135 	I915_WRITE(HSW_ROW_CHICKEN3,
7136 		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
7137 
7138 	/* This is required by WaCatErrorRejectionIssue:hsw */
7139 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7140 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7141 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7142 
7143 	/* WaVSRefCountFullforceMissDisable:hsw */
7144 	I915_WRITE(GEN7_FF_THREAD_MODE,
7145 		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
7146 
7147 	/* WaDisable_RenderCache_OperationalFlush:hsw */
7148 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7149 
7150 	/* enable HiZ Raw Stall Optimization */
7151 	I915_WRITE(CACHE_MODE_0_GEN7,
7152 		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7153 
7154 	/* WaDisable4x2SubspanOptimization:hsw */
7155 	I915_WRITE(CACHE_MODE_1,
7156 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7157 
7158 	/*
7159 	 * BSpec recommends 8x4 when MSAA is used,
7160 	 * however in practice 16x4 seems fastest.
7161 	 *
7162 	 * Note that PS/WM thread counts depend on the WIZ hashing
7163 	 * disable bit, which we don't touch here, but it's good
7164 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7165 	 */
7166 	I915_WRITE(GEN7_GT_MODE,
7167 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7168 
7169 	/* WaSampleCChickenBitEnable:hsw */
7170 	I915_WRITE(HALF_SLICE_CHICKEN3,
7171 		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
7172 
7173 	/* WaSwitchSolVfFArbitrationPriority:hsw */
7174 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
7175 
7176 	/* WaRsPkgCStateDisplayPMReq:hsw */
7177 	I915_WRITE(CHICKEN_PAR1_1,
7178 		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
7179 
7180 	lpt_init_clock_gating(dev);
7181 }
7182 
7183 static void ivybridge_init_clock_gating(struct drm_device *dev)
7184 {
7185 	struct drm_i915_private *dev_priv = to_i915(dev);
7186 	uint32_t snpcr;
7187 
7188 	ilk_init_lp_watermarks(dev);
7189 
7190 	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
7191 
7192 	/* WaDisableEarlyCull:ivb */
7193 	I915_WRITE(_3D_CHICKEN3,
7194 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7195 
7196 	/* WaDisableBackToBackFlipFix:ivb */
7197 	I915_WRITE(IVB_CHICKEN3,
7198 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7199 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
7200 
7201 	/* WaDisablePSDDualDispatchEnable:ivb */
7202 	if (IS_IVB_GT1(dev))
7203 		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7204 			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7205 
7206 	/* WaDisable_RenderCache_OperationalFlush:ivb */
7207 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7208 
7209 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
7210 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
7211 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
7212 
7213 	/* WaApplyL3ControlAndL3ChickenMode:ivb */
7214 	I915_WRITE(GEN7_L3CNTLREG1,
7215 			GEN7_WA_FOR_GEN7_L3_CONTROL);
7216 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
7217 		   GEN7_WA_L3_CHICKEN_MODE);
7218 	if (IS_IVB_GT1(dev)) {
7219 		I915_WRITE(GEN7_ROW_CHICKEN2,
7220 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7221 	} else {
7222 		/* must write both registers */
7223 		I915_WRITE(GEN7_ROW_CHICKEN2,
7224 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7225 		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
7226 			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7227 	}
7228 
7229 	/* WaForceL3Serialization:ivb */
7230 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7231 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7232 
7233 	/*
7234 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7235 	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
7236 	 */
7237 	I915_WRITE(GEN6_UCGCTL2,
7238 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7239 
7240 	/* This is required by WaCatErrorRejectionIssue:ivb */
7241 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7242 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7243 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7244 
7245 	g4x_disable_trickle_feed(dev);
7246 
7247 	gen7_setup_fixed_func_scheduler(dev_priv);
7248 
7249 	if (0) { /* causes HiZ corruption on ivb:gt1 */
7250 		/* enable HiZ Raw Stall Optimization */
7251 		I915_WRITE(CACHE_MODE_0_GEN7,
7252 			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
7253 	}
7254 
7255 	/* WaDisable4x2SubspanOptimization:ivb */
7256 	I915_WRITE(CACHE_MODE_1,
7257 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7258 
7259 	/*
7260 	 * BSpec recommends 8x4 when MSAA is used,
7261 	 * however in practice 16x4 seems fastest.
7262 	 *
7263 	 * Note that PS/WM thread counts depend on the WIZ hashing
7264 	 * disable bit, which we don't touch here, but it's good
7265 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7266 	 */
7267 	I915_WRITE(GEN7_GT_MODE,
7268 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7269 
7270 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
7271 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
7272 	snpcr |= GEN6_MBC_SNPCR_MED;
7273 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
7274 
7275 	if (!HAS_PCH_NOP(dev))
7276 		cpt_init_clock_gating(dev);
7277 
7278 	gen6_check_mch_setup(dev);
7279 }
7280 
7281 static void valleyview_init_clock_gating(struct drm_device *dev)
7282 {
7283 	struct drm_i915_private *dev_priv = to_i915(dev);
7284 
7285 	/* WaDisableEarlyCull:vlv */
7286 	I915_WRITE(_3D_CHICKEN3,
7287 		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
7288 
7289 	/* WaDisableBackToBackFlipFix:vlv */
7290 	I915_WRITE(IVB_CHICKEN3,
7291 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
7292 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
7293 
7294 	/* WaPsdDispatchEnable:vlv */
7295 	/* WaDisablePSDDualDispatchEnable:vlv */
7296 	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
7297 		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
7298 				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
7299 
7300 	/* WaDisable_RenderCache_OperationalFlush:vlv */
7301 	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7302 
7303 	/* WaForceL3Serialization:vlv */
7304 	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
7305 		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
7306 
7307 	/* WaDisableDopClockGating:vlv */
7308 	I915_WRITE(GEN7_ROW_CHICKEN2,
7309 		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
7310 
7311 	/* This is required by WaCatErrorRejectionIssue:vlv */
7312 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
7313 		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
7314 		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
7315 
7316 	gen7_setup_fixed_func_scheduler(dev_priv);
7317 
7318 	/*
7319 	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
7320 	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
7321 	 */
7322 	I915_WRITE(GEN6_UCGCTL2,
7323 		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
7324 
7325 	/* WaDisableL3Bank2xClockGate:vlv
7326 	 * Disabling L3 clock gating: MMIO 940c[25] = 1.
7327 	 * Set bit 25 to disable L3_BANK_2x_CLK_GATING. */
7328 	I915_WRITE(GEN7_UCGCTL4,
7329 		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
7330 
7331 	/*
7332 	 * BSpec says this must be set, even though
7333 	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
7334 	 */
7335 	I915_WRITE(CACHE_MODE_1,
7336 		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
7337 
7338 	/*
7339 	 * BSpec recommends 8x4 when MSAA is used,
7340 	 * however in practice 16x4 seems fastest.
7341 	 *
7342 	 * Note that PS/WM thread counts depend on the WIZ hashing
7343 	 * disable bit, which we don't touch here, but it's good
7344 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
7345 	 */
7346 	I915_WRITE(GEN7_GT_MODE,
7347 		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
7348 
7349 	/*
7350 	 * WaIncreaseL3CreditsForVLVB0:vlv
7351 	 * This is the hardware default actually.
7352 	 */
7353 	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
7354 
7355 	/*
7356 	 * WaDisableVLVClockGating_VBIIssue:vlv
7357 	 * Disable clock gating on the GCFG unit to prevent a delay
7358 	 * in the reporting of vblank events.
7359 	 */
7360 	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
7361 }
7362 
7363 static void cherryview_init_clock_gating(struct drm_device *dev)
7364 {
7365 	struct drm_i915_private *dev_priv = to_i915(dev);
7366 
7367 	/* WaVSRefCountFullforceMissDisable:chv */
7368 	/* WaDSRefCountFullforceMissDisable:chv */
7369 	I915_WRITE(GEN7_FF_THREAD_MODE,
7370 		   I915_READ(GEN7_FF_THREAD_MODE) &
7371 		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
7372 
7373 	/* WaDisableSemaphoreAndSyncFlipWait:chv */
7374 	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
7375 		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
7376 
7377 	/* WaDisableCSUnitClockGating:chv */
7378 	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
7379 		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);
7380 
7381 	/* WaDisableSDEUnitClockGating:chv */
7382 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
7383 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7384 
7385 	/*
7386 	 * WaProgramL3SqcReg1Default:chv
7387 	 * See gfxspecs/Related Documents/Performance Guide/
7388 	 * LSQC Setting Recommendations.
7389 	 */
7390 	gen8_set_l3sqc_credits(dev_priv, 38, 2);
7391 
7392 	/*
7393 	 * GTT cache may not work with big pages, so if those
7394 	 * are ever enabled GTT cache may need to be disabled.
7395 	 */
7396 	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
7397 }
7398 
7399 static void g4x_init_clock_gating(struct drm_device *dev)
7400 {
7401 	struct drm_i915_private *dev_priv = to_i915(dev);
7402 	uint32_t dspclk_gate;
7403 
7404 	I915_WRITE(RENCLK_GATE_D1, 0);
7405 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
7406 		   GS_UNIT_CLOCK_GATE_DISABLE |
7407 		   CL_UNIT_CLOCK_GATE_DISABLE);
7408 	I915_WRITE(RAMCLK_GATE_D, 0);
7409 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
7410 		OVRUNIT_CLOCK_GATE_DISABLE |
7411 		OVCUNIT_CLOCK_GATE_DISABLE;
7412 	if (IS_GM45(dev))
7413 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
7414 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
7415 
7416 	/* WaDisableRenderCachePipelinedFlush */
7417 	I915_WRITE(CACHE_MODE_0,
7418 		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
7419 
7420 	/* WaDisable_RenderCache_OperationalFlush:g4x */
7421 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7422 
7423 	g4x_disable_trickle_feed(dev);
7424 }
7425 
7426 static void crestline_init_clock_gating(struct drm_device *dev)
7427 {
7428 	struct drm_i915_private *dev_priv = to_i915(dev);
7429 
7430 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7431 	I915_WRITE(RENCLK_GATE_D2, 0);
7432 	I915_WRITE(DSPCLK_GATE_D, 0);
7433 	I915_WRITE(RAMCLK_GATE_D, 0);
7434 	I915_WRITE16(DEUC, 0);
7435 	I915_WRITE(MI_ARB_STATE,
7436 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7437 
7438 	/* WaDisable_RenderCache_OperationalFlush:gen4 */
7439 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7440 }
7441 
7442 static void broadwater_init_clock_gating(struct drm_device *dev)
7443 {
7444 	struct drm_i915_private *dev_priv = to_i915(dev);
7445 
7446 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7447 		   I965_RCC_CLOCK_GATE_DISABLE |
7448 		   I965_RCPB_CLOCK_GATE_DISABLE |
7449 		   I965_ISC_CLOCK_GATE_DISABLE |
7450 		   I965_FBC_CLOCK_GATE_DISABLE);
7451 	I915_WRITE(RENCLK_GATE_D2, 0);
7452 	I915_WRITE(MI_ARB_STATE,
7453 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7454 
7455 	/* WaDisable_RenderCache_OperationalFlush:gen4 */
7456 	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
7457 }
7458 
7459 static void gen3_init_clock_gating(struct drm_device *dev)
7460 {
7461 	struct drm_i915_private *dev_priv = to_i915(dev);
7462 	u32 dstate = I915_READ(D_STATE);
7463 
7464 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
7465 		DSTATE_DOT_CLOCK_GATING;
7466 	I915_WRITE(D_STATE, dstate);
7467 
7468 	if (IS_PINEVIEW(dev))
7469 		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
7470 
7471 	/* IIR "flip pending" means done if this bit is set */
7472 	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
7473 
7474 	/* interrupts should cause a wake up from C3 */
7475 	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
7476 
7477 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
7478 	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
7479 
7480 	I915_WRITE(MI_ARB_STATE,
7481 		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
7482 }
7483 
7484 static void i85x_init_clock_gating(struct drm_device *dev)
7485 {
7486 	struct drm_i915_private *dev_priv = to_i915(dev);
7487 
7488 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7489 
7490 	/* interrupts should cause a wake up from C3 */
7491 	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
7492 		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
7493 
7494 	I915_WRITE(MEM_MODE,
7495 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
7496 }
7497 
7498 static void i830_init_clock_gating(struct drm_device *dev)
7499 {
7500 	struct drm_i915_private *dev_priv = to_i915(dev);
7501 
7502 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7503 
7504 	I915_WRITE(MEM_MODE,
7505 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
7506 		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
7507 }
7508 
7509 void intel_init_clock_gating(struct drm_device *dev)
7510 {
7511 	struct drm_i915_private *dev_priv = to_i915(dev);
7512 
7513 	dev_priv->display.init_clock_gating(dev);
7514 }
7515 
7516 void intel_suspend_hw(struct drm_device *dev)
7517 {
7518 	if (HAS_PCH_LPT(dev))
7519 		lpt_suspend_hw(dev);
7520 }
7521 
7522 static void nop_init_clock_gating(struct drm_device *dev)
7523 {
7524 	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
7525 }
7526 
7527 /**
7528  * intel_init_clock_gating_hooks - setup the clock gating hooks
7529  * @dev_priv: device private
7530  *
7531  * Setup the hooks that configure which clocks of a given platform can be
7532  * gated and also apply various GT and display specific workarounds for these
7533  * platforms. Note that some GT specific workarounds are applied separately
7534  * when GPU contexts or batchbuffers start their execution.
7535  */
7536 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7537 {
7538 	if (IS_SKYLAKE(dev_priv))
7539 		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
7540 	else if (IS_KABYLAKE(dev_priv))
7541 		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
7542 	else if (IS_BROXTON(dev_priv))
7543 		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
7544 	else if (IS_BROADWELL(dev_priv))
7545 		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
7546 	else if (IS_CHERRYVIEW(dev_priv))
7547 		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
7548 	else if (IS_HASWELL(dev_priv))
7549 		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
7550 	else if (IS_IVYBRIDGE(dev_priv))
7551 		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
7552 	else if (IS_VALLEYVIEW(dev_priv))
7553 		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
7554 	else if (IS_GEN6(dev_priv))
7555 		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
7556 	else if (IS_GEN5(dev_priv))
7557 		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
7558 	else if (IS_G4X(dev_priv))
7559 		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
7560 	else if (IS_CRESTLINE(dev_priv))
7561 		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
7562 	else if (IS_BROADWATER(dev_priv))
7563 		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
7564 	else if (IS_GEN3(dev_priv))
7565 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7566 	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
7567 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
7568 	else if (IS_GEN2(dev_priv))
7569 		dev_priv->display.init_clock_gating = i830_init_clock_gating;
7570 	else {
7571 		MISSING_CASE(INTEL_DEVID(dev_priv));
7572 		dev_priv->display.init_clock_gating = nop_init_clock_gating;
7573 	}
7574 }
7575 
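/*
 * The chain above fills in a per-platform vtable slot once at init;
 * intel_init_clock_gating() then dispatches through it. Standalone model
 * of the idiom (platform ids and handlers are invented for illustration
 * only):
 */
enum sketch_platform { SKETCH_PLAT_A, SKETCH_PLAT_B };

struct sketch_display_hooks {
	void (*init_clock_gating)(void);
};

static void sketch_plat_a_init(void) { /* platform A workarounds */ }
static void sketch_plat_b_init(void) { /* platform B workarounds */ }

static void sketch_pick_hooks(struct sketch_display_hooks *hooks,
			      enum sketch_platform plat)
{
	if (plat == SKETCH_PLAT_B)
		hooks->init_clock_gating = sketch_plat_b_init;
	else
		hooks->init_clock_gating = sketch_plat_a_init;
}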
7576 /* Set up chip specific power management-related functions */
7577 void intel_init_pm(struct drm_device *dev)
7578 {
7579 	struct drm_i915_private *dev_priv = to_i915(dev);
7580 
7581 	intel_fbc_init(dev_priv);
7582 
7583 	/* For CxSR */
7584 	if (IS_PINEVIEW(dev))
7585 		i915_pineview_get_mem_freq(dev);
7586 	else if (IS_GEN5(dev))
7587 		i915_ironlake_get_mem_freq(dev);
7588 
7589 	/* For FIFO watermark updates */
7590 	if (INTEL_INFO(dev)->gen >= 9) {
7591 		skl_setup_wm_latency(dev);
7592 		dev_priv->display.update_wm = skl_update_wm;
7593 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
7594 	} else if (HAS_PCH_SPLIT(dev)) {
7595 		ilk_setup_wm_latency(dev);
7596 
7597 		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
7598 		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
7599 		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
7600 		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
7601 			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
7602 			dev_priv->display.compute_intermediate_wm =
7603 				ilk_compute_intermediate_wm;
7604 			dev_priv->display.initial_watermarks =
7605 				ilk_initial_watermarks;
7606 			dev_priv->display.optimize_watermarks =
7607 				ilk_optimize_watermarks;
7608 		} else {
7609 			DRM_DEBUG_KMS("Failed to read display plane latency. "
7610 				      "Disabling CxSR\n");
7611 		}
7612 	} else if (IS_CHERRYVIEW(dev)) {
7613 		vlv_setup_wm_latency(dev);
7614 		dev_priv->display.update_wm = vlv_update_wm;
7615 	} else if (IS_VALLEYVIEW(dev)) {
7616 		vlv_setup_wm_latency(dev);
7617 		dev_priv->display.update_wm = vlv_update_wm;
7618 	} else if (IS_PINEVIEW(dev)) {
7619 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
7620 					    dev_priv->is_ddr3,
7621 					    dev_priv->fsb_freq,
7622 					    dev_priv->mem_freq)) {
7623 			DRM_INFO("failed to find known CxSR latency "
7624 				 "(found ddr%s fsb freq %d, mem freq %d), "
7625 				 "disabling CxSR\n",
7626 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
7627 				 dev_priv->fsb_freq, dev_priv->mem_freq);
7628 			/* Disable CxSR and never update its watermark again */
7629 			intel_set_memory_cxsr(dev_priv, false);
7630 			dev_priv->display.update_wm = NULL;
7631 		} else
7632 			dev_priv->display.update_wm = pineview_update_wm;
7633 	} else if (IS_G4X(dev)) {
7634 		dev_priv->display.update_wm = g4x_update_wm;
7635 	} else if (IS_GEN4(dev)) {
7636 		dev_priv->display.update_wm = i965_update_wm;
7637 	} else if (IS_GEN3(dev)) {
7638 		dev_priv->display.update_wm = i9xx_update_wm;
7639 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
7640 	} else if (IS_GEN2(dev)) {
7641 		if (INTEL_INFO(dev)->num_pipes == 1) {
7642 			dev_priv->display.update_wm = i845_update_wm;
7643 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
7644 		} else {
7645 			dev_priv->display.update_wm = i9xx_update_wm;
7646 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
7647 		}
7648 	} else {
7649 		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
7650 	}
7651 }
7652 
7653 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7654 {
7655 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7656 
7657 	/* GEN6_PCODE_* are outside of the forcewake domain, so we can
7658 	 * use the I915_READ_FW/I915_WRITE_FW variants to reduce the
7659 	 * amount of work required when reading/writing.
7660 	 */
7661 
7662 	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7663 		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7664 		return -EAGAIN;
7665 	}
7666 
7667 	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
7668 	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7669 	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7670 
7671 	if (intel_wait_for_register_fw(dev_priv,
7672 				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7673 				       500)) {
7674 		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7675 		return -ETIMEDOUT;
7676 	}
7677 
7678 	*val = I915_READ_FW(GEN6_PCODE_DATA);
7679 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7680 
7681 	return 0;
7682 }
7683 
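/*
 * Self-contained model of the pcode mailbox handshake implemented by the
 * read/write helpers here (a sketch: the struct below stands in for the
 * GEN6_PCODE_MAILBOX/DATA MMIO registers, and the firmware's side of the
 * protocol is not modeled). A transaction may only start while READY is
 * clear; setting READY hands the command to pcode, which clears READY
 * again once the reply is in the data register.
 */
#include <stdint.h>

#define SKETCH_PCODE_READY	(1u << 31)

struct sketch_mailbox {
	volatile uint32_t mbox;
	volatile uint32_t data;
};

static int sketch_pcode_rw(struct sketch_mailbox *mb, uint32_t cmd,
			   uint32_t *val, unsigned int timeout)
{
	if (mb->mbox & SKETCH_PCODE_READY)
		return -1;			/* busy, like -EAGAIN above */

	mb->data = *val;			/* request payload */
	mb->mbox = SKETCH_PCODE_READY | cmd;	/* kick the firmware */

	while (mb->mbox & SKETCH_PCODE_READY)	/* pcode clears READY when done */
		if (timeout-- == 0)
			return -2;		/* like -ETIMEDOUT above */

	*val = mb->data;			/* reply payload */
	return 0;
}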
7684 int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7685 			       u32 mbox, u32 val)
7686 {
7687 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7688 
7689 	/* GEN6_PCODE_* are outside of the forcewake domain, so we can
7690 	 * use the I915_READ_FW/I915_WRITE_FW variants to reduce the
7691 	 * amount of work required when reading/writing.
7692 	 */
7693 
7694 	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7695 		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7696 		return -EAGAIN;
7697 	}
7698 
7699 	I915_WRITE_FW(GEN6_PCODE_DATA, val);
7700 	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7701 
7702 	if (intel_wait_for_register_fw(dev_priv,
7703 				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7704 				       500)) {
7705 		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7706 		return -ETIMEDOUT;
7707 	}
7708 
7709 	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7710 
7711 	return 0;
7712 }
7713 
7714 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7715 {
7716 	/*
7717 	 * N = val - 0xb7
7718 	 * Slow = Fast = GPLL ref * N
7719 	 */
7720 	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
7721 }
7722 
7723 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7724 {
7725 	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
7726 }
7727 
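/*
 * Sanity sketch for the two conversions above: byt_freq_opcode() is the
 * inverse of byt_gpu_freq() up to DIV_ROUND_CLOSEST rounding, which holds
 * as long as gpll_ref_freq is comfortably above 1000 kHz. A standalone
 * check (the rounding divide is re-derived for positive operands, and the
 * reference frequency is an arbitrary test value, not a real platform
 * number):
 */
#include <assert.h>

#define SKETCH_DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static int sketch_byt_gpu_freq(int gpll_ref_freq, int val)
{
	return SKETCH_DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000);
}

static int sketch_byt_freq_opcode(int gpll_ref_freq, int val)
{
	return SKETCH_DIV_ROUND_CLOSEST(1000 * val, gpll_ref_freq) + 0xb7;
}

static void sketch_check_roundtrip(void)
{
	int ref = 5400;	/* arbitrary test reference, in kHz */

	for (int op = 0xb8; op < 0x100; op++)
		assert(sketch_byt_freq_opcode(ref,
		       sketch_byt_gpu_freq(ref, op)) == op);
}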
7728 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7729 {
7730 	/*
7731 	 * N = val / 2
7732 	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
7733 	 */
7734 	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
7735 }
7736 
7737 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7738 {
7739 	/* CHV needs even values */
7740 	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
7741 }
7742 
7743 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
7744 {
7745 	if (IS_GEN9(dev_priv))
7746 		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
7747 					 GEN9_FREQ_SCALER);
7748 	else if (IS_CHERRYVIEW(dev_priv))
7749 		return chv_gpu_freq(dev_priv, val);
7750 	else if (IS_VALLEYVIEW(dev_priv))
7751 		return byt_gpu_freq(dev_priv, val);
7752 	else
7753 		return val * GT_FREQUENCY_MULTIPLIER;
7754 }
7755 
7756 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
7757 {
7758 	if (IS_GEN9(dev_priv))
7759 		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
7760 					 GT_FREQUENCY_MULTIPLIER);
7761 	else if (IS_CHERRYVIEW(dev_priv))
7762 		return chv_freq_opcode(dev_priv, val);
7763 	else if (IS_VALLEYVIEW(dev_priv))
7764 		return byt_freq_opcode(dev_priv, val);
7765 	else
7766 		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
7767 }
7768 
7769 struct request_boost {
7770 	struct work_struct work;
7771 	struct drm_i915_gem_request *req;
7772 };
7773 
7774 static void __intel_rps_boost_work(struct work_struct *work)
7775 {
7776 	struct request_boost *boost = container_of(work, struct request_boost, work);
7777 	struct drm_i915_gem_request *req = boost->req;
7778 
7779 	if (!i915_gem_request_completed(req))
7780 		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7781 
7782 	i915_gem_request_put(req);
7783 	kfree(boost);
7784 }
7785 
7786 void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7787 {
7788 	struct request_boost *boost;
7789 
7790 	if (req == NULL || INTEL_GEN(req->i915) < 6)
7791 		return;
7792 
7793 	if (i915_gem_request_completed(req))
7794 		return;
7795 
7796 	boost = kmalloc(sizeof(*boost), M_DRM, GFP_ATOMIC);
7797 	if (boost == NULL)
7798 		return;
7799 
7800 	boost->req = i915_gem_request_get(req);
7801 
7802 	INIT_WORK(&boost->work, __intel_rps_boost_work);
7803 	queue_work(req->i915->wq, &boost->work);
7804 }
7805 
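/*
 * Lifetime rule behind the boost path above, as a standalone sketch (the
 * request type, put helper and "queue" step below are stand-ins, not
 * driver API): a reference is taken before the work is queued and dropped
 * by the worker, so the request cannot be freed in the window between
 * queueing and execution.
 */
#include <stdlib.h>

struct sketch_request {
	int refcount;
	int completed;
};

struct sketch_boost {
	struct sketch_request *req;	/* holds one reference */
};

static void sketch_request_put(struct sketch_request *req)
{
	if (--req->refcount == 0)
		free(req);
}

/* Runs later, on a workqueue thread in the real driver. */
static void sketch_boost_worker(struct sketch_boost *boost)
{
	if (!boost->req->completed) {
		/* gen6_rps_boost() would run here in the driver */
	}
	sketch_request_put(boost->req);	/* drop the queue-time reference */
	free(boost);
}

static struct sketch_boost *sketch_queue_boost(struct sketch_request *req)
{
	struct sketch_boost *boost = malloc(sizeof(*boost));

	if (boost == NULL)
		return NULL;
	req->refcount++;	/* keep req alive across the async hop */
	boost->req = req;
	return boost;		/* hand this to the work queue */
}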
7806 void intel_pm_setup(struct drm_device *dev)
7807 {
7808 	struct drm_i915_private *dev_priv = to_i915(dev);
7809 
7810 	lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
7811 	lockinit(&dev_priv->rps.client_lock, "i915rcl", 0, 0);
7812 
7813 	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
7814 			  __intel_autoenable_gt_powersave);
7815 	INIT_LIST_HEAD(&dev_priv->rps.clients);
7816 
7817 	dev_priv->pm.suspended = false;
7818 	atomic_set(&dev_priv->pm.wakeref_count, 0);
7819 	atomic_set(&dev_priv->pm.atomic_seq, 0);
7820 }
7821