/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}
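/*
 * Worked example for vlv_get_cck_clock() (illustrative numbers, not from
 * any spec): with an HPLL reference of 1600000 kHz and a CCK divider
 * field of 11, the result is
 * DIV_ROUND_CLOSEST(1600000 << 1, 11 + 1) = 266667 kHz.
 */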
static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}

void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}
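/*
 * Worked example for the GEN5 branch above (illustrative field value):
 * a BIOS feedback-clock field of 25 yields (25 + 2) * 10000 = 270000,
 * matching the fixed 270000 returned for the remaining pre-DDI platforms.
 */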
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
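/*
 * Note on the VLV/CHV/BXT tables above: the .m and .p ranges are omitted
 * on purpose, and the VLV/CHV .dot ranges are expressed in fast-clock
 * units (5x the pixel clock). intel_PLL_is_valid() below skips the m/p
 * checks on these platforms, so only n/m1/m2/p1/p2 plus the vco/dot
 * ranges apply to them.
 */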
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
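/*
 * Worked example for i9xx_calc_dpll_params() (illustrative divisors,
 * chosen to sit inside the intel_limits_i9xx_sdvo ranges):
 * refclk = 96000 kHz, n = 4, m1 = 16, m2 = 7, p1 = 2, p2 = 5 gives
 * m = 5 * (16 + 2) + (7 + 2) = 99, p = 10,
 * vco = DIV_ROUND_CLOSEST(96000 * 99, 4 + 2) = 1584000 kHz and
 * dot = DIV_ROUND_CLOSEST(1584000, 10) = 158400 kHz.
 *
 * On CHV the m2 divider carries 22 fractional bits (see the
 * intel_limits_chv table), which is why chv_calc_dpll_params() divides
 * by clock->n << 22; VLV/CHV return dot / 5 because their dot clock is
 * the 5x fast clock.
 */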
601 */ 602 603 static bool intel_PLL_is_valid(struct drm_device *dev, 604 const struct intel_limit *limit, 605 const struct dpll *clock) 606 { 607 if (clock->n < limit->n.min || limit->n.max < clock->n) 608 INTELPllInvalid("n out of range\n"); 609 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 610 INTELPllInvalid("p1 out of range\n"); 611 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 612 INTELPllInvalid("m2 out of range\n"); 613 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 614 INTELPllInvalid("m1 out of range\n"); 615 616 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && 617 !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) 618 if (clock->m1 <= clock->m2) 619 INTELPllInvalid("m1 <= m2\n"); 620 621 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { 622 if (clock->p < limit->p.min || limit->p.max < clock->p) 623 INTELPllInvalid("p out of range\n"); 624 if (clock->m < limit->m.min || limit->m.max < clock->m) 625 INTELPllInvalid("m out of range\n"); 626 } 627 628 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 629 INTELPllInvalid("vco out of range\n"); 630 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 631 * connector, etc., rather than just a single range. 632 */ 633 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 634 INTELPllInvalid("dot out of range\n"); 635 636 return true; 637 } 638 639 static int 640 i9xx_select_p2_div(const struct intel_limit *limit, 641 const struct intel_crtc_state *crtc_state, 642 int target) 643 { 644 struct drm_device *dev = crtc_state->base.crtc->dev; 645 646 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 647 /* 648 * For LVDS just rely on its current settings for dual-channel. 649 * We haven't figured out how to reliably set up different 650 * single/dual channel state, if we even can. 651 */ 652 if (intel_is_dual_link_lvds(dev)) 653 return limit->p2.p2_fast; 654 else 655 return limit->p2.p2_slow; 656 } else { 657 if (target < limit->p2.dot_limit) 658 return limit->p2.p2_slow; 659 else 660 return limit->p2.p2_fast; 661 } 662 } 663 664 /* 665 * Returns a set of divisors for the desired target clock with the given 666 * refclk, or FALSE. The returned values represent the clock equation: 667 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 668 * 669 * Target and reference clocks are specified in kHz. 670 * 671 * If match_clock is provided, then best_clock P divider must match the P 672 * divider from @match_clock used for LVDS downclocking. 
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (m2 + 2) / n / p1 / p2 (Pineview has a single combined m
 * divider).
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
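/*
 * Both searches above are exhaustive: every divisor combination inside
 * the limit table is evaluated and the candidate whose dot clock lands
 * closest to the target wins. The only structural difference is that the
 * i9xx variant skips combinations with m2 >= m1, while Pineview, with
 * its single combined m divider, has no such constraint.
 */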
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
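/*
 * Worked example for the ppm computation above (illustrative numbers):
 * target_freq = 148500 kHz and a calculated dot clock of 148350 kHz give
 * 1000000 * 150 / 148500 ~= 1010 ppm, so this candidate only replaces a
 * previous best whose error was worse than ~1020 ppm.
 */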
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * m1 * m2 / n / p1 / p2, a fast clock running at 5x the pipe's
 * dot clock.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
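/*
 * Worked example of the m2 back-calculation above (illustrative values):
 * a 270000 kHz port clock becomes a 1350000 kHz fast-clock target; with
 * refclk = 100000, n = 1, p1 = 2, p2 = 2 (p = 4) and m1 = 3 this gives
 * m2 = DIV_ROUND_CLOSEST(1350000 * 4 * 1, 100000 * 3) = 18, and
 * vlv_calc_dpll_params() then yields vco = 5400000 kHz and a returned
 * dot clock of 1350000 / 5 = 270000 kHz.
 */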
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * m1 * m2 / (n << 22) / p1 / p2, where m2 carries 22 fractional
 * bits; as on VLV this is a fast clock running at 5x the pipe's dot clock.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2. If a 200MHz refclk needs to be supported we have to
	 * revisit this, because n may not be 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
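/*
 * pipe_dsl_stopped() samples the display scanline counter twice, 5 ms
 * apart; a pipe that is still running will have advanced between the
 * reads, so two identical samples are taken to mean the pipe stopped.
 */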
1069 * 1070 */ 1071 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1072 { 1073 struct drm_device *dev = crtc->base.dev; 1074 struct drm_i915_private *dev_priv = to_i915(dev); 1075 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1076 enum i915_pipe pipe = crtc->pipe; 1077 1078 if (INTEL_INFO(dev)->gen >= 4) { 1079 i915_reg_t reg = PIPECONF(cpu_transcoder); 1080 1081 /* Wait for the Pipe State to go off */ 1082 if (intel_wait_for_register(dev_priv, 1083 reg, I965_PIPECONF_ACTIVE, 0, 1084 100)) 1085 WARN(1, "pipe_off wait timed out\n"); 1086 } else { 1087 /* Wait for the display line to settle */ 1088 if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) 1089 WARN(1, "pipe_off wait timed out\n"); 1090 } 1091 } 1092 1093 /* Only for pre-ILK configs */ 1094 void assert_pll(struct drm_i915_private *dev_priv, 1095 enum i915_pipe pipe, bool state) 1096 { 1097 u32 val; 1098 bool cur_state; 1099 1100 val = I915_READ(DPLL(pipe)); 1101 cur_state = !!(val & DPLL_VCO_ENABLE); 1102 I915_STATE_WARN(cur_state != state, 1103 "PLL state assertion failure (expected %s, current %s)\n", 1104 onoff(state), onoff(cur_state)); 1105 } 1106 1107 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1108 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1109 { 1110 u32 val; 1111 bool cur_state; 1112 1113 mutex_lock(&dev_priv->sb_lock); 1114 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1115 mutex_unlock(&dev_priv->sb_lock); 1116 1117 cur_state = val & DSI_PLL_VCO_EN; 1118 I915_STATE_WARN(cur_state != state, 1119 "DSI PLL state assertion failure (expected %s, current %s)\n", 1120 onoff(state), onoff(cur_state)); 1121 } 1122 1123 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1124 enum i915_pipe pipe, bool state) 1125 { 1126 bool cur_state; 1127 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1128 pipe); 1129 1130 if (HAS_DDI(dev_priv)) { 1131 /* DDI does not have a specific FDI_TX register */ 1132 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1133 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1134 } else { 1135 u32 val = I915_READ(FDI_TX_CTL(pipe)); 1136 cur_state = !!(val & FDI_TX_ENABLE); 1137 } 1138 I915_STATE_WARN(cur_state != state, 1139 "FDI TX state assertion failure (expected %s, current %s)\n", 1140 onoff(state), onoff(cur_state)); 1141 } 1142 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1143 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1144 1145 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1146 enum i915_pipe pipe, bool state) 1147 { 1148 u32 val; 1149 bool cur_state; 1150 1151 val = I915_READ(FDI_RX_CTL(pipe)); 1152 cur_state = !!(val & FDI_RX_ENABLE); 1153 I915_STATE_WARN(cur_state != state, 1154 "FDI RX state assertion failure (expected %s, current %s)\n", 1155 onoff(state), onoff(cur_state)); 1156 } 1157 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1158 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1159 1160 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1161 enum i915_pipe pipe) 1162 { 1163 u32 val; 1164 1165 /* ILK FDI PLL is always enabled */ 1166 if (IS_GEN5(dev_priv)) 1167 return; 1168 1169 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1170 if (HAS_DDI(dev_priv)) 1171 return; 1172 1173 val = I915_READ(FDI_TX_CTL(pipe)); 1174 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1175 } 1176 1177 void 
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	i915_reg_t pp_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = &dev_priv->drm;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
			"cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
				"plane %c assertion failure, should be disabled but not\n",
				plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
				"plane %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
					"plane %d assertion failure, should be off on pipe %c but is still active\n",
					sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
					"sprite %c assertion failure, should be off on pipe %c but is still active\n",
					sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	}
}
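/*
 * Register map behind the checks above: gen9+ has one PLANE_CTL per
 * universal plane, VLV/CHV one SPCNTR per sprite, gen7/8 a single SPRCTL
 * per pipe, and ILK/SNB a single DVSCNTR per pipe.
 */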
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
			"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
			&& (val & DP_PIPEB_SELECT),
			"IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
			"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
			&& (val & SDVO_PIPE_B_SELECT),
			"IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: the crtc whose pipe PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
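/*
 * In vlv_wait_port_ready() above, @expected_mask is presumably given in
 * port B bit positions: ports B and C share the DPLL(0) status register,
 * so the expected value is shifted up by 4 for port C to line up with
 * DPLL_PORTC_READY_MASK, while port D reports through DPIO_PHY_STATUS.
 */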
must be feeding us bits for PCH ports */ 1799 assert_fdi_tx_enabled(dev_priv, pipe); 1800 assert_fdi_rx_enabled(dev_priv, pipe); 1801 1802 if (HAS_PCH_CPT(dev)) { 1803 /* Workaround: Set the timing override bit before enabling the 1804 * pch transcoder. */ 1805 reg = TRANS_CHICKEN2(pipe); 1806 val = I915_READ(reg); 1807 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1808 I915_WRITE(reg, val); 1809 } 1810 1811 reg = PCH_TRANSCONF(pipe); 1812 val = I915_READ(reg); 1813 pipeconf_val = I915_READ(PIPECONF(pipe)); 1814 1815 if (HAS_PCH_IBX(dev_priv)) { 1816 /* 1817 * Make the BPC in transcoder be consistent with 1818 * that in pipeconf reg. For HDMI we must use 8bpc 1819 * here for both 8bpc and 12bpc. 1820 */ 1821 val &= ~PIPECONF_BPC_MASK; 1822 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1823 val |= PIPECONF_8BPC; 1824 else 1825 val |= pipeconf_val & PIPECONF_BPC_MASK; 1826 } 1827 1828 val &= ~TRANS_INTERLACE_MASK; 1829 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1830 if (HAS_PCH_IBX(dev_priv) && 1831 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1832 val |= TRANS_LEGACY_INTERLACED_ILK; 1833 else 1834 val |= TRANS_INTERLACED; 1835 else 1836 val |= TRANS_PROGRESSIVE; 1837 1838 I915_WRITE(reg, val | TRANS_ENABLE); 1839 if (intel_wait_for_register(dev_priv, 1840 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 1841 100)) 1842 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1843 } 1844 1845 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1846 enum transcoder cpu_transcoder) 1847 { 1848 u32 val, pipeconf_val; 1849 1850 /* FDI must be feeding us bits for PCH ports */ 1851 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1852 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1853 1854 /* Workaround: set timing override bit. */ 1855 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1856 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1857 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1858 1859 val = TRANS_ENABLE; 1860 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1861 1862 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1863 PIPECONF_INTERLACED_ILK) 1864 val |= TRANS_INTERLACED; 1865 else 1866 val |= TRANS_PROGRESSIVE; 1867 1868 I915_WRITE(LPT_TRANSCONF, val); 1869 if (intel_wait_for_register(dev_priv, 1870 LPT_TRANSCONF, 1871 TRANS_STATE_ENABLE, 1872 TRANS_STATE_ENABLE, 1873 100)) 1874 DRM_ERROR("Failed to enable PCH transcoder\n"); 1875 } 1876 1877 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1878 enum i915_pipe pipe) 1879 { 1880 struct drm_device *dev = &dev_priv->drm; 1881 i915_reg_t reg; 1882 uint32_t val; 1883 1884 /* FDI relies on the transcoder */ 1885 assert_fdi_tx_disabled(dev_priv, pipe); 1886 assert_fdi_rx_disabled(dev_priv, pipe); 1887 1888 /* Ports must be off as well */ 1889 assert_pch_ports_disabled(dev_priv, pipe); 1890 1891 reg = PCH_TRANSCONF(pipe); 1892 val = I915_READ(reg); 1893 val &= ~TRANS_ENABLE; 1894 I915_WRITE(reg, val); 1895 /* wait for PCH transcoder off, transcoder state */ 1896 if (intel_wait_for_register(dev_priv, 1897 reg, TRANS_STATE_ENABLE, 0, 1898 50)) 1899 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1900 1901 if (HAS_PCH_CPT(dev)) { 1902 /* Workaround: Clear the timing override chicken bit again. 
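		 * (This bit was set by ironlake_enable_pch_transcoder() on CPT.)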
		 */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
2007 * 2008 * Will wait until the pipe has shut down before returning. 2009 */ 2010 static void intel_disable_pipe(struct intel_crtc *crtc) 2011 { 2012 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2013 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2014 enum i915_pipe pipe = crtc->pipe; 2015 i915_reg_t reg; 2016 u32 val; 2017 2018 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2019 2020 /* 2021 * Make sure planes won't keep trying to pump pixels to us, 2022 * or we might hang the display. 2023 */ 2024 assert_planes_disabled(dev_priv, pipe); 2025 assert_cursor_disabled(dev_priv, pipe); 2026 assert_sprites_disabled(dev_priv, pipe); 2027 2028 reg = PIPECONF(cpu_transcoder); 2029 val = I915_READ(reg); 2030 if ((val & PIPECONF_ENABLE) == 0) 2031 return; 2032 2033 /* 2034 * Double wide has implications for planes 2035 * so best keep it disabled when not needed. 2036 */ 2037 if (crtc->config->double_wide) 2038 val &= ~PIPECONF_DOUBLE_WIDE; 2039 2040 /* Don't disable pipe or pipe PLLs if needed */ 2041 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2042 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2043 val &= ~PIPECONF_ENABLE; 2044 2045 I915_WRITE(reg, val); 2046 if ((val & PIPECONF_ENABLE) == 0) 2047 intel_wait_for_pipe_off(crtc); 2048 } 2049 2050 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 2051 { 2052 return IS_GEN2(dev_priv) ? 2048 : 4096; 2053 } 2054 2055 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv, 2056 uint64_t fb_modifier, unsigned int cpp) 2057 { 2058 switch (fb_modifier) { 2059 case DRM_FORMAT_MOD_NONE: 2060 return cpp; 2061 case I915_FORMAT_MOD_X_TILED: 2062 if (IS_GEN2(dev_priv)) 2063 return 128; 2064 else 2065 return 512; 2066 case I915_FORMAT_MOD_Y_TILED: 2067 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) 2068 return 128; 2069 else 2070 return 512; 2071 case I915_FORMAT_MOD_Yf_TILED: 2072 switch (cpp) { 2073 case 1: 2074 return 64; 2075 case 2: 2076 case 4: 2077 return 128; 2078 case 8: 2079 case 16: 2080 return 256; 2081 default: 2082 MISSING_CASE(cpp); 2083 return cpp; 2084 } 2085 break; 2086 default: 2087 MISSING_CASE(fb_modifier); 2088 return cpp; 2089 } 2090 } 2091 2092 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, 2093 uint64_t fb_modifier, unsigned int cpp) 2094 { 2095 if (fb_modifier == DRM_FORMAT_MOD_NONE) 2096 return 1; 2097 else 2098 return intel_tile_size(dev_priv) / 2099 intel_tile_width_bytes(dev_priv, fb_modifier, cpp); 2100 } 2101 2102 /* Return the tile dimensions in pixel units */ 2103 static void intel_tile_dims(const struct drm_i915_private *dev_priv, 2104 unsigned int *tile_width, 2105 unsigned int *tile_height, 2106 uint64_t fb_modifier, 2107 unsigned int cpp) 2108 { 2109 unsigned int tile_width_bytes = 2110 intel_tile_width_bytes(dev_priv, fb_modifier, cpp); 2111 2112 *tile_width = tile_width_bytes / cpp; 2113 *tile_height = intel_tile_size(dev_priv) / tile_width_bytes; 2114 } 2115 2116 unsigned int 2117 intel_fb_align_height(struct drm_device *dev, unsigned int height, 2118 uint32_t pixel_format, uint64_t fb_modifier) 2119 { 2120 unsigned int cpp = drm_format_plane_cpp(pixel_format, 0); 2121 unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp); 2122 2123 return ALIGN(height, tile_height); 2124 } 2125 2126 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2127 { 2128 unsigned int size = 0; 2129 int i; 2130 2131 for (i = 0 ; i < 
ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			struct drm_framebuffer *fb,
			unsigned int rotation)
{
	if (intel_rotation_90_or_270(rotation)) {
		*view = i915_ggtt_view_rotated;
		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
	} else {
		*view = i915_ggtt_view_normal;
	}
}

static void
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
	unsigned int tile_size, tile_width, tile_height, cpp;

	tile_size = intel_tile_size(dev_priv);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	intel_tile_dims(dev_priv, &tile_width, &tile_height,
			fb->modifier[0], cpp);

	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);

	/* NV12 carries a second, vertically sub-sampled UV plane */
	if (fb->pixel_format == DRM_FORMAT_NV12) {
		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb->modifier[1], cpp);

		info->uv_offset = fb->offsets[1];
		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
	}
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}

int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. This means there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * and no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 *
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	return new_offset;
}

/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
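 *
 * A worked example with assumed values: X-tiling on !gen2 hardware uses
 * 512-byte-wide, 4 KiB tiles (per intel_tile_width_bytes() above), i.e.
 * 128x8 pixels at cpp = 4. With a byte pitch of 4096 (pitch_tiles = 8)
 * and an incoming (x, y) of (130, 17), tile_rows = 2 and tiles = 1, so
 * the base tile offset is (2 * 8 + 1) * 4096 = 69632 bytes, leaving a
 * residual (x, y) of (2, 1) inside that tile.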
2333 */ 2334 u32 intel_compute_tile_offset(int *x, int *y, 2335 const struct drm_framebuffer *fb, int plane, 2336 unsigned int pitch, 2337 unsigned int rotation) 2338 { 2339 const struct drm_i915_private *dev_priv = to_i915(fb->dev); 2340 uint64_t fb_modifier = fb->modifier[plane]; 2341 unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); 2342 u32 offset, offset_aligned, alignment; 2343 2344 alignment = intel_surf_alignment(dev_priv, fb_modifier); 2345 if (alignment) 2346 alignment--; 2347 2348 if (fb_modifier != DRM_FORMAT_MOD_NONE) { 2349 unsigned int tile_size, tile_width, tile_height; 2350 unsigned int tile_rows, tiles, pitch_tiles; 2351 2352 tile_size = intel_tile_size(dev_priv); 2353 intel_tile_dims(dev_priv, &tile_width, &tile_height, 2354 fb_modifier, cpp); 2355 2356 if (intel_rotation_90_or_270(rotation)) { 2357 pitch_tiles = pitch / tile_height; 2358 swap(tile_width, tile_height); 2359 } else { 2360 pitch_tiles = pitch / (tile_width * cpp); 2361 } 2362 2363 tile_rows = *y / tile_height; 2364 *y %= tile_height; 2365 2366 tiles = *x / tile_width; 2367 *x %= tile_width; 2368 2369 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2370 offset_aligned = offset & ~alignment; 2371 2372 intel_adjust_tile_offset(x, y, tile_width, tile_height, 2373 tile_size, pitch_tiles, 2374 offset, offset_aligned); 2375 } else { 2376 offset = *y * pitch + *x * cpp; 2377 offset_aligned = offset & ~alignment; 2378 2379 *y = (offset & alignment) / pitch; 2380 *x = ((offset & alignment) - *y * pitch) / cpp; 2381 } 2382 2383 return offset_aligned; 2384 } 2385 2386 static int i9xx_format_to_fourcc(int format) 2387 { 2388 switch (format) { 2389 case DISPPLANE_8BPP: 2390 return DRM_FORMAT_C8; 2391 case DISPPLANE_BGRX555: 2392 return DRM_FORMAT_XRGB1555; 2393 case DISPPLANE_BGRX565: 2394 return DRM_FORMAT_RGB565; 2395 default: 2396 case DISPPLANE_BGRX888: 2397 return DRM_FORMAT_XRGB8888; 2398 case DISPPLANE_RGBX888: 2399 return DRM_FORMAT_XBGR8888; 2400 case DISPPLANE_BGRX101010: 2401 return DRM_FORMAT_XRGB2101010; 2402 case DISPPLANE_RGBX101010: 2403 return DRM_FORMAT_XBGR2101010; 2404 } 2405 } 2406 2407 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2408 { 2409 switch (format) { 2410 case PLANE_CTL_FORMAT_RGB_565: 2411 return DRM_FORMAT_RGB565; 2412 default: 2413 case PLANE_CTL_FORMAT_XRGB_8888: 2414 if (rgb_order) { 2415 if (alpha) 2416 return DRM_FORMAT_ABGR8888; 2417 else 2418 return DRM_FORMAT_XBGR8888; 2419 } else { 2420 if (alpha) 2421 return DRM_FORMAT_ARGB8888; 2422 else 2423 return DRM_FORMAT_XRGB8888; 2424 } 2425 case PLANE_CTL_FORMAT_XRGB_2101010: 2426 if (rgb_order) 2427 return DRM_FORMAT_XBGR2101010; 2428 else 2429 return DRM_FORMAT_XRGB2101010; 2430 } 2431 } 2432 2433 static bool 2434 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 2435 struct intel_initial_plane_config *plane_config) 2436 { 2437 struct drm_device *dev = crtc->base.dev; 2438 struct drm_i915_private *dev_priv = to_i915(dev); 2439 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2440 struct drm_i915_gem_object *obj = NULL; 2441 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2442 struct drm_framebuffer *fb = &plane_config->fb->base; 2443 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2444 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2445 PAGE_SIZE); 2446 2447 size_aligned -= base_aligned; 2448 2449 if (plane_config->size == 0) 2450 return false; 2451 2452 /* If the FB is too big, just don't use it since fbdev is not very 2453 * important and we should 
probably use that space with FBC or other 2454 * features. */ 2455 if (size_aligned * 2 > ggtt->stolen_usable_size) 2456 return false; 2457 2458 mutex_lock(&dev->struct_mutex); 2459 2460 obj = i915_gem_object_create_stolen_for_preallocated(dev, 2461 base_aligned, 2462 base_aligned, 2463 size_aligned); 2464 if (!obj) { 2465 mutex_unlock(&dev->struct_mutex); 2466 return false; 2467 } 2468 2469 if (plane_config->tiling == I915_TILING_X) 2470 obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; 2471 2472 mode_cmd.pixel_format = fb->pixel_format; 2473 mode_cmd.width = fb->width; 2474 mode_cmd.height = fb->height; 2475 mode_cmd.pitches[0] = fb->pitches[0]; 2476 mode_cmd.modifier[0] = fb->modifier[0]; 2477 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2478 2479 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), 2480 &mode_cmd, obj)) { 2481 DRM_DEBUG_KMS("intel fb init failed\n"); 2482 goto out_unref_obj; 2483 } 2484 2485 mutex_unlock(&dev->struct_mutex); 2486 2487 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2488 return true; 2489 2490 out_unref_obj: 2491 i915_gem_object_put(obj); 2492 mutex_unlock(&dev->struct_mutex); 2493 return false; 2494 } 2495 2496 /* Update plane->state->fb to match plane->fb after driver-internal updates */ 2497 static void 2498 update_state_fb(struct drm_plane *plane) 2499 { 2500 if (plane->fb == plane->state->fb) 2501 return; 2502 2503 if (plane->state->fb) 2504 drm_framebuffer_unreference(plane->state->fb); 2505 plane->state->fb = plane->fb; 2506 if (plane->state->fb) 2507 drm_framebuffer_reference(plane->state->fb); 2508 } 2509 2510 static void 2511 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2512 struct intel_initial_plane_config *plane_config) 2513 { 2514 struct drm_device *dev = intel_crtc->base.dev; 2515 struct drm_i915_private *dev_priv = to_i915(dev); 2516 struct drm_crtc *c; 2517 struct intel_crtc *i; 2518 struct drm_i915_gem_object *obj; 2519 struct drm_plane *primary = intel_crtc->base.primary; 2520 struct drm_plane_state *plane_state = primary->state; 2521 struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2522 struct intel_plane *intel_plane = to_intel_plane(primary); 2523 struct intel_plane_state *intel_state = 2524 to_intel_plane_state(plane_state); 2525 struct drm_framebuffer *fb; 2526 2527 if (!plane_config->fb) 2528 return; 2529 2530 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2531 fb = &plane_config->fb->base; 2532 goto valid_fb; 2533 } 2534 2535 kfree(plane_config->fb); 2536 2537 /* 2538 * Failed to alloc the obj, check to see if we should share 2539 * an fb with another CRTC instead 2540 */ 2541 for_each_crtc(dev, c) { 2542 i = to_intel_crtc(c); 2543 2544 if (c == &intel_crtc->base) 2545 continue; 2546 2547 if (!i->active) 2548 continue; 2549 2550 fb = c->primary->fb; 2551 if (!fb) 2552 continue; 2553 2554 obj = intel_fb_obj(fb); 2555 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2556 drm_framebuffer_reference(fb); 2557 goto valid_fb; 2558 } 2559 } 2560 2561 /* 2562 * We've failed to reconstruct the BIOS FB. Current display state 2563 * indicates that the primary plane is visible, but has a NULL FB, 2564 * which will lead to problems later if we don't fix it up. The 2565 * simplest solution is to just disable the primary plane now and 2566 * pretend the BIOS never had it enabled. 
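	 * The bookkeeping below clears the plane's visible flag and its bit
	 * in the crtc state's plane_mask before calling ->disable_plane(),
	 * so software state and hardware agree again.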
2567 */ 2568 to_intel_plane_state(plane_state)->visible = false; 2569 crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); 2570 intel_pre_disable_primary_noatomic(&intel_crtc->base); 2571 intel_plane->disable_plane(primary, &intel_crtc->base); 2572 2573 return; 2574 2575 valid_fb: 2576 plane_state->src_x = 0; 2577 plane_state->src_y = 0; 2578 plane_state->src_w = fb->width << 16; 2579 plane_state->src_h = fb->height << 16; 2580 2581 plane_state->crtc_x = 0; 2582 plane_state->crtc_y = 0; 2583 plane_state->crtc_w = fb->width; 2584 plane_state->crtc_h = fb->height; 2585 2586 intel_state->src.x1 = plane_state->src_x; 2587 intel_state->src.y1 = plane_state->src_y; 2588 intel_state->src.x2 = plane_state->src_x + plane_state->src_w; 2589 intel_state->src.y2 = plane_state->src_y + plane_state->src_h; 2590 intel_state->dst.x1 = plane_state->crtc_x; 2591 intel_state->dst.y1 = plane_state->crtc_y; 2592 intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w; 2593 intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h; 2594 2595 obj = intel_fb_obj(fb); 2596 if (i915_gem_object_is_tiled(obj)) 2597 dev_priv->preserve_bios_swizzle = true; 2598 2599 drm_framebuffer_reference(fb); 2600 primary->fb = primary->state->fb = fb; 2601 primary->crtc = primary->state->crtc = &intel_crtc->base; 2602 intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary)); 2603 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 2604 &obj->frontbuffer_bits); 2605 } 2606 2607 static void i9xx_update_primary_plane(struct drm_plane *primary, 2608 const struct intel_crtc_state *crtc_state, 2609 const struct intel_plane_state *plane_state) 2610 { 2611 struct drm_device *dev = primary->dev; 2612 struct drm_i915_private *dev_priv = to_i915(dev); 2613 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2614 struct drm_framebuffer *fb = plane_state->base.fb; 2615 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2616 int plane = intel_crtc->plane; 2617 u32 linear_offset; 2618 u32 dspcntr; 2619 i915_reg_t reg = DSPCNTR(plane); 2620 unsigned int rotation = plane_state->base.rotation; 2621 int cpp = drm_format_plane_cpp(fb->pixel_format, 0); 2622 int x = plane_state->src.x1 >> 16; 2623 int y = plane_state->src.y1 >> 16; 2624 2625 dspcntr = DISPPLANE_GAMMA_ENABLE; 2626 2627 dspcntr |= DISPLAY_PLANE_ENABLE; 2628 2629 if (INTEL_INFO(dev)->gen < 4) { 2630 if (intel_crtc->pipe == PIPE_B) 2631 dspcntr |= DISPPLANE_SEL_PIPE_B; 2632 2633 /* pipesrc and dspsize control the size that is scaled from, 2634 * which should always be the user's requested size. 
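		 * For example, a 1920x1080 pipe source is programmed below as
		 * ((1080 - 1) << 16) | (1920 - 1) = 0x0437077f.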
2635 */ 2636 I915_WRITE(DSPSIZE(plane), 2637 ((crtc_state->pipe_src_h - 1) << 16) | 2638 (crtc_state->pipe_src_w - 1)); 2639 I915_WRITE(DSPPOS(plane), 0); 2640 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) { 2641 I915_WRITE(PRIMSIZE(plane), 2642 ((crtc_state->pipe_src_h - 1) << 16) | 2643 (crtc_state->pipe_src_w - 1)); 2644 I915_WRITE(PRIMPOS(plane), 0); 2645 I915_WRITE(PRIMCNSTALPHA(plane), 0); 2646 } 2647 2648 switch (fb->pixel_format) { 2649 case DRM_FORMAT_C8: 2650 dspcntr |= DISPPLANE_8BPP; 2651 break; 2652 case DRM_FORMAT_XRGB1555: 2653 dspcntr |= DISPPLANE_BGRX555; 2654 break; 2655 case DRM_FORMAT_RGB565: 2656 dspcntr |= DISPPLANE_BGRX565; 2657 break; 2658 case DRM_FORMAT_XRGB8888: 2659 dspcntr |= DISPPLANE_BGRX888; 2660 break; 2661 case DRM_FORMAT_XBGR8888: 2662 dspcntr |= DISPPLANE_RGBX888; 2663 break; 2664 case DRM_FORMAT_XRGB2101010: 2665 dspcntr |= DISPPLANE_BGRX101010; 2666 break; 2667 case DRM_FORMAT_XBGR2101010: 2668 dspcntr |= DISPPLANE_RGBX101010; 2669 break; 2670 default: 2671 BUG(); 2672 } 2673 2674 if (INTEL_INFO(dev)->gen >= 4 && i915_gem_object_is_tiled(obj)) 2675 dspcntr |= DISPPLANE_TILED; 2676 2677 if (IS_G4X(dev)) 2678 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2679 2680 linear_offset = y * fb->pitches[0] + x * cpp; 2681 2682 if (INTEL_INFO(dev)->gen >= 4) { 2683 intel_crtc->dspaddr_offset = 2684 intel_compute_tile_offset(&x, &y, fb, 0, 2685 fb->pitches[0], rotation); 2686 linear_offset -= intel_crtc->dspaddr_offset; 2687 } else { 2688 intel_crtc->dspaddr_offset = linear_offset; 2689 } 2690 2691 if (rotation == DRM_ROTATE_180) { 2692 dspcntr |= DISPPLANE_ROTATE_180; 2693 2694 x += (crtc_state->pipe_src_w - 1); 2695 y += (crtc_state->pipe_src_h - 1); 2696 2697 /* Finding the last pixel of the last line of the display 2698 data and adding to linear_offset*/ 2699 linear_offset += 2700 (crtc_state->pipe_src_h - 1) * fb->pitches[0] + 2701 (crtc_state->pipe_src_w - 1) * cpp; 2702 } 2703 2704 intel_crtc->adjusted_x = x; 2705 intel_crtc->adjusted_y = y; 2706 2707 I915_WRITE(reg, dspcntr); 2708 2709 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2710 if (INTEL_INFO(dev)->gen >= 4) { 2711 I915_WRITE(DSPSURF(plane), 2712 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2713 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2714 I915_WRITE(DSPLINOFF(plane), linear_offset); 2715 } else 2716 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); 2717 POSTING_READ(reg); 2718 } 2719 2720 static void i9xx_disable_primary_plane(struct drm_plane *primary, 2721 struct drm_crtc *crtc) 2722 { 2723 struct drm_device *dev = crtc->dev; 2724 struct drm_i915_private *dev_priv = to_i915(dev); 2725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2726 int plane = intel_crtc->plane; 2727 2728 I915_WRITE(DSPCNTR(plane), 0); 2729 if (INTEL_INFO(dev_priv)->gen >= 4) 2730 I915_WRITE(DSPSURF(plane), 0); 2731 else 2732 I915_WRITE(DSPADDR(plane), 0); 2733 POSTING_READ(DSPCNTR(plane)); 2734 } 2735 2736 static void ironlake_update_primary_plane(struct drm_plane *primary, 2737 const struct intel_crtc_state *crtc_state, 2738 const struct intel_plane_state *plane_state) 2739 { 2740 struct drm_device *dev = primary->dev; 2741 struct drm_i915_private *dev_priv = to_i915(dev); 2742 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2743 struct drm_framebuffer *fb = plane_state->base.fb; 2744 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2745 int plane = intel_crtc->plane; 2746 u32 linear_offset; 2747 u32 dspcntr; 2748 i915_reg_t reg = 
DSPCNTR(plane); 2749 unsigned int rotation = plane_state->base.rotation; 2750 int cpp = drm_format_plane_cpp(fb->pixel_format, 0); 2751 int x = plane_state->src.x1 >> 16; 2752 int y = plane_state->src.y1 >> 16; 2753 2754 dspcntr = DISPPLANE_GAMMA_ENABLE; 2755 dspcntr |= DISPLAY_PLANE_ENABLE; 2756 2757 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2758 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 2759 2760 switch (fb->pixel_format) { 2761 case DRM_FORMAT_C8: 2762 dspcntr |= DISPPLANE_8BPP; 2763 break; 2764 case DRM_FORMAT_RGB565: 2765 dspcntr |= DISPPLANE_BGRX565; 2766 break; 2767 case DRM_FORMAT_XRGB8888: 2768 dspcntr |= DISPPLANE_BGRX888; 2769 break; 2770 case DRM_FORMAT_XBGR8888: 2771 dspcntr |= DISPPLANE_RGBX888; 2772 break; 2773 case DRM_FORMAT_XRGB2101010: 2774 dspcntr |= DISPPLANE_BGRX101010; 2775 break; 2776 case DRM_FORMAT_XBGR2101010: 2777 dspcntr |= DISPPLANE_RGBX101010; 2778 break; 2779 default: 2780 BUG(); 2781 } 2782 2783 if (i915_gem_object_is_tiled(obj)) 2784 dspcntr |= DISPPLANE_TILED; 2785 2786 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) 2787 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2788 2789 linear_offset = y * fb->pitches[0] + x * cpp; 2790 intel_crtc->dspaddr_offset = 2791 intel_compute_tile_offset(&x, &y, fb, 0, 2792 fb->pitches[0], rotation); 2793 linear_offset -= intel_crtc->dspaddr_offset; 2794 if (rotation == DRM_ROTATE_180) { 2795 dspcntr |= DISPPLANE_ROTATE_180; 2796 2797 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 2798 x += (crtc_state->pipe_src_w - 1); 2799 y += (crtc_state->pipe_src_h - 1); 2800 2801 /* Finding the last pixel of the last line of the display 2802 data and adding to linear_offset*/ 2803 linear_offset += 2804 (crtc_state->pipe_src_h - 1) * fb->pitches[0] + 2805 (crtc_state->pipe_src_w - 1) * cpp; 2806 } 2807 } 2808 2809 intel_crtc->adjusted_x = x; 2810 intel_crtc->adjusted_y = y; 2811 2812 I915_WRITE(reg, dspcntr); 2813 2814 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2815 I915_WRITE(DSPSURF(plane), 2816 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2817 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2818 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2819 } else { 2820 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2821 I915_WRITE(DSPLINOFF(plane), linear_offset); 2822 } 2823 POSTING_READ(reg); 2824 } 2825 2826 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, 2827 uint64_t fb_modifier, uint32_t pixel_format) 2828 { 2829 if (fb_modifier == DRM_FORMAT_MOD_NONE) { 2830 return 64; 2831 } else { 2832 int cpp = drm_format_plane_cpp(pixel_format, 0); 2833 2834 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp); 2835 } 2836 } 2837 2838 u32 intel_plane_obj_offset(struct intel_plane *intel_plane, 2839 struct drm_i915_gem_object *obj, 2840 unsigned int plane) 2841 { 2842 struct i915_ggtt_view view; 2843 struct i915_vma *vma; 2844 u64 offset; 2845 2846 intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, 2847 intel_plane->base.state->rotation); 2848 2849 vma = i915_gem_obj_to_ggtt_view(obj, &view); 2850 if (WARN(!vma, "ggtt vma for display object not found! 
(view=%u)\n",
		 view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (i.e. unbinds) any scalers that are not in use.
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
	 * while i915 HW rotation is clockwise; that is why the mapping is
	 * swapped here.
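	 * For example, a user-space request of DRM_ROTATE_90 is programmed
	 * below as PLANE_CTL_ROTATE_270, and vice versa.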
2960 */ 2961 case DRM_ROTATE_90: 2962 return PLANE_CTL_ROTATE_270; 2963 case DRM_ROTATE_180: 2964 return PLANE_CTL_ROTATE_180; 2965 case DRM_ROTATE_270: 2966 return PLANE_CTL_ROTATE_90; 2967 default: 2968 MISSING_CASE(rotation); 2969 } 2970 2971 return 0; 2972 } 2973 2974 static void skylake_update_primary_plane(struct drm_plane *plane, 2975 const struct intel_crtc_state *crtc_state, 2976 const struct intel_plane_state *plane_state) 2977 { 2978 struct drm_device *dev = plane->dev; 2979 struct drm_i915_private *dev_priv = to_i915(dev); 2980 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2981 struct drm_framebuffer *fb = plane_state->base.fb; 2982 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2983 int pipe = intel_crtc->pipe; 2984 u32 plane_ctl, stride_div, stride; 2985 u32 tile_height, plane_offset, plane_size; 2986 unsigned int rotation = plane_state->base.rotation; 2987 int x_offset, y_offset; 2988 u32 surf_addr; 2989 int scaler_id = plane_state->scaler_id; 2990 int src_x = plane_state->src.x1 >> 16; 2991 int src_y = plane_state->src.y1 >> 16; 2992 int src_w = drm_rect_width(&plane_state->src) >> 16; 2993 int src_h = drm_rect_height(&plane_state->src) >> 16; 2994 int dst_x = plane_state->dst.x1; 2995 int dst_y = plane_state->dst.y1; 2996 int dst_w = drm_rect_width(&plane_state->dst); 2997 int dst_h = drm_rect_height(&plane_state->dst); 2998 2999 plane_ctl = PLANE_CTL_ENABLE | 3000 PLANE_CTL_PIPE_GAMMA_ENABLE | 3001 PLANE_CTL_PIPE_CSC_ENABLE; 3002 3003 plane_ctl |= skl_plane_ctl_format(fb->pixel_format); 3004 plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); 3005 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 3006 plane_ctl |= skl_plane_ctl_rotation(rotation); 3007 3008 stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], 3009 fb->pixel_format); 3010 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); 3011 3012 WARN_ON(drm_rect_width(&plane_state->src) == 0); 3013 3014 if (intel_rotation_90_or_270(rotation)) { 3015 int cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3016 3017 /* stride = Surface height in tiles */ 3018 tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); 3019 stride = DIV_ROUND_UP(fb->height, tile_height); 3020 x_offset = stride * tile_height - src_y - src_h; 3021 y_offset = src_x; 3022 plane_size = (src_w - 1) << 16 | (src_h - 1); 3023 } else { 3024 stride = fb->pitches[0] / stride_div; 3025 x_offset = src_x; 3026 y_offset = src_y; 3027 plane_size = (src_h - 1) << 16 | (src_w - 1); 3028 } 3029 plane_offset = y_offset << 16 | x_offset; 3030 3031 intel_crtc->adjusted_x = x_offset; 3032 intel_crtc->adjusted_y = y_offset; 3033 3034 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 3035 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); 3036 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); 3037 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 3038 3039 if (scaler_id >= 0) { 3040 uint32_t ps_ctrl = 0; 3041 3042 WARN_ON(!dst_w || !dst_h); 3043 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) | 3044 crtc_state->scaler_state.scalers[scaler_id].mode; 3045 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3046 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3047 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); 3048 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3049 I915_WRITE(PLANE_POS(pipe, 0), 0); 3050 } else { 3051 I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); 3052 } 3053 3054 I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); 3055 3056 POSTING_READ(PLANE_SURF(pipe, 0)); 3057 } 3058 3059 static 
void skylake_disable_primary_plane(struct drm_plane *primary, 3060 struct drm_crtc *crtc) 3061 { 3062 struct drm_device *dev = crtc->dev; 3063 struct drm_i915_private *dev_priv = to_i915(dev); 3064 int pipe = to_intel_crtc(crtc)->pipe; 3065 3066 I915_WRITE(PLANE_CTL(pipe, 0), 0); 3067 I915_WRITE(PLANE_SURF(pipe, 0), 0); 3068 POSTING_READ(PLANE_SURF(pipe, 0)); 3069 } 3070 3071 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 3072 static int 3073 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 3074 int x, int y, enum mode_set_atomic state) 3075 { 3076 /* Support for kgdboc is disabled, this needs a major rework. */ 3077 DRM_ERROR("legacy panic handler not supported any more.\n"); 3078 3079 return -ENODEV; 3080 } 3081 3082 static void intel_complete_page_flips(struct drm_i915_private *dev_priv) 3083 { 3084 struct intel_crtc *crtc; 3085 3086 for_each_intel_crtc(&dev_priv->drm, crtc) 3087 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3088 } 3089 3090 static void intel_update_primary_planes(struct drm_device *dev) 3091 { 3092 struct drm_crtc *crtc; 3093 3094 for_each_crtc(dev, crtc) { 3095 struct intel_plane *plane = to_intel_plane(crtc->primary); 3096 struct intel_plane_state *plane_state = 3097 to_intel_plane_state(plane->base.state); 3098 3099 if (plane_state->visible) 3100 plane->update_plane(&plane->base, 3101 to_intel_crtc_state(crtc->state), 3102 plane_state); 3103 } 3104 } 3105 3106 static int 3107 __intel_display_resume(struct drm_device *dev, 3108 struct drm_atomic_state *state) 3109 { 3110 struct drm_crtc_state *crtc_state; 3111 struct drm_crtc *crtc; 3112 int i, ret; 3113 3114 intel_modeset_setup_hw_state(dev); 3115 i915_redisable_vga(dev); 3116 3117 if (!state) 3118 return 0; 3119 3120 for_each_crtc_in_state(state, crtc, crtc_state, i) { 3121 /* 3122 * Force recalculation even if we restore 3123 * current state. With fast modeset this may not result 3124 * in a modeset when the state is compatible. 3125 */ 3126 crtc_state->mode_changed = true; 3127 } 3128 3129 /* ignore any reset values/BIOS leftovers in the WM registers */ 3130 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3131 3132 ret = drm_atomic_commit(state); 3133 3134 WARN_ON(ret == -EDEADLK); 3135 return ret; 3136 } 3137 3138 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3139 { 3140 return intel_has_gpu_reset(dev_priv) && 3141 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3142 } 3143 3144 void intel_prepare_reset(struct drm_i915_private *dev_priv) 3145 { 3146 struct drm_device *dev = &dev_priv->drm; 3147 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3148 struct drm_atomic_state *state; 3149 int ret; 3150 3151 /* 3152 * Need mode_config.mutex so that we don't 3153 * trample ongoing ->detect() and whatnot. 3154 */ 3155 mutex_lock(&dev->mode_config.mutex); 3156 drm_modeset_acquire_init(ctx, 0); 3157 while (1) { 3158 ret = drm_modeset_lock_all_ctx(dev, ctx); 3159 if (ret != -EDEADLK) 3160 break; 3161 3162 drm_modeset_backoff(ctx); 3163 } 3164 3165 /* reset doesn't touch the display, but flips might get nuked anyway, */ 3166 if (!i915.force_reset_modeset_test && 3167 !gpu_reset_clobbers_display(dev_priv)) 3168 return; 3169 3170 /* 3171 * Disabling the crtcs gracefully seems nicer. Also the 3172 * g33 docs say we should at least disable all the planes. 
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		state = NULL;
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		goto err;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
		goto err;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
	return;

err:
	drm_atomic_state_free(state);
}

void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	int ret;

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	dev_priv->modeset_restore_state = NULL;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		if (!state) {
			/*
			 * Flips in the rings have been nuked by the reset,
			 * so update the base address of all primary
			 * planes to the last fb to make sure we're
			 * showing the correct fb after a reset.
			 *
			 * FIXME: Atomic will make this obsolete since we won't schedule
			 * CS-based flips (which might get lost in gpu resets) any more.
			 */
			intel_update_primary_planes(dev);
		} else {
			ret = __intel_display_resume(dev, state);
			if (ret)
				DRM_ERROR("Restoring old state failed with %i\n", ret);
		}
	} else {
		/*
		 * The display has been reset as well,
		 * so we need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_modeset_init_hw(dev);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);
}

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned reset_counter;
	bool pending;

	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
	if (intel_crtc->reset_counter != reset_counter)
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->flip_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}

static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called.
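	 * When that happens, the legacy crtc->base.mode below is synced from
	 * the committed atomic state by hand.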
*/ 3284 crtc->base.mode = crtc->base.state->mode; 3285 3286 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", 3287 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, 3288 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3289 3290 /* 3291 * Update pipe size and adjust fitter if needed: the reason for this is 3292 * that in compute_mode_changes we check the native mode (not the pfit 3293 * mode) to see if we can flip rather than do a full mode set. In the 3294 * fastboot case, we'll flip, but if we don't update the pipesrc and 3295 * pfit state, we'll end up with a big fb scanned out into the wrong 3296 * sized surface. 3297 */ 3298 3299 I915_WRITE(PIPESRC(crtc->pipe), 3300 ((pipe_config->pipe_src_w - 1) << 16) | 3301 (pipe_config->pipe_src_h - 1)); 3302 3303 /* on skylake this is done by detaching scalers */ 3304 if (INTEL_INFO(dev)->gen >= 9) { 3305 skl_detach_scalers(crtc); 3306 3307 if (pipe_config->pch_pfit.enabled) 3308 skylake_pfit_enable(crtc); 3309 } else if (HAS_PCH_SPLIT(dev)) { 3310 if (pipe_config->pch_pfit.enabled) 3311 ironlake_pfit_enable(crtc); 3312 else if (old_crtc_state->pch_pfit.enabled) 3313 ironlake_pfit_disable(crtc, true); 3314 } 3315 } 3316 3317 static void intel_fdi_normal_train(struct drm_crtc *crtc) 3318 { 3319 struct drm_device *dev = crtc->dev; 3320 struct drm_i915_private *dev_priv = to_i915(dev); 3321 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3322 int pipe = intel_crtc->pipe; 3323 i915_reg_t reg; 3324 u32 temp; 3325 3326 /* enable normal train */ 3327 reg = FDI_TX_CTL(pipe); 3328 temp = I915_READ(reg); 3329 if (IS_IVYBRIDGE(dev)) { 3330 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3331 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3332 } else { 3333 temp &= ~FDI_LINK_TRAIN_NONE; 3334 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3335 } 3336 I915_WRITE(reg, temp); 3337 3338 reg = FDI_RX_CTL(pipe); 3339 temp = I915_READ(reg); 3340 if (HAS_PCH_CPT(dev)) { 3341 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3342 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3343 } else { 3344 temp &= ~FDI_LINK_TRAIN_NONE; 3345 temp |= FDI_LINK_TRAIN_NONE; 3346 } 3347 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3348 3349 /* wait one idle pattern time */ 3350 POSTING_READ(reg); 3351 udelay(1000); 3352 3353 /* IVB wants error correction enabled */ 3354 if (IS_IVYBRIDGE(dev)) 3355 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3356 FDI_FE_ERRC_ENABLE); 3357 } 3358 3359 /* The FDI link training functions for ILK/Ibexpeak. 
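 *
 * The flow below: enable the CPU FDI TX and PCH FDI RX sides with
 * training pattern 1 and poll FDI_RX_IIR for bit lock, then switch
 * both sides to pattern 2 and poll for symbol lock;
 * intel_fdi_normal_train() later moves the trained link to the normal
 * (idle) pattern.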
*/ 3360 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 3361 { 3362 struct drm_device *dev = crtc->dev; 3363 struct drm_i915_private *dev_priv = to_i915(dev); 3364 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3365 int pipe = intel_crtc->pipe; 3366 i915_reg_t reg; 3367 u32 temp, tries; 3368 3369 /* FDI needs bits from pipe first */ 3370 assert_pipe_enabled(dev_priv, pipe); 3371 3372 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3373 for train result */ 3374 reg = FDI_RX_IMR(pipe); 3375 temp = I915_READ(reg); 3376 temp &= ~FDI_RX_SYMBOL_LOCK; 3377 temp &= ~FDI_RX_BIT_LOCK; 3378 I915_WRITE(reg, temp); 3379 I915_READ(reg); 3380 udelay(150); 3381 3382 /* enable CPU FDI TX and PCH FDI RX */ 3383 reg = FDI_TX_CTL(pipe); 3384 temp = I915_READ(reg); 3385 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3386 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3387 temp &= ~FDI_LINK_TRAIN_NONE; 3388 temp |= FDI_LINK_TRAIN_PATTERN_1; 3389 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3390 3391 reg = FDI_RX_CTL(pipe); 3392 temp = I915_READ(reg); 3393 temp &= ~FDI_LINK_TRAIN_NONE; 3394 temp |= FDI_LINK_TRAIN_PATTERN_1; 3395 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3396 3397 POSTING_READ(reg); 3398 udelay(150); 3399 3400 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3401 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3402 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 3403 FDI_RX_PHASE_SYNC_POINTER_EN); 3404 3405 reg = FDI_RX_IIR(pipe); 3406 for (tries = 0; tries < 5; tries++) { 3407 temp = I915_READ(reg); 3408 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3409 3410 if ((temp & FDI_RX_BIT_LOCK)) { 3411 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3412 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3413 break; 3414 } 3415 } 3416 if (tries == 5) 3417 DRM_ERROR("FDI train 1 fail!\n"); 3418 3419 /* Train 2 */ 3420 reg = FDI_TX_CTL(pipe); 3421 temp = I915_READ(reg); 3422 temp &= ~FDI_LINK_TRAIN_NONE; 3423 temp |= FDI_LINK_TRAIN_PATTERN_2; 3424 I915_WRITE(reg, temp); 3425 3426 reg = FDI_RX_CTL(pipe); 3427 temp = I915_READ(reg); 3428 temp &= ~FDI_LINK_TRAIN_NONE; 3429 temp |= FDI_LINK_TRAIN_PATTERN_2; 3430 I915_WRITE(reg, temp); 3431 3432 POSTING_READ(reg); 3433 udelay(150); 3434 3435 reg = FDI_RX_IIR(pipe); 3436 for (tries = 0; tries < 5; tries++) { 3437 temp = I915_READ(reg); 3438 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3439 3440 if (temp & FDI_RX_SYMBOL_LOCK) { 3441 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3442 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3443 break; 3444 } 3445 } 3446 if (tries == 5) 3447 DRM_ERROR("FDI train 2 fail!\n"); 3448 3449 DRM_DEBUG_KMS("FDI train done\n"); 3450 3451 } 3452 3453 static const int snb_b_fdi_train_param[] = { 3454 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3455 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3456 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3457 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3458 }; 3459 3460 /* The FDI link training functions for SNB/Cougarpoint. 
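 *
 * Compared to the ILK routine above, the SNB path also walks the
 * snb_b_fdi_train_param[] vswing/pre-emphasis table, retrying each
 * training pattern at every drive level, and uses the CPT-specific
 * pattern bits on CPT PCHs.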
*/ 3461 static void gen6_fdi_link_train(struct drm_crtc *crtc) 3462 { 3463 struct drm_device *dev = crtc->dev; 3464 struct drm_i915_private *dev_priv = to_i915(dev); 3465 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3466 int pipe = intel_crtc->pipe; 3467 i915_reg_t reg; 3468 u32 temp, i, retry; 3469 3470 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3471 for train result */ 3472 reg = FDI_RX_IMR(pipe); 3473 temp = I915_READ(reg); 3474 temp &= ~FDI_RX_SYMBOL_LOCK; 3475 temp &= ~FDI_RX_BIT_LOCK; 3476 I915_WRITE(reg, temp); 3477 3478 POSTING_READ(reg); 3479 udelay(150); 3480 3481 /* enable CPU FDI TX and PCH FDI RX */ 3482 reg = FDI_TX_CTL(pipe); 3483 temp = I915_READ(reg); 3484 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3485 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3486 temp &= ~FDI_LINK_TRAIN_NONE; 3487 temp |= FDI_LINK_TRAIN_PATTERN_1; 3488 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3489 /* SNB-B */ 3490 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3491 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3492 3493 I915_WRITE(FDI_RX_MISC(pipe), 3494 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3495 3496 reg = FDI_RX_CTL(pipe); 3497 temp = I915_READ(reg); 3498 if (HAS_PCH_CPT(dev)) { 3499 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3500 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3501 } else { 3502 temp &= ~FDI_LINK_TRAIN_NONE; 3503 temp |= FDI_LINK_TRAIN_PATTERN_1; 3504 } 3505 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3506 3507 POSTING_READ(reg); 3508 udelay(150); 3509 3510 for (i = 0; i < 4; i++) { 3511 reg = FDI_TX_CTL(pipe); 3512 temp = I915_READ(reg); 3513 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3514 temp |= snb_b_fdi_train_param[i]; 3515 I915_WRITE(reg, temp); 3516 3517 POSTING_READ(reg); 3518 udelay(500); 3519 3520 for (retry = 0; retry < 5; retry++) { 3521 reg = FDI_RX_IIR(pipe); 3522 temp = I915_READ(reg); 3523 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3524 if (temp & FDI_RX_BIT_LOCK) { 3525 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3526 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3527 break; 3528 } 3529 udelay(50); 3530 } 3531 if (retry < 5) 3532 break; 3533 } 3534 if (i == 4) 3535 DRM_ERROR("FDI train 1 fail!\n"); 3536 3537 /* Train 2 */ 3538 reg = FDI_TX_CTL(pipe); 3539 temp = I915_READ(reg); 3540 temp &= ~FDI_LINK_TRAIN_NONE; 3541 temp |= FDI_LINK_TRAIN_PATTERN_2; 3542 if (IS_GEN6(dev)) { 3543 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3544 /* SNB-B */ 3545 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3546 } 3547 I915_WRITE(reg, temp); 3548 3549 reg = FDI_RX_CTL(pipe); 3550 temp = I915_READ(reg); 3551 if (HAS_PCH_CPT(dev)) { 3552 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3553 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3554 } else { 3555 temp &= ~FDI_LINK_TRAIN_NONE; 3556 temp |= FDI_LINK_TRAIN_PATTERN_2; 3557 } 3558 I915_WRITE(reg, temp); 3559 3560 POSTING_READ(reg); 3561 udelay(150); 3562 3563 for (i = 0; i < 4; i++) { 3564 reg = FDI_TX_CTL(pipe); 3565 temp = I915_READ(reg); 3566 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3567 temp |= snb_b_fdi_train_param[i]; 3568 I915_WRITE(reg, temp); 3569 3570 POSTING_READ(reg); 3571 udelay(500); 3572 3573 for (retry = 0; retry < 5; retry++) { 3574 reg = FDI_RX_IIR(pipe); 3575 temp = I915_READ(reg); 3576 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3577 if (temp & FDI_RX_SYMBOL_LOCK) { 3578 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3579 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3580 break; 3581 } 3582 udelay(50); 3583 } 3584 if (retry < 5) 3585 break; 3586 } 3587 if (i == 4) 3588 DRM_ERROR("FDI train 2 fail!\n"); 3589 3590 DRM_DEBUG_KMS("FDI train done.\n"); 3591 } 3592 3593 
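/*
 * Both the SNB routine above and the IVB routine below share one retry
 * skeleton: walk the drive-strength table and poll the RX IIR for a
 * lock bit at each level. A minimal sketch of that skeleton, with a
 * hypothetical program_vswing()/poll_lock() standing in for the
 * FDI_TX_CTL writes and FDI_RX_IIR reads (illustrative only, not used
 * by the driver):
 *
 *	for (i = 0; i < ARRAY_SIZE(snb_b_fdi_train_param); i++) {
 *		program_vswing(dev_priv, pipe, snb_b_fdi_train_param[i]);
 *		if (poll_lock(dev_priv, pipe, FDI_RX_BIT_LOCK))
 *			break;
 *	}
 */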
/* Manual link training for Ivy Bridge A0 parts */ 3594 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 3595 { 3596 struct drm_device *dev = crtc->dev; 3597 struct drm_i915_private *dev_priv = to_i915(dev); 3598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3599 int pipe = intel_crtc->pipe; 3600 i915_reg_t reg; 3601 u32 temp, i, j; 3602 3603 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3604 for train result */ 3605 reg = FDI_RX_IMR(pipe); 3606 temp = I915_READ(reg); 3607 temp &= ~FDI_RX_SYMBOL_LOCK; 3608 temp &= ~FDI_RX_BIT_LOCK; 3609 I915_WRITE(reg, temp); 3610 3611 POSTING_READ(reg); 3612 udelay(150); 3613 3614 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 3615 I915_READ(FDI_RX_IIR(pipe))); 3616 3617 /* Try each vswing and preemphasis setting twice before moving on */ 3618 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 3619 /* disable first in case we need to retry */ 3620 reg = FDI_TX_CTL(pipe); 3621 temp = I915_READ(reg); 3622 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 3623 temp &= ~FDI_TX_ENABLE; 3624 I915_WRITE(reg, temp); 3625 3626 reg = FDI_RX_CTL(pipe); 3627 temp = I915_READ(reg); 3628 temp &= ~FDI_LINK_TRAIN_AUTO; 3629 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3630 temp &= ~FDI_RX_ENABLE; 3631 I915_WRITE(reg, temp); 3632 3633 /* enable CPU FDI TX and PCH FDI RX */ 3634 reg = FDI_TX_CTL(pipe); 3635 temp = I915_READ(reg); 3636 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3637 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3638 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 3639 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3640 temp |= snb_b_fdi_train_param[j/2]; 3641 temp |= FDI_COMPOSITE_SYNC; 3642 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3643 3644 I915_WRITE(FDI_RX_MISC(pipe), 3645 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3646 3647 reg = FDI_RX_CTL(pipe); 3648 temp = I915_READ(reg); 3649 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3650 temp |= FDI_COMPOSITE_SYNC; 3651 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3652 3653 POSTING_READ(reg); 3654 udelay(1); /* should be 0.5us */ 3655 3656 for (i = 0; i < 4; i++) { 3657 reg = FDI_RX_IIR(pipe); 3658 temp = I915_READ(reg); 3659 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3660 3661 if (temp & FDI_RX_BIT_LOCK || 3662 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 3663 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3664 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 3665 i); 3666 break; 3667 } 3668 udelay(1); /* should be 0.5us */ 3669 } 3670 if (i == 4) { 3671 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 3672 continue; 3673 } 3674 3675 /* Train 2 */ 3676 reg = FDI_TX_CTL(pipe); 3677 temp = I915_READ(reg); 3678 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3679 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 3680 I915_WRITE(reg, temp); 3681 3682 reg = FDI_RX_CTL(pipe); 3683 temp = I915_READ(reg); 3684 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3685 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3686 I915_WRITE(reg, temp); 3687 3688 POSTING_READ(reg); 3689 udelay(2); /* should be 1.5us */ 3690 3691 for (i = 0; i < 4; i++) { 3692 reg = FDI_RX_IIR(pipe); 3693 temp = I915_READ(reg); 3694 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3695 3696 if (temp & FDI_RX_SYMBOL_LOCK || 3697 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 3698 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3699 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 3700 i); 3701 goto train_done; 3702 } 3703 udelay(2); /* should be 1.5us */ 3704 } 3705 if (i == 4) 3706 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 3707 } 3708 3709 train_done: 3710 DRM_DEBUG_KMS("FDI 
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
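/*
 * The (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11 dance above (and
 * in ironlake_fdi_disable() below) relocates the pipe's BPC field: PIPECONF
 * carries it in bits 7:5, while FDI_RX_CTL expects the same 3-bit value in
 * bits 18:16, hence the 0x7 << 16 mask being cleared first and the shift
 * left by 11.
 */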
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
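/*
 * Worked example of the divisor math above (values chosen for illustration):
 * for a 108000 kHz crtc_clock, lpt_program_iclkip() computes
 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000 << 0) = 1600, so
 * divsel = 1600 / 64 - 2 = 23, phaseinc = 1600 % 64 = 0 and auxdiv = 0.
 * lpt_get_iclkip() reverses this: (23 + 2) * 64 + 0 = 1600, and
 * 172800000 / (1600 << 0) = 108000 kHz again.
 */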
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}
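/*
 * Concretely: pipe B running with more than two FDI lanes needs the whole
 * shared B/C link to itself, so bifurcation must be off; with one or two
 * lanes the link is split so that pipe C can use the other half, and any
 * use of pipe C forces the split on. Pipe A has its own dedicated FDI link
 * and is unaffected, hence the empty PIPE_A case above.
 */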
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n",
				  pipe_name(pipe));
	}
}

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h) :
		(src_w != dst_w || src_h != dst_h);

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or a force detach is requested, free the scaler bound to this
	 * plane/crtc; to do this, update crtc_state->scaler_state.scaler_users.
	 *
	 * Here the scaler state in crtc_state is set free so that the
	 * scaler can be assigned to another user. The actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
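/*
 * For 90/270 degree rotation the source dimensions are compared swapped
 * against the destination above: e.g. a 1920x1080 source rotated 90 degrees
 * and scanned out as a 1080x1920 rectangle has src_h (1080) == dst_w (1080)
 * and src_w (1920) == dst_h (1920), so no scaler is needed.
 */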
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return:
 *   0 - scaler_usage updated successfully
 *   error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->base.name,
		      intel_crtc->pipe, SKL_CRTC_INDEX);

	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
				 &state->scaler_state.scaler_id, DRM_ROTATE_0,
				 state->pipe_src_w, state->pipe_src_h,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return:
 *   0 - scaler_usage updated successfully
 *   error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_plane->base.name,
		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
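/*
 * Note on the >> 16 above: atomic plane source rectangles (plane_state->src)
 * are in 16.16 fixed-point coordinates, so shifting right by 16 yields whole
 * pixels; destination rectangles are already in integer pixels.
 */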
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}

static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
				   PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
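/*
 * In both panel fitter paths above, pch_pfit.pos and pch_pfit.size are
 * pre-packed register values: the x position/width live in the upper 16 bits
 * and the y position/height in the lower 16 bits, matching the
 * PF_WIN_POS/PF_WIN_SZ (and SKL_PS_WIN_*) register layout.
 */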
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = to_i915(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}
}

static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		if (primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base);
	}
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		if (old_primary_state->visible &&
		    (modeset || !primary_state->visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
		crtc->wm.cxsr_allowed = false;

		/*
		 * Vblank time updates from the shadow to live plane control register
		 * are blocked if the memory self-refresh mode is active at that
		 * moment. So to make sure the plane gets truly disabled, disable
		 * first the self-refresh mode. The self-refresh enable bit in turn
		 * will be checked/applied by the HW only at the next frame start
		 * event which is after the vblank start event, so we need to have a
		 * wait-for-vblank between disabling the plane and the pipe.
		 */
		if (old_crtc_state->base.active) {
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->wm.vlv.cxsr = false;
			intel_wait_for_vblank(dev, crtc->pipe);
		}
	}

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm) {
		ilk_disable_lp_wm(dev);
		intel_wait_for_vblank(dev, crtc->pipe);
	}

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(&crtc->base);
}

static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
					     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(crtc);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(crtc);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else
		intel_update_watermarks(crtc);
	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}

static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);
	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}

enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
	case INTEL_OUTPUT_DP:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}

static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	if (crtc_state->shared_dpll)
		mask |= BIT(POWER_DOMAIN_PLLS);

	return mask;
}

static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
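/*
 * Illustrative example (hypothetical configuration): an active pipe A
 * driving eDP on port A with a shared DPLL and the panel fitter in use would
 * collect POWER_DOMAIN_PIPE_A, POWER_DOMAIN_TRANSCODER_EDP,
 * POWER_DOMAIN_PIPE_A_PANEL_FITTER, POWER_DOMAIN_PORT_DDI_A_LANES and
 * POWER_DOMAIN_PLLS in the mask above. modeset_get_crtc_power_domains()
 * then grabs only the bits that are new relative to the previous state and
 * returns the ones that should be dropped.
 */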
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (INTEL_INFO(dev_priv)->gen >= 9 ||
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq*95/100;
	else if (INTEL_INFO(dev_priv)->gen < 4)
		return 2*max_cdclk_freq*90/100;
	else
		return max_cdclk_freq*90/100;
}

static int skl_calc_cdclk(int max_pixclk, int vco);

static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev)) {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 MHz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VBT, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}

static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
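/*
 * Worked example of the encoding above: ".1 fixpoint" means one fractional
 * binary digit, i.e. units of 0.5 MHz. For cdclk = 337500 kHz the field is
 * DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673, which encodes 336.5 MHz,
 * i.e. the cdclk frequency less the 1 MHz offset the hardware expects.
 */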
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
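/*
 * Worked example, assuming the usual 19200 kHz BXT reference clock: a target
 * cdclk of 288000 kHz selects ratio 60, so the DE PLL runs at
 * 19200 * 60 = 1152000 kHz. bxt_set_cdclk() below then sees
 * vco / cdclk = 4 and picks the cd2x divider of 2
 * (cdclk = vco / 2 / 2 = 288000 kHz).
 */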
5536 */
5537 if (cdclk >= 500000)
5538 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5539 I915_WRITE(CDCLK_CTL, val);
5540
5541 mutex_lock(&dev_priv->rps.hw_lock);
5542 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5543 DIV_ROUND_UP(cdclk, 25000));
5544 mutex_unlock(&dev_priv->rps.hw_lock);
5545
5546 if (ret) {
5547 DRM_ERROR("PCode CDCLK freq set failed (err %d, freq %d)\n",
5548 ret, cdclk);
5549 return;
5550 }
5551
5552 intel_update_cdclk(&dev_priv->drm);
5553 }
5554
5555 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
5556 {
5557 u32 cdctl, expected;
5558
5559 intel_update_cdclk(&dev_priv->drm);
5560
5561 if (dev_priv->cdclk_pll.vco == 0 ||
5562 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5563 goto sanitize;
5564
5565 /* DPLL okay; verify the cdclk
5566 *
5567 * Some BIOS versions leave an incorrect decimal frequency value and
5568 * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
5569 * so sanitize this register.
5570 */
5571 cdctl = I915_READ(CDCLK_CTL);
5572 /*
5573 * Let's ignore the pipe field, since the BIOS could have configured
5574 * the divider either synced to an active pipe or asynchronously
5575 * (PIPE_NONE).
5576 */
5577 cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
5578
5579 expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
5580 skl_cdclk_decimal(dev_priv->cdclk_freq);
5581 /*
5582 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5583 * enable otherwise.
5584 */
5585 if (dev_priv->cdclk_freq >= 500000)
5586 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5587
5588 if (cdctl == expected)
5589 /* All well; nothing to sanitize */
5590 return;
5591
5592 sanitize:
5593 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5594
5595 /* force cdclk programming */
5596 dev_priv->cdclk_freq = 0;
5597
5598 /* force full PLL disable + enable */
5599 dev_priv->cdclk_pll.vco = -1;
5600 }
5601
5602 void bxt_init_cdclk(struct drm_i915_private *dev_priv)
5603 {
5604 bxt_sanitize_cdclk(dev_priv);
5605
5606 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
5607 return;
5608
5609 /*
5610 * FIXME:
5611 * - The initial CDCLK needs to be read from VBT.
5612 * This needs to happen once the VBT changes for BXT land.
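* Until then we fall back to bxt_calc_cdclk(0) below, which with no
* active pipes yields the lowest bin, 144000 kHz.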
5613 */ 5614 bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0)); 5615 } 5616 5617 void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) 5618 { 5619 bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); 5620 } 5621 5622 static int skl_calc_cdclk(int max_pixclk, int vco) 5623 { 5624 if (vco == 8640000) { 5625 if (max_pixclk > 540000) 5626 return 617143; 5627 else if (max_pixclk > 432000) 5628 return 540000; 5629 else if (max_pixclk > 308571) 5630 return 432000; 5631 else 5632 return 308571; 5633 } else { 5634 if (max_pixclk > 540000) 5635 return 675000; 5636 else if (max_pixclk > 450000) 5637 return 540000; 5638 else if (max_pixclk > 337500) 5639 return 450000; 5640 else 5641 return 337500; 5642 } 5643 } 5644 5645 static void 5646 skl_dpll0_update(struct drm_i915_private *dev_priv) 5647 { 5648 u32 val; 5649 5650 dev_priv->cdclk_pll.ref = 24000; 5651 dev_priv->cdclk_pll.vco = 0; 5652 5653 val = I915_READ(LCPLL1_CTL); 5654 if ((val & LCPLL_PLL_ENABLE) == 0) 5655 return; 5656 5657 if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) 5658 return; 5659 5660 val = I915_READ(DPLL_CTRL1); 5661 5662 if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | 5663 DPLL_CTRL1_SSC(SKL_DPLL0) | 5664 DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != 5665 DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) 5666 return; 5667 5668 switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { 5669 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): 5670 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): 5671 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): 5672 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): 5673 dev_priv->cdclk_pll.vco = 8100000; 5674 break; 5675 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): 5676 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): 5677 dev_priv->cdclk_pll.vco = 8640000; 5678 break; 5679 default: 5680 MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5681 break; 5682 } 5683 } 5684 5685 void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) 5686 { 5687 bool changed = dev_priv->skl_preferred_vco_freq != vco; 5688 5689 dev_priv->skl_preferred_vco_freq = vco; 5690 5691 if (changed) 5692 intel_update_max_cdclk(&dev_priv->drm); 5693 } 5694 5695 static void 5696 skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) 5697 { 5698 int min_cdclk = skl_calc_cdclk(0, vco); 5699 u32 val; 5700 5701 WARN_ON(vco != 8100000 && vco != 8640000); 5702 5703 /* select the minimum CDCLK before enabling DPLL 0 */ 5704 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); 5705 I915_WRITE(CDCLK_CTL, val); 5706 POSTING_READ(CDCLK_CTL); 5707 5708 /* 5709 * We always enable DPLL0 with the lowest link rate possible, but still 5710 * taking into account the VCO required to operate the eDP panel at the 5711 * desired frequency. The usual DP link rates operate with a VCO of 5712 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5713 * The modeset code is responsible for the selection of the exact link 5714 * rate later on, with the constraint of choosing a frequency that 5715 * works with vco. 
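* Concretely, in the code below vco == 8640000 selects the
* DPLL_CTRL1_LINK_RATE_1080 encoding, and anything else (i.e. 8100000)
* selects DPLL_CTRL1_LINK_RATE_810.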
5716 */ 5717 val = I915_READ(DPLL_CTRL1); 5718 5719 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5720 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5721 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5722 if (vco == 8640000) 5723 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5724 SKL_DPLL0); 5725 else 5726 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 5727 SKL_DPLL0); 5728 5729 I915_WRITE(DPLL_CTRL1, val); 5730 POSTING_READ(DPLL_CTRL1); 5731 5732 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5733 5734 if (intel_wait_for_register(dev_priv, 5735 LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5736 5)) 5737 DRM_ERROR("DPLL0 not locked\n"); 5738 5739 dev_priv->cdclk_pll.vco = vco; 5740 5741 /* We'll want to keep using the current vco from now on. */ 5742 skl_set_preferred_cdclk_vco(dev_priv, vco); 5743 } 5744 5745 static void 5746 skl_dpll0_disable(struct drm_i915_private *dev_priv) 5747 { 5748 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); 5749 if (intel_wait_for_register(dev_priv, 5750 LCPLL1_CTL, LCPLL_PLL_LOCK, 0, 5751 1)) 5752 DRM_ERROR("Couldn't disable DPLL0\n"); 5753 5754 dev_priv->cdclk_pll.vco = 0; 5755 } 5756 5757 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5758 { 5759 int ret; 5760 u32 val; 5761 5762 /* inform PCU we want to change CDCLK */ 5763 val = SKL_CDCLK_PREPARE_FOR_CHANGE; 5764 mutex_lock(&dev_priv->rps.hw_lock); 5765 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); 5766 mutex_unlock(&dev_priv->rps.hw_lock); 5767 5768 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); 5769 } 5770 5771 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) 5772 { 5773 return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; 5774 } 5775 5776 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) 5777 { 5778 struct drm_device *dev = &dev_priv->drm; 5779 u32 freq_select, pcu_ack; 5780 5781 WARN_ON((cdclk == 24000) != (vco == 0)); 5782 5783 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); 5784 5785 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5786 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5787 return; 5788 } 5789 5790 /* set CDCLK_CTL */ 5791 switch (cdclk) { 5792 case 450000: 5793 case 432000: 5794 freq_select = CDCLK_FREQ_450_432; 5795 pcu_ack = 1; 5796 break; 5797 case 540000: 5798 freq_select = CDCLK_FREQ_540; 5799 pcu_ack = 2; 5800 break; 5801 case 308571: 5802 case 337500: 5803 default: 5804 freq_select = CDCLK_FREQ_337_308; 5805 pcu_ack = 0; 5806 break; 5807 case 617143: 5808 case 675000: 5809 freq_select = CDCLK_FREQ_675_617; 5810 pcu_ack = 3; 5811 break; 5812 } 5813 5814 if (dev_priv->cdclk_pll.vco != 0 && 5815 dev_priv->cdclk_pll.vco != vco) 5816 skl_dpll0_disable(dev_priv); 5817 5818 if (dev_priv->cdclk_pll.vco != vco) 5819 skl_dpll0_enable(dev_priv, vco); 5820 5821 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); 5822 POSTING_READ(CDCLK_CTL); 5823 5824 /* inform PCU of the change */ 5825 mutex_lock(&dev_priv->rps.hw_lock); 5826 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); 5827 mutex_unlock(&dev_priv->rps.hw_lock); 5828 5829 intel_update_cdclk(dev); 5830 } 5831 5832 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); 5833 5834 void skl_uninit_cdclk(struct drm_i915_private *dev_priv) 5835 { 5836 skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0); 5837 } 5838 5839 void skl_init_cdclk(struct drm_i915_private *dev_priv) 5840 { 5841 
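/*
 * Sketch of the flow below (stated here for clarity, derived from the
 * code itself): reuse the BIOS-programmed cdclk when it is usable,
 * otherwise program the minimum cdclk for the preferred vco, e.g.
 * skl_calc_cdclk(0, 8100000) = 337500 kHz, or 308571 kHz for a
 * 8640000 kHz vco.
 */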
int cdclk, vco;
5842
5843 skl_sanitize_cdclk(dev_priv);
5844
5845 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5846 /*
5847 * Use the current vco as our initial
5848 * guess as to what the preferred vco is.
5849 */
5850 if (dev_priv->skl_preferred_vco_freq == 0)
5851 skl_set_preferred_cdclk_vco(dev_priv,
5852 dev_priv->cdclk_pll.vco);
5853 return;
5854 }
5855
5856 vco = dev_priv->skl_preferred_vco_freq;
5857 if (vco == 0)
5858 vco = 8100000;
5859 cdclk = skl_calc_cdclk(0, vco);
5860
5861 skl_set_cdclk(dev_priv, cdclk, vco);
5862 }
5863
5864 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5865 {
5866 uint32_t cdctl, expected;
5867
5868 /*
5869 * Check whether the pre-OS initialized the display.
5870 * The SWF18 scratchpad register is set by the pre-OS and can be used
5871 * by OS drivers to check the status.
5872 */
5873 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5874 goto sanitize;
5875
5876 intel_update_cdclk(&dev_priv->drm);
5877 /* Is the PLL enabled and locked? */
5878 if (dev_priv->cdclk_pll.vco == 0 ||
5879 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5880 goto sanitize;
5881
5882 /* DPLL okay; verify the cdclk
5883 *
5884 * In some instances the frequency selection is correct but the
5885 * decimal part is programmed wrong by the BIOS when the pre-OS does
5886 * not enable the display. Verify that as well.
5887 */
5888 cdctl = I915_READ(CDCLK_CTL);
5889 expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5890 skl_cdclk_decimal(dev_priv->cdclk_freq);
5891 if (cdctl == expected)
5892 /* All well; nothing to sanitize */
5893 return;
5894
5895 sanitize:
5896 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5897
5898 /* force cdclk programming */
5899 dev_priv->cdclk_freq = 0;
5900 /* force full PLL disable + enable */
5901 dev_priv->cdclk_pll.vco = -1;
5902 }
5903
5904 /* Adjust CDclk dividers to allow high res or save power if possible */
5905 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5906 {
5907 struct drm_i915_private *dev_priv = to_i915(dev);
5908 u32 val, cmd;
5909
5910 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5911 != dev_priv->cdclk_freq);
5912
5913 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5914 cmd = 2;
5915 else if (cdclk == 266667)
5916 cmd = 1;
5917 else
5918 cmd = 0;
5919
5920 mutex_lock(&dev_priv->rps.hw_lock);
5921 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5922 val &= ~DSPFREQGUAR_MASK;
5923 val |= (cmd << DSPFREQGUAR_SHIFT);
5924 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5925 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5926 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5927 50)) {
5928 DRM_ERROR("timed out waiting for CDclk change\n");
5929 }
5930 mutex_unlock(&dev_priv->rps.hw_lock);
5931
5932 mutex_lock(&dev_priv->sb_lock);
5933
5934 if (cdclk == 400000) {
5935 u32 divider;
5936
5937 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5938
5939 /* adjust cdclk divider */
5940 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5941 val &= ~CCK_FREQUENCY_VALUES;
5942 val |= divider;
5943 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5944
5945 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5946 CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5947 50))
5948 DRM_ERROR("timed out waiting for CDclk change\n");
5949 }
5950
5951 /* adjust self-refresh exit latency value */
5952 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5953 val &= ~0x7f;
5954
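/*
 * The seven bits cleared above appear to hold the exit latency in
 * 250 ns units; that is a reading of the arithmetic below
 * (4500 / 250 for 4.5 usec), not of any published spec.
 */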
5955 /* 5956 * For high bandwidth configs, we set a higher latency in the bunit 5957 * so that the core display fetch happens in time to avoid underruns. 5958 */ 5959 if (cdclk == 400000) 5960 val |= 4500 / 250; /* 4.5 usec */ 5961 else 5962 val |= 3000 / 250; /* 3.0 usec */ 5963 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); 5964 5965 mutex_unlock(&dev_priv->sb_lock); 5966 5967 intel_update_cdclk(dev); 5968 } 5969 5970 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) 5971 { 5972 struct drm_i915_private *dev_priv = to_i915(dev); 5973 u32 val, cmd; 5974 5975 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5976 != dev_priv->cdclk_freq); 5977 5978 switch (cdclk) { 5979 case 333333: 5980 case 320000: 5981 case 266667: 5982 case 200000: 5983 break; 5984 default: 5985 MISSING_CASE(cdclk); 5986 return; 5987 } 5988 5989 /* 5990 * Specs are full of misinformation, but testing on actual 5991 * hardware has shown that we just need to write the desired 5992 * CCK divider into the Punit register. 5993 */ 5994 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5995 5996 mutex_lock(&dev_priv->rps.hw_lock); 5997 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5998 val &= ~DSPFREQGUAR_MASK_CHV; 5999 val |= (cmd << DSPFREQGUAR_SHIFT_CHV); 6000 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 6001 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 6002 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 6003 50)) { 6004 DRM_ERROR("timed out waiting for CDclk change\n"); 6005 } 6006 mutex_unlock(&dev_priv->rps.hw_lock); 6007 6008 intel_update_cdclk(dev); 6009 } 6010 6011 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 6012 int max_pixclk) 6013 { 6014 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; 6015 int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; 6016 6017 /* 6018 * Really only a few cases to deal with, as only 4 CDclks are supported: 6019 * 200MHz 6020 * 267MHz 6021 * 320/333MHz (depends on HPLL freq) 6022 * 400MHz (VLV only) 6023 * So we check to see whether we're above 90% (VLV) or 95% (CHV) 6024 * of the lower bin and adjust if needed. 6025 * 6026 * We seem to get an unstable or solid color picture at 200MHz. 6027 * Not sure what's wrong. For now use 200MHz only when all pipes 6028 * are off. 6029 */ 6030 if (!IS_CHERRYVIEW(dev_priv) && 6031 max_pixclk > freq_320*limit/100) 6032 return 400000; 6033 else if (max_pixclk > 266667*limit/100) 6034 return freq_320; 6035 else if (max_pixclk > 0) 6036 return 266667; 6037 else 6038 return 200000; 6039 } 6040 6041 static int bxt_calc_cdclk(int max_pixclk) 6042 { 6043 if (max_pixclk > 576000) 6044 return 624000; 6045 else if (max_pixclk > 384000) 6046 return 576000; 6047 else if (max_pixclk > 288000) 6048 return 384000; 6049 else if (max_pixclk > 144000) 6050 return 288000; 6051 else 6052 return 144000; 6053 } 6054 6055 /* Compute the max pixel clock for new configuration. 
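* As an illustration: pipes at 148500 kHz and 74250 kHz yield
* 148500 kHz, which valleyview_calc_cdclk() above maps to a
* 266667 kHz cdclk on VLV.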
*/ 6056 static int intel_mode_max_pixclk(struct drm_device *dev, 6057 struct drm_atomic_state *state) 6058 { 6059 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 6060 struct drm_i915_private *dev_priv = to_i915(dev); 6061 struct drm_crtc *crtc; 6062 struct drm_crtc_state *crtc_state; 6063 unsigned max_pixclk = 0, i; 6064 enum i915_pipe pipe; 6065 6066 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, 6067 sizeof(intel_state->min_pixclk)); 6068 6069 for_each_crtc_in_state(state, crtc, crtc_state, i) { 6070 int pixclk = 0; 6071 6072 if (crtc_state->enable) 6073 pixclk = crtc_state->adjusted_mode.crtc_clock; 6074 6075 intel_state->min_pixclk[i] = pixclk; 6076 } 6077 6078 for_each_pipe(dev_priv, pipe) 6079 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk); 6080 6081 return max_pixclk; 6082 } 6083 6084 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) 6085 { 6086 struct drm_device *dev = state->dev; 6087 struct drm_i915_private *dev_priv = to_i915(dev); 6088 int max_pixclk = intel_mode_max_pixclk(dev, state); 6089 struct intel_atomic_state *intel_state = 6090 to_intel_atomic_state(state); 6091 6092 intel_state->cdclk = intel_state->dev_cdclk = 6093 valleyview_calc_cdclk(dev_priv, max_pixclk); 6094 6095 if (!intel_state->active_crtcs) 6096 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0); 6097 6098 return 0; 6099 } 6100 6101 static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) 6102 { 6103 int max_pixclk = ilk_max_pixel_rate(state); 6104 struct intel_atomic_state *intel_state = 6105 to_intel_atomic_state(state); 6106 6107 intel_state->cdclk = intel_state->dev_cdclk = 6108 bxt_calc_cdclk(max_pixclk); 6109 6110 if (!intel_state->active_crtcs) 6111 intel_state->dev_cdclk = bxt_calc_cdclk(0); 6112 6113 return 0; 6114 } 6115 6116 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) 6117 { 6118 unsigned int credits, default_credits; 6119 6120 if (IS_CHERRYVIEW(dev_priv)) 6121 default_credits = PFI_CREDIT(12); 6122 else 6123 default_credits = PFI_CREDIT(8); 6124 6125 if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) { 6126 /* CHV suggested value is 31 or 63 */ 6127 if (IS_CHERRYVIEW(dev_priv)) 6128 credits = PFI_CREDIT_63; 6129 else 6130 credits = PFI_CREDIT(15); 6131 } else { 6132 credits = default_credits; 6133 } 6134 6135 /* 6136 * WA - write default credits before re-programming 6137 * FIXME: should we also set the resend bit here? 6138 */ 6139 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 6140 default_credits); 6141 6142 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 6143 credits | PFI_CREDIT_RESEND); 6144 6145 /* 6146 * FIXME is this guaranteed to clear 6147 * immediately or should we poll for it? 6148 */ 6149 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); 6150 } 6151 6152 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) 6153 { 6154 struct drm_device *dev = old_state->dev; 6155 struct drm_i915_private *dev_priv = to_i915(dev); 6156 struct intel_atomic_state *old_intel_state = 6157 to_intel_atomic_state(old_state); 6158 unsigned req_cdclk = old_intel_state->dev_cdclk; 6159 6160 /* 6161 * FIXME: We can end up here with all power domains off, yet 6162 * with a CDCLK frequency other than the minimum. To account 6163 * for this take the PIPE-A power domain, which covers the HW 6164 * blocks needed for the following programming. 
This can be 6165 * removed once it's guaranteed that we get here either with 6166 * the minimum CDCLK set, or the required power domains 6167 * enabled. 6168 */ 6169 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); 6170 6171 if (IS_CHERRYVIEW(dev)) 6172 cherryview_set_cdclk(dev, req_cdclk); 6173 else 6174 valleyview_set_cdclk(dev, req_cdclk); 6175 6176 vlv_program_pfi_credits(dev_priv); 6177 6178 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); 6179 } 6180 6181 static void valleyview_crtc_enable(struct drm_crtc *crtc) 6182 { 6183 struct drm_device *dev = crtc->dev; 6184 struct drm_i915_private *dev_priv = to_i915(dev); 6185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6186 struct intel_encoder *encoder; 6187 struct intel_crtc_state *pipe_config = 6188 to_intel_crtc_state(crtc->state); 6189 int pipe = intel_crtc->pipe; 6190 6191 if (WARN_ON(intel_crtc->active)) 6192 return; 6193 6194 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6195 intel_dp_set_m_n(intel_crtc, M1_N1); 6196 6197 intel_set_pipe_timings(intel_crtc); 6198 intel_set_pipe_src_size(intel_crtc); 6199 6200 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { 6201 struct drm_i915_private *dev_priv = to_i915(dev); 6202 6203 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6204 I915_WRITE(CHV_CANVAS(pipe), 0); 6205 } 6206 6207 i9xx_set_pipeconf(intel_crtc); 6208 6209 intel_crtc->active = true; 6210 6211 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6212 6213 for_each_encoder_on_crtc(dev, crtc, encoder) 6214 if (encoder->pre_pll_enable) 6215 encoder->pre_pll_enable(encoder); 6216 6217 if (IS_CHERRYVIEW(dev)) { 6218 chv_prepare_pll(intel_crtc, intel_crtc->config); 6219 chv_enable_pll(intel_crtc, intel_crtc->config); 6220 } else { 6221 vlv_prepare_pll(intel_crtc, intel_crtc->config); 6222 vlv_enable_pll(intel_crtc, intel_crtc->config); 6223 } 6224 6225 for_each_encoder_on_crtc(dev, crtc, encoder) 6226 if (encoder->pre_enable) 6227 encoder->pre_enable(encoder); 6228 6229 i9xx_pfit_enable(intel_crtc); 6230 6231 intel_color_load_luts(&pipe_config->base); 6232 6233 intel_update_watermarks(crtc); 6234 intel_enable_pipe(intel_crtc); 6235 6236 assert_vblank_disabled(crtc); 6237 drm_crtc_vblank_on(crtc); 6238 6239 for_each_encoder_on_crtc(dev, crtc, encoder) 6240 encoder->enable(encoder); 6241 } 6242 6243 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6244 { 6245 struct drm_device *dev = crtc->base.dev; 6246 struct drm_i915_private *dev_priv = to_i915(dev); 6247 6248 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6249 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6250 } 6251 6252 static void i9xx_crtc_enable(struct drm_crtc *crtc) 6253 { 6254 struct drm_device *dev = crtc->dev; 6255 struct drm_i915_private *dev_priv = to_i915(dev); 6256 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6257 struct intel_encoder *encoder; 6258 struct intel_crtc_state *pipe_config = 6259 to_intel_crtc_state(crtc->state); 6260 enum i915_pipe pipe = intel_crtc->pipe; 6261 6262 if (WARN_ON(intel_crtc->active)) 6263 return; 6264 6265 i9xx_set_pll_dividers(intel_crtc); 6266 6267 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 6268 intel_dp_set_m_n(intel_crtc, M1_N1); 6269 6270 intel_set_pipe_timings(intel_crtc); 6271 intel_set_pipe_src_size(intel_crtc); 6272 6273 i9xx_set_pipeconf(intel_crtc); 6274 6275 intel_crtc->active = true; 6276 6277 if (!IS_GEN2(dev)) 6278 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6279 6280 for_each_encoder_on_crtc(dev, crtc, encoder) 6281 if 
(encoder->pre_enable) 6282 encoder->pre_enable(encoder); 6283 6284 i9xx_enable_pll(intel_crtc); 6285 6286 i9xx_pfit_enable(intel_crtc); 6287 6288 intel_color_load_luts(&pipe_config->base); 6289 6290 intel_update_watermarks(crtc); 6291 intel_enable_pipe(intel_crtc); 6292 6293 assert_vblank_disabled(crtc); 6294 drm_crtc_vblank_on(crtc); 6295 6296 for_each_encoder_on_crtc(dev, crtc, encoder) 6297 encoder->enable(encoder); 6298 } 6299 6300 static void i9xx_pfit_disable(struct intel_crtc *crtc) 6301 { 6302 struct drm_device *dev = crtc->base.dev; 6303 struct drm_i915_private *dev_priv = to_i915(dev); 6304 6305 if (!crtc->config->gmch_pfit.control) 6306 return; 6307 6308 assert_pipe_disabled(dev_priv, crtc->pipe); 6309 6310 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 6311 I915_READ(PFIT_CONTROL)); 6312 I915_WRITE(PFIT_CONTROL, 0); 6313 } 6314 6315 static void i9xx_crtc_disable(struct drm_crtc *crtc) 6316 { 6317 struct drm_device *dev = crtc->dev; 6318 struct drm_i915_private *dev_priv = to_i915(dev); 6319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6320 struct intel_encoder *encoder; 6321 int pipe = intel_crtc->pipe; 6322 6323 /* 6324 * On gen2 planes are double buffered but the pipe isn't, so we must 6325 * wait for planes to fully turn off before disabling the pipe. 6326 */ 6327 if (IS_GEN2(dev)) 6328 intel_wait_for_vblank(dev, pipe); 6329 6330 for_each_encoder_on_crtc(dev, crtc, encoder) 6331 encoder->disable(encoder); 6332 6333 drm_crtc_vblank_off(crtc); 6334 assert_vblank_disabled(crtc); 6335 6336 intel_disable_pipe(intel_crtc); 6337 6338 i9xx_pfit_disable(intel_crtc); 6339 6340 for_each_encoder_on_crtc(dev, crtc, encoder) 6341 if (encoder->post_disable) 6342 encoder->post_disable(encoder); 6343 6344 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { 6345 if (IS_CHERRYVIEW(dev)) 6346 chv_disable_pll(dev_priv, pipe); 6347 else if (IS_VALLEYVIEW(dev)) 6348 vlv_disable_pll(dev_priv, pipe); 6349 else 6350 i9xx_disable_pll(intel_crtc); 6351 } 6352 6353 for_each_encoder_on_crtc(dev, crtc, encoder) 6354 if (encoder->post_pll_disable) 6355 encoder->post_pll_disable(encoder); 6356 6357 if (!IS_GEN2(dev)) 6358 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6359 } 6360 6361 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 6362 { 6363 struct intel_encoder *encoder; 6364 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6365 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6366 enum intel_display_power_domain domain; 6367 unsigned long domains; 6368 6369 if (!intel_crtc->active) 6370 return; 6371 6372 if (to_intel_plane_state(crtc->primary->state)->visible) { 6373 WARN_ON(intel_crtc->flip_work); 6374 6375 intel_pre_disable_primary_noatomic(crtc); 6376 6377 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 6378 to_intel_plane_state(crtc->primary->state)->visible = false; 6379 } 6380 6381 dev_priv->display.crtc_disable(crtc); 6382 6383 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 6384 crtc->base.id, crtc->name); 6385 6386 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 6387 crtc->state->active = false; 6388 intel_crtc->active = false; 6389 crtc->enabled = false; 6390 crtc->state->connector_mask = 0; 6391 crtc->state->encoder_mask = 0; 6392 6393 for_each_encoder_on_crtc(crtc->dev, crtc, encoder) 6394 encoder->base.crtc = NULL; 6395 6396 intel_fbc_disable(intel_crtc); 6397 intel_update_watermarks(crtc); 6398 intel_disable_shared_dpll(intel_crtc); 6399 6400 domains = 
intel_crtc->enabled_power_domains;
6401 for_each_power_domain(domain, domains)
6402 intel_display_power_put(dev_priv, domain);
6403 intel_crtc->enabled_power_domains = 0;
6404
6405 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6406 dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6407 }
6408
6409 /*
6410 * Turn all CRTCs off, but do not adjust state.
6411 * This has to be paired with a call to intel_modeset_setup_hw_state.
6412 */
6413 int intel_display_suspend(struct drm_device *dev)
6414 {
6415 struct drm_i915_private *dev_priv = to_i915(dev);
6416 struct drm_atomic_state *state;
6417 int ret;
6418
6419 state = drm_atomic_helper_suspend(dev);
6420 ret = PTR_ERR_OR_ZERO(state);
6421 if (ret)
6422 DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
6423 else
6424 dev_priv->modeset_restore_state = state;
6425 return ret;
6426 }
6427
6428 void intel_encoder_destroy(struct drm_encoder *encoder)
6429 {
6430 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6431
6432 drm_encoder_cleanup(encoder);
6433 kfree(intel_encoder);
6434 }
6435
6436 /* Cross check the actual hw state with our own modeset state tracking (and its
6437 * internal consistency). */
6438 static void intel_connector_verify_state(struct intel_connector *connector)
6439 {
6440 struct drm_crtc *crtc = connector->base.state->crtc;
6441
6442 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6443 connector->base.base.id,
6444 connector->base.name);
6445
6446 if (connector->get_hw_state(connector)) {
6447 struct intel_encoder *encoder = connector->encoder;
6448 struct drm_connector_state *conn_state = connector->base.state;
6449
6450 I915_STATE_WARN(!crtc,
6451 "connector enabled without attached crtc\n");
6452
6453 if (!crtc)
6454 return;
6455
6456 I915_STATE_WARN(!crtc->state->active,
6457 "connector is active, but attached crtc isn't\n");
6458
6459 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6460 return;
6461
6462 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6463 "atomic encoder doesn't match attached encoder\n");
6464
6465 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6466 "attached encoder crtc differs from connector crtc\n");
6467 } else {
6468 I915_STATE_WARN(crtc && crtc->state->active,
6469 "attached crtc is active, but connector isn't\n");
6470 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6471 "best encoder set without crtc!\n");
6472 }
6473 }
6474
6475 int intel_connector_init(struct intel_connector *connector)
6476 {
6477 drm_atomic_helper_connector_reset(&connector->base);
6478
6479 if (!connector->base.state)
6480 return -ENOMEM;
6481
6482 return 0;
6483 }
6484
6485 struct intel_connector *intel_connector_alloc(void)
6486 {
6487 struct intel_connector *connector;
6488
6489 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6490 if (!connector)
6491 return NULL;
6492
6493 if (intel_connector_init(connector) < 0) {
6494 kfree(connector);
6495 return NULL;
6496 }
6497
6498 return connector;
6499 }
6500
6501 /* Simple connector->get_hw_state implementation for encoders that support only
6502 * one connector and no cloning, and hence the encoder state determines the state
6503 * of the connector.
*/ 6504 bool intel_connector_get_hw_state(struct intel_connector *connector) 6505 { 6506 enum i915_pipe pipe = 0; 6507 struct intel_encoder *encoder = connector->encoder; 6508 6509 return encoder->get_hw_state(encoder, &pipe); 6510 } 6511 6512 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6513 { 6514 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6515 return crtc_state->fdi_lanes; 6516 6517 return 0; 6518 } 6519 6520 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, 6521 struct intel_crtc_state *pipe_config) 6522 { 6523 struct drm_atomic_state *state = pipe_config->base.state; 6524 struct intel_crtc *other_crtc; 6525 struct intel_crtc_state *other_crtc_state; 6526 6527 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 6528 pipe_name(pipe), pipe_config->fdi_lanes); 6529 if (pipe_config->fdi_lanes > 4) { 6530 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 6531 pipe_name(pipe), pipe_config->fdi_lanes); 6532 return -EINVAL; 6533 } 6534 6535 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 6536 if (pipe_config->fdi_lanes > 2) { 6537 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 6538 pipe_config->fdi_lanes); 6539 return -EINVAL; 6540 } else { 6541 return 0; 6542 } 6543 } 6544 6545 if (INTEL_INFO(dev)->num_pipes == 2) 6546 return 0; 6547 6548 /* Ivybridge 3 pipe is really complicated */ 6549 switch (pipe) { 6550 case PIPE_A: 6551 return 0; 6552 case PIPE_B: 6553 if (pipe_config->fdi_lanes <= 2) 6554 return 0; 6555 6556 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C)); 6557 other_crtc_state = 6558 intel_atomic_get_crtc_state(state, other_crtc); 6559 if (IS_ERR(other_crtc_state)) 6560 return PTR_ERR(other_crtc_state); 6561 6562 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 6563 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 6564 pipe_name(pipe), pipe_config->fdi_lanes); 6565 return -EINVAL; 6566 } 6567 return 0; 6568 case PIPE_C: 6569 if (pipe_config->fdi_lanes > 2) { 6570 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6571 pipe_name(pipe), pipe_config->fdi_lanes); 6572 return -EINVAL; 6573 } 6574 6575 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B)); 6576 other_crtc_state = 6577 intel_atomic_get_crtc_state(state, other_crtc); 6578 if (IS_ERR(other_crtc_state)) 6579 return PTR_ERR(other_crtc_state); 6580 6581 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6582 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6583 return -EINVAL; 6584 } 6585 return 0; 6586 default: 6587 BUG(); 6588 } 6589 } 6590 6591 #define RETRY 1 6592 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6593 struct intel_crtc_state *pipe_config) 6594 { 6595 struct drm_device *dev = intel_crtc->base.dev; 6596 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6597 int lane, link_bw, fdi_dotclock, ret; 6598 bool needs_recompute = false; 6599 6600 retry: 6601 /* FDI is a binary signal running at ~2.7GHz, encoding 6602 * each output octet as 10 bits. The actual frequency 6603 * is stored as a divider into a 100MHz clock, and the 6604 * mode pixel clock is stored in units of 1KHz. 
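* (With the 10 bit/octet encoding noted above, the ~2.7 GHz signal
* carries roughly 270 million octets per second per lane.)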
6605 * Hence the bw of each lane in terms of the mode signal
6606 * is:
6607 */
6608 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6609
6610 fdi_dotclock = adjusted_mode->crtc_clock;
6611
6612 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6613 pipe_config->pipe_bpp);
6614
6615 pipe_config->fdi_lanes = lane;
6616
6617 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6618 link_bw, &pipe_config->fdi_m_n);
6619
6620 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6621 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6622 pipe_config->pipe_bpp -= 2*3;
6623 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6624 pipe_config->pipe_bpp);
6625 needs_recompute = true;
6626 pipe_config->bw_constrained = true;
6627
6628 goto retry;
6629 }
6630
6631 if (needs_recompute)
6632 return RETRY;
6633
6634 return ret;
6635 }
6636
6637 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6638 struct intel_crtc_state *pipe_config)
6639 {
6640 if (pipe_config->pipe_bpp > 24)
6641 return false;
6642
6643 /* HSW can handle pixel rate up to cdclk? */
6644 if (IS_HASWELL(dev_priv))
6645 return true;
6646
6647 /*
6648 * We compare against max which means we must take
6649 * the increased cdclk requirement into account when
6650 * calculating the new cdclk.
6651 *
6652 * Should measure whether using a lower cdclk w/o IPS would be a net
6653 */
6654 return ilk_pipe_pixel_rate(pipe_config) <=
6655 dev_priv->max_cdclk_freq * 95 / 100;
6656 }
6657
6658 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6659 struct intel_crtc_state *pipe_config)
6660 {
6661 struct drm_device *dev = crtc->base.dev;
6662 struct drm_i915_private *dev_priv = to_i915(dev);
6663
6664 pipe_config->ips_enabled = i915.enable_ips &&
6665 hsw_crtc_supports_ips(crtc) &&
6666 pipe_config_supports_ips(dev_priv, pipe_config);
6667 }
6668
6669 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6670 {
6671 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6672
6673 /* GDG double wide on either pipe, otherwise pipe A only */
6674 return INTEL_INFO(dev_priv)->gen < 4 &&
6675 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6676 }
6677
6678 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6679 struct intel_crtc_state *pipe_config)
6680 {
6681 struct drm_device *dev = crtc->base.dev;
6682 struct drm_i915_private *dev_priv = to_i915(dev);
6683 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6684 int clock_limit = dev_priv->max_dotclk_freq;
6685
6686 if (INTEL_INFO(dev)->gen < 4) {
6687 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6688
6689 /*
6690 * Enable double wide mode when the dot clock
6691 * is > 90% of the (display) core speed.
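* E.g. with a 333333 kHz core clock the limit works out to 300000 kHz,
* so a 320000 kHz dot clock mode only fits with double wide enabled.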
6692 */ 6693 if (intel_crtc_supports_double_wide(crtc) && 6694 adjusted_mode->crtc_clock > clock_limit) { 6695 clock_limit = dev_priv->max_dotclk_freq; 6696 pipe_config->double_wide = true; 6697 } 6698 } 6699 6700 if (adjusted_mode->crtc_clock > clock_limit) { 6701 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6702 adjusted_mode->crtc_clock, clock_limit, 6703 yesno(pipe_config->double_wide)); 6704 return -EINVAL; 6705 } 6706 6707 /* 6708 * Pipe horizontal size must be even in: 6709 * - DVO ganged mode 6710 * - LVDS dual channel mode 6711 * - Double wide pipe 6712 */ 6713 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6714 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6715 pipe_config->pipe_src_w &= ~1; 6716 6717 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6718 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6719 */ 6720 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6721 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6722 return -EINVAL; 6723 6724 if (HAS_IPS(dev)) 6725 hsw_compute_ips_config(crtc, pipe_config); 6726 6727 if (pipe_config->has_pch_encoder) 6728 return ironlake_fdi_compute_config(crtc, pipe_config); 6729 6730 return 0; 6731 } 6732 6733 static int skylake_get_display_clock_speed(struct drm_device *dev) 6734 { 6735 struct drm_i915_private *dev_priv = to_i915(dev); 6736 uint32_t cdctl; 6737 6738 skl_dpll0_update(dev_priv); 6739 6740 if (dev_priv->cdclk_pll.vco == 0) 6741 return dev_priv->cdclk_pll.ref; 6742 6743 cdctl = I915_READ(CDCLK_CTL); 6744 6745 if (dev_priv->cdclk_pll.vco == 8640000) { 6746 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6747 case CDCLK_FREQ_450_432: 6748 return 432000; 6749 case CDCLK_FREQ_337_308: 6750 return 308571; 6751 case CDCLK_FREQ_540: 6752 return 540000; 6753 case CDCLK_FREQ_675_617: 6754 return 617143; 6755 default: 6756 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); 6757 } 6758 } else { 6759 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6760 case CDCLK_FREQ_450_432: 6761 return 450000; 6762 case CDCLK_FREQ_337_308: 6763 return 337500; 6764 case CDCLK_FREQ_540: 6765 return 540000; 6766 case CDCLK_FREQ_675_617: 6767 return 675000; 6768 default: 6769 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); 6770 } 6771 } 6772 6773 return dev_priv->cdclk_pll.ref; 6774 } 6775 6776 static void bxt_de_pll_update(struct drm_i915_private *dev_priv) 6777 { 6778 u32 val; 6779 6780 dev_priv->cdclk_pll.ref = 19200; 6781 dev_priv->cdclk_pll.vco = 0; 6782 6783 val = I915_READ(BXT_DE_PLL_ENABLE); 6784 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) 6785 return; 6786 6787 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) 6788 return; 6789 6790 val = I915_READ(BXT_DE_PLL_CTL); 6791 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * 6792 dev_priv->cdclk_pll.ref; 6793 } 6794 6795 static int broxton_get_display_clock_speed(struct drm_device *dev) 6796 { 6797 struct drm_i915_private *dev_priv = to_i915(dev); 6798 u32 divider; 6799 int div, vco; 6800 6801 bxt_de_pll_update(dev_priv); 6802 6803 vco = dev_priv->cdclk_pll.vco; 6804 if (vco == 0) 6805 return dev_priv->cdclk_pll.ref; 6806 6807 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; 6808 6809 switch (divider) { 6810 case BXT_CDCLK_CD2X_DIV_SEL_1: 6811 div = 2; 6812 break; 6813 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6814 div = 3; 6815 break; 6816 case BXT_CDCLK_CD2X_DIV_SEL_2: 6817 div = 4; 6818 break; 6819 case BXT_CDCLK_CD2X_DIV_SEL_4: 6820 div = 8; 6821 break; 6822 default: 6823 MISSING_CASE(divider); 6824 return 
dev_priv->cdclk_pll.ref; 6825 } 6826 6827 return DIV_ROUND_CLOSEST(vco, div); 6828 } 6829 6830 static int broadwell_get_display_clock_speed(struct drm_device *dev) 6831 { 6832 struct drm_i915_private *dev_priv = to_i915(dev); 6833 uint32_t lcpll = I915_READ(LCPLL_CTL); 6834 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6835 6836 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6837 return 800000; 6838 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6839 return 450000; 6840 else if (freq == LCPLL_CLK_FREQ_450) 6841 return 450000; 6842 else if (freq == LCPLL_CLK_FREQ_54O_BDW) 6843 return 540000; 6844 else if (freq == LCPLL_CLK_FREQ_337_5_BDW) 6845 return 337500; 6846 else 6847 return 675000; 6848 } 6849 6850 static int haswell_get_display_clock_speed(struct drm_device *dev) 6851 { 6852 struct drm_i915_private *dev_priv = to_i915(dev); 6853 uint32_t lcpll = I915_READ(LCPLL_CTL); 6854 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6855 6856 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6857 return 800000; 6858 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6859 return 450000; 6860 else if (freq == LCPLL_CLK_FREQ_450) 6861 return 450000; 6862 else if (IS_HSW_ULT(dev)) 6863 return 337500; 6864 else 6865 return 540000; 6866 } 6867 6868 static int valleyview_get_display_clock_speed(struct drm_device *dev) 6869 { 6870 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk", 6871 CCK_DISPLAY_CLOCK_CONTROL); 6872 } 6873 6874 static int ilk_get_display_clock_speed(struct drm_device *dev) 6875 { 6876 return 450000; 6877 } 6878 6879 static int i945_get_display_clock_speed(struct drm_device *dev) 6880 { 6881 return 400000; 6882 } 6883 6884 static int i915_get_display_clock_speed(struct drm_device *dev) 6885 { 6886 return 333333; 6887 } 6888 6889 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 6890 { 6891 return 200000; 6892 } 6893 6894 static int pnv_get_display_clock_speed(struct drm_device *dev) 6895 { 6896 u16 gcfgc = 0; 6897 6898 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6899 6900 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6901 case GC_DISPLAY_CLOCK_267_MHZ_PNV: 6902 return 266667; 6903 case GC_DISPLAY_CLOCK_333_MHZ_PNV: 6904 return 333333; 6905 case GC_DISPLAY_CLOCK_444_MHZ_PNV: 6906 return 444444; 6907 case GC_DISPLAY_CLOCK_200_MHZ_PNV: 6908 return 200000; 6909 default: 6910 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); 6911 case GC_DISPLAY_CLOCK_133_MHZ_PNV: 6912 return 133333; 6913 case GC_DISPLAY_CLOCK_167_MHZ_PNV: 6914 return 166667; 6915 } 6916 } 6917 6918 static int i915gm_get_display_clock_speed(struct drm_device *dev) 6919 { 6920 u16 gcfgc = 0; 6921 6922 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6923 6924 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 6925 return 133333; 6926 else { 6927 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6928 case GC_DISPLAY_CLOCK_333_MHZ: 6929 return 333333; 6930 default: 6931 case GC_DISPLAY_CLOCK_190_200_MHZ: 6932 return 190000; 6933 } 6934 } 6935 } 6936 6937 static int i865_get_display_clock_speed(struct drm_device *dev) 6938 { 6939 return 266667; 6940 } 6941 6942 static int i85x_get_display_clock_speed(struct drm_device *dev) 6943 { 6944 u16 hpllcc = 0; 6945 6946 /* 6947 * 852GM/852GMV only supports 133 MHz and the HPLLCC 6948 * encoding is different :( 6949 * FIXME is this the right way to detect 852GM/852GMV? 6950 */ 6951 if (dev->pdev->revision == 0x1) 6952 return 133333; 6953 6954 pci_bus_read_config_word(dev->pdev->bus, 6955 PCI_DEVFN(0, 3), HPLLCC, &hpllcc); 6956 6957 /* Assume that the hardware is in the high speed state. 
This 6958 * should be the default. 6959 */ 6960 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 6961 case GC_CLOCK_133_200: 6962 case GC_CLOCK_133_200_2: 6963 case GC_CLOCK_100_200: 6964 return 200000; 6965 case GC_CLOCK_166_250: 6966 return 250000; 6967 case GC_CLOCK_100_133: 6968 return 133333; 6969 case GC_CLOCK_133_266: 6970 case GC_CLOCK_133_266_2: 6971 case GC_CLOCK_166_266: 6972 return 266667; 6973 } 6974 6975 /* Shouldn't happen */ 6976 return 0; 6977 } 6978 6979 static int i830_get_display_clock_speed(struct drm_device *dev) 6980 { 6981 return 133333; 6982 } 6983 6984 static unsigned int intel_hpll_vco(struct drm_device *dev) 6985 { 6986 struct drm_i915_private *dev_priv = to_i915(dev); 6987 static const unsigned int blb_vco[8] = { 6988 [0] = 3200000, 6989 [1] = 4000000, 6990 [2] = 5333333, 6991 [3] = 4800000, 6992 [4] = 6400000, 6993 }; 6994 static const unsigned int pnv_vco[8] = { 6995 [0] = 3200000, 6996 [1] = 4000000, 6997 [2] = 5333333, 6998 [3] = 4800000, 6999 [4] = 2666667, 7000 }; 7001 static const unsigned int cl_vco[8] = { 7002 [0] = 3200000, 7003 [1] = 4000000, 7004 [2] = 5333333, 7005 [3] = 6400000, 7006 [4] = 3333333, 7007 [5] = 3566667, 7008 [6] = 4266667, 7009 }; 7010 static const unsigned int elk_vco[8] = { 7011 [0] = 3200000, 7012 [1] = 4000000, 7013 [2] = 5333333, 7014 [3] = 4800000, 7015 }; 7016 static const unsigned int ctg_vco[8] = { 7017 [0] = 3200000, 7018 [1] = 4000000, 7019 [2] = 5333333, 7020 [3] = 6400000, 7021 [4] = 2666667, 7022 [5] = 4266667, 7023 }; 7024 const unsigned int *vco_table; 7025 unsigned int vco; 7026 uint8_t tmp = 0; 7027 7028 /* FIXME other chipsets? */ 7029 if (IS_GM45(dev)) 7030 vco_table = ctg_vco; 7031 else if (IS_G4X(dev)) 7032 vco_table = elk_vco; 7033 else if (IS_CRESTLINE(dev)) 7034 vco_table = cl_vco; 7035 else if (IS_PINEVIEW(dev)) 7036 vco_table = pnv_vco; 7037 else if (IS_G33(dev)) 7038 vco_table = blb_vco; 7039 else 7040 return 0; 7041 7042 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO); 7043 7044 vco = vco_table[tmp & 0x7]; 7045 if (vco == 0) 7046 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); 7047 else 7048 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); 7049 7050 return vco; 7051 } 7052 7053 static int gm45_get_display_clock_speed(struct drm_device *dev) 7054 { 7055 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7056 uint16_t tmp = 0; 7057 7058 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7059 7060 cdclk_sel = (tmp >> 12) & 0x1; 7061 7062 switch (vco) { 7063 case 2666667: 7064 case 4000000: 7065 case 5333333: 7066 return cdclk_sel ? 333333 : 222222; 7067 case 3200000: 7068 return cdclk_sel ? 320000 : 228571; 7069 default: 7070 DRM_ERROR("Unable to determine CDCLK. 
HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp); 7071 return 222222; 7072 } 7073 } 7074 7075 static int i965gm_get_display_clock_speed(struct drm_device *dev) 7076 { 7077 static const uint8_t div_3200[] = { 16, 10, 8 }; 7078 static const uint8_t div_4000[] = { 20, 12, 10 }; 7079 static const uint8_t div_5333[] = { 24, 16, 14 }; 7080 const uint8_t *div_table; 7081 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7082 uint16_t tmp = 0; 7083 7084 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7085 7086 cdclk_sel = ((tmp >> 8) & 0x1f) - 1; 7087 7088 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7089 goto fail; 7090 7091 switch (vco) { 7092 case 3200000: 7093 div_table = div_3200; 7094 break; 7095 case 4000000: 7096 div_table = div_4000; 7097 break; 7098 case 5333333: 7099 div_table = div_5333; 7100 break; 7101 default: 7102 goto fail; 7103 } 7104 7105 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7106 7107 fail: 7108 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp); 7109 return 200000; 7110 } 7111 7112 static int g33_get_display_clock_speed(struct drm_device *dev) 7113 { 7114 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; 7115 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; 7116 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; 7117 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; 7118 const uint8_t *div_table; 7119 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7120 uint16_t tmp = 0; 7121 7122 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7123 7124 cdclk_sel = (tmp >> 4) & 0x7; 7125 7126 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7127 goto fail; 7128 7129 switch (vco) { 7130 case 3200000: 7131 div_table = div_3200; 7132 break; 7133 case 4000000: 7134 div_table = div_4000; 7135 break; 7136 case 4800000: 7137 div_table = div_4800; 7138 break; 7139 case 5333333: 7140 div_table = div_5333; 7141 break; 7142 default: 7143 goto fail; 7144 } 7145 7146 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7147 7148 fail: 7149 DRM_ERROR("Unable to determine CDCLK. 
HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp); 7150 return 190476; 7151 } 7152 7153 static void 7154 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 7155 { 7156 while (*num > DATA_LINK_M_N_MASK || 7157 *den > DATA_LINK_M_N_MASK) { 7158 *num >>= 1; 7159 *den >>= 1; 7160 } 7161 } 7162 7163 static void compute_m_n(unsigned int m, unsigned int n, 7164 uint32_t *ret_m, uint32_t *ret_n) 7165 { 7166 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7167 *ret_m = div_u64((uint64_t) m * *ret_n, n); 7168 intel_reduce_m_n_ratio(ret_m, ret_n); 7169 } 7170 7171 void 7172 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 7173 int pixel_clock, int link_clock, 7174 struct intel_link_m_n *m_n) 7175 { 7176 m_n->tu = 64; 7177 7178 compute_m_n(bits_per_pixel * pixel_clock, 7179 link_clock * nlanes * 8, 7180 &m_n->gmch_m, &m_n->gmch_n); 7181 7182 compute_m_n(pixel_clock, link_clock, 7183 &m_n->link_m, &m_n->link_n); 7184 } 7185 7186 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7187 { 7188 if (i915.panel_use_ssc >= 0) 7189 return i915.panel_use_ssc != 0; 7190 return dev_priv->vbt.lvds_use_ssc 7191 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7192 } 7193 7194 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 7195 { 7196 return (1 << dpll->n) << 16 | dpll->m2; 7197 } 7198 7199 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 7200 { 7201 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7202 } 7203 7204 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7205 struct intel_crtc_state *crtc_state, 7206 struct dpll *reduced_clock) 7207 { 7208 struct drm_device *dev = crtc->base.dev; 7209 u32 fp, fp2 = 0; 7210 7211 if (IS_PINEVIEW(dev)) { 7212 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7213 if (reduced_clock) 7214 fp2 = pnv_dpll_compute_fp(reduced_clock); 7215 } else { 7216 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7217 if (reduced_clock) 7218 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7219 } 7220 7221 crtc_state->dpll_hw_state.fp0 = fp; 7222 7223 crtc->lowfreq_avail = false; 7224 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7225 reduced_clock) { 7226 crtc_state->dpll_hw_state.fp1 = fp2; 7227 crtc->lowfreq_avail = true; 7228 } else { 7229 crtc_state->dpll_hw_state.fp1 = fp; 7230 } 7231 } 7232 7233 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 7234 pipe) 7235 { 7236 u32 reg_val; 7237 7238 /* 7239 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7240 * and set it to a reasonable value instead. 
7241 */ 7242 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7243 reg_val &= 0xffffff00; 7244 reg_val |= 0x00000030; 7245 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7246 7247 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7248 reg_val &= 0x8cffffff; 7249 reg_val = 0x8c000000; 7250 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7251 7252 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7253 reg_val &= 0xffffff00; 7254 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7255 7256 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7257 reg_val &= 0x00ffffff; 7258 reg_val |= 0xb0000000; 7259 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7260 } 7261 7262 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 7263 struct intel_link_m_n *m_n) 7264 { 7265 struct drm_device *dev = crtc->base.dev; 7266 struct drm_i915_private *dev_priv = to_i915(dev); 7267 int pipe = crtc->pipe; 7268 7269 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7270 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7271 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7272 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7273 } 7274 7275 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 7276 struct intel_link_m_n *m_n, 7277 struct intel_link_m_n *m2_n2) 7278 { 7279 struct drm_device *dev = crtc->base.dev; 7280 struct drm_i915_private *dev_priv = to_i915(dev); 7281 int pipe = crtc->pipe; 7282 enum transcoder transcoder = crtc->config->cpu_transcoder; 7283 7284 if (INTEL_INFO(dev)->gen >= 5) { 7285 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7286 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7287 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7288 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7289 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 7290 * for gen < 8) and if DRRS is supported (to make sure the 7291 * registers are not unnecessarily accessed). 7292 */ 7293 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) && 7294 crtc->config->has_drrs) { 7295 I915_WRITE(PIPE_DATA_M2(transcoder), 7296 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7297 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7298 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7299 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7300 } 7301 } else { 7302 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7303 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7304 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7305 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7306 } 7307 } 7308 7309 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 7310 { 7311 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7312 7313 if (m_n == M1_N1) { 7314 dp_m_n = &crtc->config->dp_m_n; 7315 dp_m2_n2 = &crtc->config->dp_m2_n2; 7316 } else if (m_n == M2_N2) { 7317 7318 /* 7319 * M2_N2 registers are not supported. Hence m2_n2 divider value 7320 * needs to be programmed into M1_N1. 
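* (E.g. for DRRS downclocking on such platforms the low refresh rate
* M/N values end up being written through the M1_N1 registers.)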
7321 */ 7322 dp_m_n = &crtc->config->dp_m2_n2; 7323 } else { 7324 DRM_ERROR("Unsupported divider value\n"); 7325 return; 7326 } 7327 7328 if (crtc->config->has_pch_encoder) 7329 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 7330 else 7331 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 7332 } 7333 7334 static void vlv_compute_dpll(struct intel_crtc *crtc, 7335 struct intel_crtc_state *pipe_config) 7336 { 7337 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7338 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7339 if (crtc->pipe != PIPE_A) 7340 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7341 7342 /* DPLL not used with DSI, but still need the rest set up */ 7343 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7344 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7345 DPLL_EXT_BUFFER_ENABLE_VLV; 7346 7347 pipe_config->dpll_hw_state.dpll_md = 7348 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7349 } 7350 7351 static void chv_compute_dpll(struct intel_crtc *crtc, 7352 struct intel_crtc_state *pipe_config) 7353 { 7354 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7355 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7356 if (crtc->pipe != PIPE_A) 7357 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7358 7359 /* DPLL not used with DSI, but still need the rest set up */ 7360 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7361 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7362 7363 pipe_config->dpll_hw_state.dpll_md = 7364 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7365 } 7366 7367 static void vlv_prepare_pll(struct intel_crtc *crtc, 7368 const struct intel_crtc_state *pipe_config) 7369 { 7370 struct drm_device *dev = crtc->base.dev; 7371 struct drm_i915_private *dev_priv = to_i915(dev); 7372 enum i915_pipe pipe = crtc->pipe; 7373 u32 mdiv; 7374 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7375 u32 coreclk, reg_val; 7376 7377 /* Enable Refclk */ 7378 I915_WRITE(DPLL(pipe), 7379 pipe_config->dpll_hw_state.dpll & 7380 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7381 7382 /* No need to actually set up the DPLL with DSI */ 7383 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7384 return; 7385 7386 mutex_lock(&dev_priv->sb_lock); 7387 7388 bestn = pipe_config->dpll.n; 7389 bestm1 = pipe_config->dpll.m1; 7390 bestm2 = pipe_config->dpll.m2; 7391 bestp1 = pipe_config->dpll.p1; 7392 bestp2 = pipe_config->dpll.p2; 7393 7394 /* See eDP HDMI DPIO driver vbios notes doc */ 7395 7396 /* PLL B needs special handling */ 7397 if (pipe == PIPE_B) 7398 vlv_pllb_recal_opamp(dev_priv, pipe); 7399 7400 /* Set up Tx target for periodic Rcomp update */ 7401 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7402 7403 /* Disable target IRef on PLL */ 7404 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7405 reg_val &= 0x00ffffff; 7406 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7407 7408 /* Disable fast lock */ 7409 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7410 7411 /* Set idtafcrecal before PLL is enabled */ 7412 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7413 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7414 mdiv |= ((bestn << DPIO_N_SHIFT)); 7415 mdiv |= (1 << DPIO_K_SHIFT); 7416 7417 /* 7418 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7419 * but we don't support that). 
7420 * Note: don't use the DAC post divider as it seems unstable. 7421 */ 7422 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7423 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7424 7425 mdiv |= DPIO_ENABLE_CALIBRATION; 7426 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7427 7428 /* Set HBR and RBR LPF coefficients */ 7429 if (pipe_config->port_clock == 162000 || 7430 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 7431 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 7432 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7433 0x009f0003); 7434 else 7435 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7436 0x00d0000f); 7437 7438 if (intel_crtc_has_dp_encoder(pipe_config)) { 7439 /* Use SSC source */ 7440 if (pipe == PIPE_A) 7441 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7442 0x0df40000); 7443 else 7444 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7445 0x0df70000); 7446 } else { /* HDMI or VGA */ 7447 /* Use bend source */ 7448 if (pipe == PIPE_A) 7449 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7450 0x0df70000); 7451 else 7452 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7453 0x0df40000); 7454 } 7455 7456 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7457 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7458 if (intel_crtc_has_dp_encoder(crtc->config)) 7459 coreclk |= 0x01000000; 7460 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7461 7462 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7463 mutex_unlock(&dev_priv->sb_lock); 7464 } 7465 7466 static void chv_prepare_pll(struct intel_crtc *crtc, 7467 const struct intel_crtc_state *pipe_config) 7468 { 7469 struct drm_device *dev = crtc->base.dev; 7470 struct drm_i915_private *dev_priv = to_i915(dev); 7471 enum i915_pipe pipe = crtc->pipe; 7472 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7473 u32 loopfilter, tribuf_calcntr; 7474 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7475 u32 dpio_val; 7476 int vco; 7477 7478 /* Enable Refclk and SSC */ 7479 I915_WRITE(DPLL(pipe), 7480 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7481 7482 /* No need to actually set up the DPLL with DSI */ 7483 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7484 return; 7485 7486 bestn = pipe_config->dpll.n; 7487 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7488 bestm1 = pipe_config->dpll.m1; 7489 bestm2 = pipe_config->dpll.m2 >> 22; 7490 bestp1 = pipe_config->dpll.p1; 7491 bestp2 = pipe_config->dpll.p2; 7492 vco = pipe_config->dpll.vco; 7493 dpio_val = 0; 7494 loopfilter = 0; 7495 7496 mutex_lock(&dev_priv->sb_lock); 7497 7498 /* p1 and p2 divider */ 7499 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7500 5 << DPIO_CHV_S1_DIV_SHIFT | 7501 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7502 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7503 1 << DPIO_CHV_K_DIV_SHIFT); 7504 7505 /* Feedback post-divider - m2 */ 7506 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7507 7508 /* Feedback refclk divider - n and m1 */ 7509 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7510 DPIO_CHV_M1_DIV_BY_2 | 7511 1 << DPIO_CHV_N_DIV_SHIFT); 7512 7513 /* M2 fraction division */ 7514 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7515 7516 /* M2 fraction division enable */ 7517 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7518 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7519 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7520 if (bestm2_frac) 7521 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7522 
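/*
 * Reading of the code above: on CHV dpll.m2 carries a 22-bit fraction.
 * The integer part (m2 >> 22) was programmed via CHV_PLL_DW0, and a
 * non-zero fraction (m2 & 0x3fffff) turns on fractional mode here.
 */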
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7523 7524 /* Program digital lock detect threshold */ 7525 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7526 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7527 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7528 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7529 if (!bestm2_frac) 7530 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7531 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7532 7533 /* Loop filter */ 7534 if (vco == 5400000) { 7535 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7536 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7537 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7538 tribuf_calcntr = 0x9; 7539 } else if (vco <= 6200000) { 7540 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7541 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7542 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7543 tribuf_calcntr = 0x9; 7544 } else if (vco <= 6480000) { 7545 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7546 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7547 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7548 tribuf_calcntr = 0x8; 7549 } else { 7550 /* Not supported. Apply the same limits as in the max case */ 7551 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7552 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7553 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7554 tribuf_calcntr = 0; 7555 } 7556 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7557 7558 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7559 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7560 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7561 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7562 7563 /* AFC Recal */ 7564 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7565 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7566 DPIO_AFC_RECAL); 7567 7568 mutex_unlock(&dev_priv->sb_lock); 7569 } 7570 7571 /** 7572 * vlv_force_pll_on - forcibly enable just the PLL 7573 * @dev: drm device 7574 * @pipe: pipe PLL to enable 7575 * @dpll: PLL configuration 7576 * 7577 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7578 * in cases where we need the PLL enabled even when @pipe is not going to 7579 * be enabled. 7580 */ 7581 int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe, 7582 const struct dpll *dpll) 7583 { 7584 struct intel_crtc *crtc = 7585 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); 7586 struct intel_crtc_state *pipe_config; 7587 7588 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 7589 if (!pipe_config) 7590 return -ENOMEM; 7591 7592 pipe_config->base.crtc = &crtc->base; 7593 pipe_config->pixel_multiplier = 1; 7594 pipe_config->dpll = *dpll; 7595 7596 if (IS_CHERRYVIEW(dev)) { 7597 chv_compute_dpll(crtc, pipe_config); 7598 chv_prepare_pll(crtc, pipe_config); 7599 chv_enable_pll(crtc, pipe_config); 7600 } else { 7601 vlv_compute_dpll(crtc, pipe_config); 7602 vlv_prepare_pll(crtc, pipe_config); 7603 vlv_enable_pll(crtc, pipe_config); 7604 } 7605 7606 kfree(pipe_config); 7607 7608 return 0; 7609 } 7610 7611 /** 7612 * vlv_force_pll_off - forcibly disable just the PLL 7613 * @dev: drm device 7614 * @pipe: pipe PLL to disable 7615 * 7616 * Disable the PLL for @pipe. To be used in cases where the PLL was 7617 * forced on even though @pipe itself is not going to be enabled.
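 *
 * This is the counterpart of vlv_force_pll_on().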
7618 */ 7619 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe) 7620 { 7621 if (IS_CHERRYVIEW(dev)) 7622 chv_disable_pll(to_i915(dev), pipe); 7623 else 7624 vlv_disable_pll(to_i915(dev), pipe); 7625 } 7626 7627 static void i9xx_compute_dpll(struct intel_crtc *crtc, 7628 struct intel_crtc_state *crtc_state, 7629 struct dpll *reduced_clock) 7630 { 7631 struct drm_device *dev = crtc->base.dev; 7632 struct drm_i915_private *dev_priv = to_i915(dev); 7633 u32 dpll; 7634 struct dpll *clock = &crtc_state->dpll; 7635 7636 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 7637 7638 dpll = DPLL_VGA_MODE_DIS; 7639 7640 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 7641 dpll |= DPLLB_MODE_LVDS; 7642 else 7643 dpll |= DPLLB_MODE_DAC_SERIAL; 7644 7645 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 7646 dpll |= (crtc_state->pixel_multiplier - 1) 7647 << SDVO_MULTIPLIER_SHIFT_HIRES; 7648 } 7649 7650 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 7651 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 7652 dpll |= DPLL_SDVO_HIGH_SPEED; 7653 7654 if (intel_crtc_has_dp_encoder(crtc_state)) 7655 dpll |= DPLL_SDVO_HIGH_SPEED; 7656 7657 /* compute bitmask from p1 value */ 7658 if (IS_PINEVIEW(dev)) 7659 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 7660 else { 7661 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7662 if (IS_G4X(dev) && reduced_clock) 7663 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 7664 } 7665 switch (clock->p2) { 7666 case 5: 7667 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 7668 break; 7669 case 7: 7670 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 7671 break; 7672 case 10: 7673 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 7674 break; 7675 case 14: 7676 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 7677 break; 7678 } 7679 if (INTEL_INFO(dev)->gen >= 4) 7680 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 7681 7682 if (crtc_state->sdvo_tv_clock) 7683 dpll |= PLL_REF_INPUT_TVCLKINBC; 7684 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7685 intel_panel_use_ssc(dev_priv)) 7686 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7687 else 7688 dpll |= PLL_REF_INPUT_DREFCLK; 7689 7690 dpll |= DPLL_VCO_ENABLE; 7691 crtc_state->dpll_hw_state.dpll = dpll; 7692 7693 if (INTEL_INFO(dev)->gen >= 4) { 7694 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 7695 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7696 crtc_state->dpll_hw_state.dpll_md = dpll_md; 7697 } 7698 } 7699 7700 static void i8xx_compute_dpll(struct intel_crtc *crtc, 7701 struct intel_crtc_state *crtc_state, 7702 struct dpll *reduced_clock) 7703 { 7704 struct drm_device *dev = crtc->base.dev; 7705 struct drm_i915_private *dev_priv = to_i915(dev); 7706 u32 dpll; 7707 struct dpll *clock = &crtc_state->dpll; 7708 7709 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 7710 7711 dpll = DPLL_VGA_MODE_DIS; 7712 7713 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7714 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7715 } else { 7716 if (clock->p1 == 2) 7717 dpll |= PLL_P1_DIVIDE_BY_TWO; 7718 else 7719 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7720 if (clock->p2 == 4) 7721 dpll |= PLL_P2_DIVIDE_BY_4; 7722 } 7723 7724 if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 7725 dpll |= DPLL_DVO_2X_MODE; 7726 7727 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7728 intel_panel_use_ssc(dev_priv)) 7729 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7730 else 7731 dpll |= PLL_REF_INPUT_DREFCLK; 7732 7733 
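/* The VCO powers up once this value is written to the DPLL register by the enable sequence. */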
dpll |= DPLL_VCO_ENABLE; 7734 crtc_state->dpll_hw_state.dpll = dpll; 7735 } 7736 7737 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 7738 { 7739 struct drm_device *dev = intel_crtc->base.dev; 7740 struct drm_i915_private *dev_priv = to_i915(dev); 7741 enum i915_pipe pipe = intel_crtc->pipe; 7742 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7743 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 7744 uint32_t crtc_vtotal, crtc_vblank_end; 7745 int vsyncshift = 0; 7746 7747 /* We need to be careful not to change the adjusted mode, as otherwise 7748 * the hw state checker will get angry at the mismatch. */ 7749 crtc_vtotal = adjusted_mode->crtc_vtotal; 7750 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 7751 7752 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 7753 /* the chip adds 2 halflines automatically */ 7754 crtc_vtotal -= 1; 7755 crtc_vblank_end -= 1; 7756 7757 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7758 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7759 else 7760 vsyncshift = adjusted_mode->crtc_hsync_start - 7761 adjusted_mode->crtc_htotal / 2; 7762 if (vsyncshift < 0) 7763 vsyncshift += adjusted_mode->crtc_htotal; 7764 } 7765 7766 if (INTEL_INFO(dev)->gen > 3) 7767 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 7768 7769 I915_WRITE(HTOTAL(cpu_transcoder), 7770 (adjusted_mode->crtc_hdisplay - 1) | 7771 ((adjusted_mode->crtc_htotal - 1) << 16)); 7772 I915_WRITE(HBLANK(cpu_transcoder), 7773 (adjusted_mode->crtc_hblank_start - 1) | 7774 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 7775 I915_WRITE(HSYNC(cpu_transcoder), 7776 (adjusted_mode->crtc_hsync_start - 1) | 7777 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 7778 7779 I915_WRITE(VTOTAL(cpu_transcoder), 7780 (adjusted_mode->crtc_vdisplay - 1) | 7781 ((crtc_vtotal - 1) << 16)); 7782 I915_WRITE(VBLANK(cpu_transcoder), 7783 (adjusted_mode->crtc_vblank_start - 1) | 7784 ((crtc_vblank_end - 1) << 16)); 7785 I915_WRITE(VSYNC(cpu_transcoder), 7786 (adjusted_mode->crtc_vsync_start - 1) | 7787 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 7788 7789 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 7790 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 7791 * documented on the DDI_FUNC_CTL register description, EDP Input Select 7792 * bits. */ 7793 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 7794 (pipe == PIPE_B || pipe == PIPE_C)) 7795 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 7796 7797 } 7798 7799 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 7800 { 7801 struct drm_device *dev = intel_crtc->base.dev; 7802 struct drm_i915_private *dev_priv = to_i915(dev); 7803 enum i915_pipe pipe = intel_crtc->pipe; 7804 7805 /* pipesrc controls the size that is scaled from, which should 7806 * always be the user's requested size.
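 * The register holds (width - 1) in the high word and (height - 1) in the
 * low word, so e.g. a 1920x1080 source is programmed as 0x077f0437.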
7807 */ 7808 I915_WRITE(PIPESRC(pipe), 7809 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7810 (intel_crtc->config->pipe_src_h - 1)); 7811 } 7812 7813 static void intel_get_pipe_timings(struct intel_crtc *crtc, 7814 struct intel_crtc_state *pipe_config) 7815 { 7816 struct drm_device *dev = crtc->base.dev; 7817 struct drm_i915_private *dev_priv = to_i915(dev); 7818 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 7819 uint32_t tmp; 7820 7821 tmp = I915_READ(HTOTAL(cpu_transcoder)); 7822 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 7823 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 7824 tmp = I915_READ(HBLANK(cpu_transcoder)); 7825 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 7826 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 7827 tmp = I915_READ(HSYNC(cpu_transcoder)); 7828 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 7829 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 7830 7831 tmp = I915_READ(VTOTAL(cpu_transcoder)); 7832 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 7833 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 7834 tmp = I915_READ(VBLANK(cpu_transcoder)); 7835 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 7836 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; 7837 tmp = I915_READ(VSYNC(cpu_transcoder)); 7838 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 7839 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 7840 7841 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 7842 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 7843 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 7844 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 7845 } 7846 } 7847 7848 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 7849 struct intel_crtc_state *pipe_config) 7850 { 7851 struct drm_device *dev = crtc->base.dev; 7852 struct drm_i915_private *dev_priv = to_i915(dev); 7853 u32 tmp; 7854 7855 tmp = I915_READ(PIPESRC(crtc->pipe)); 7856 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 7857 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 7858 7859 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 7860 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 7861 } 7862 7863 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 7864 struct intel_crtc_state *pipe_config) 7865 { 7866 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7867 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7868 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7869 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7870 7871 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7872 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7873 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7874 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7875 7876 mode->flags = pipe_config->base.adjusted_mode.flags; 7877 mode->type = DRM_MODE_TYPE_DRIVER; 7878 7879 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7880 mode->flags |= pipe_config->base.adjusted_mode.flags; 7881 7882 mode->hsync = drm_mode_hsync(mode); 7883 mode->vrefresh = drm_mode_vrefresh(mode); 7884 drm_mode_set_name(mode); 7885 } 7886 7887 static 
void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7888 { 7889 struct drm_device *dev = intel_crtc->base.dev; 7890 struct drm_i915_private *dev_priv = to_i915(dev); 7891 uint32_t pipeconf; 7892 7893 pipeconf = 0; 7894 7895 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 7896 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 7897 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7898 7899 if (intel_crtc->config->double_wide) 7900 pipeconf |= PIPECONF_DOUBLE_WIDE; 7901 7902 /* only g4x and later have fancy bpc/dither controls */ 7903 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 7904 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7905 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7906 pipeconf |= PIPECONF_DITHER_EN | 7907 PIPECONF_DITHER_TYPE_SP; 7908 7909 switch (intel_crtc->config->pipe_bpp) { 7910 case 18: 7911 pipeconf |= PIPECONF_6BPC; 7912 break; 7913 case 24: 7914 pipeconf |= PIPECONF_8BPC; 7915 break; 7916 case 30: 7917 pipeconf |= PIPECONF_10BPC; 7918 break; 7919 default: 7920 /* Case prevented by intel_choose_pipe_bpp_dither. */ 7921 BUG(); 7922 } 7923 } 7924 7925 if (HAS_PIPE_CXSR(dev)) { 7926 if (intel_crtc->lowfreq_avail) { 7927 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 7928 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 7929 } else { 7930 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 7931 } 7932 } 7933 7934 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7935 if (INTEL_INFO(dev)->gen < 4 || 7936 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7937 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7938 else 7939 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7940 } else 7941 pipeconf |= PIPECONF_PROGRESSIVE; 7942 7943 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && 7944 intel_crtc->config->limited_color_range) 7945 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7946 7947 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7948 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7949 } 7950 7951 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7952 struct intel_crtc_state *crtc_state) 7953 { 7954 struct drm_device *dev = crtc->base.dev; 7955 struct drm_i915_private *dev_priv = to_i915(dev); 7956 const struct intel_limit *limit; 7957 int refclk = 48000; 7958 7959 memset(&crtc_state->dpll_hw_state, 0, 7960 sizeof(crtc_state->dpll_hw_state)); 7961 7962 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7963 if (intel_panel_use_ssc(dev_priv)) { 7964 refclk = dev_priv->vbt.lvds_ssc_freq; 7965 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7966 } 7967 7968 limit = &intel_limits_i8xx_lvds; 7969 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 7970 limit = &intel_limits_i8xx_dvo; 7971 } else { 7972 limit = &intel_limits_i8xx_dac; 7973 } 7974 7975 if (!crtc_state->clock_set && 7976 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7977 refclk, NULL, &crtc_state->dpll)) { 7978 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7979 return -EINVAL; 7980 } 7981 7982 i8xx_compute_dpll(crtc, crtc_state, NULL); 7983 7984 return 0; 7985 } 7986 7987 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 7988 struct intel_crtc_state *crtc_state) 7989 { 7990 struct drm_device *dev = crtc->base.dev; 7991 struct drm_i915_private *dev_priv = to_i915(dev); 7992 const struct intel_limit *limit; 7993 int refclk = 96000; 7994 7995 memset(&crtc_state->dpll_hw_state, 0, 7996 
sizeof(crtc_state->dpll_hw_state)); 7997 7998 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7999 if (intel_panel_use_ssc(dev_priv)) { 8000 refclk = dev_priv->vbt.lvds_ssc_freq; 8001 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8002 } 8003 8004 if (intel_is_dual_link_lvds(dev)) 8005 limit = &intel_limits_g4x_dual_channel_lvds; 8006 else 8007 limit = &intel_limits_g4x_single_channel_lvds; 8008 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 8009 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 8010 limit = &intel_limits_g4x_hdmi; 8011 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 8012 limit = &intel_limits_g4x_sdvo; 8013 } else { 8014 /* The option is for other outputs */ 8015 limit = &intel_limits_i9xx_sdvo; 8016 } 8017 8018 if (!crtc_state->clock_set && 8019 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8020 refclk, NULL, &crtc_state->dpll)) { 8021 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8022 return -EINVAL; 8023 } 8024 8025 i9xx_compute_dpll(crtc, crtc_state, NULL); 8026 8027 return 0; 8028 } 8029 8030 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 8031 struct intel_crtc_state *crtc_state) 8032 { 8033 struct drm_device *dev = crtc->base.dev; 8034 struct drm_i915_private *dev_priv = to_i915(dev); 8035 const struct intel_limit *limit; 8036 int refclk = 96000; 8037 8038 memset(&crtc_state->dpll_hw_state, 0, 8039 sizeof(crtc_state->dpll_hw_state)); 8040 8041 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8042 if (intel_panel_use_ssc(dev_priv)) { 8043 refclk = dev_priv->vbt.lvds_ssc_freq; 8044 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8045 } 8046 8047 limit = &intel_limits_pineview_lvds; 8048 } else { 8049 limit = &intel_limits_pineview_sdvo; 8050 } 8051 8052 if (!crtc_state->clock_set && 8053 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8054 refclk, NULL, &crtc_state->dpll)) { 8055 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8056 return -EINVAL; 8057 } 8058 8059 i9xx_compute_dpll(crtc, crtc_state, NULL); 8060 8061 return 0; 8062 } 8063 8064 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 8065 struct intel_crtc_state *crtc_state) 8066 { 8067 struct drm_device *dev = crtc->base.dev; 8068 struct drm_i915_private *dev_priv = to_i915(dev); 8069 const struct intel_limit *limit; 8070 int refclk = 96000; 8071 8072 memset(&crtc_state->dpll_hw_state, 0, 8073 sizeof(crtc_state->dpll_hw_state)); 8074 8075 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8076 if (intel_panel_use_ssc(dev_priv)) { 8077 refclk = dev_priv->vbt.lvds_ssc_freq; 8078 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8079 } 8080 8081 limit = &intel_limits_i9xx_lvds; 8082 } else { 8083 limit = &intel_limits_i9xx_sdvo; 8084 } 8085 8086 if (!crtc_state->clock_set && 8087 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8088 refclk, NULL, &crtc_state->dpll)) { 8089 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8090 return -EINVAL; 8091 } 8092 8093 i9xx_compute_dpll(crtc, crtc_state, NULL); 8094 8095 return 0; 8096 } 8097 8098 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 8099 struct intel_crtc_state *crtc_state) 8100 { 8101 int refclk = 100000; 8102 const struct intel_limit *limit = &intel_limits_chv; 8103 8104 memset(&crtc_state->dpll_hw_state, 0, 8105 sizeof(crtc_state->dpll_hw_state)); 8106 8107 if (!crtc_state->clock_set && 8108 !chv_find_best_dpll(limit, crtc_state, 
crtc_state->port_clock, 8109 refclk, NULL, &crtc_state->dpll)) { 8110 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8111 return -EINVAL; 8112 } 8113 8114 chv_compute_dpll(crtc, crtc_state); 8115 8116 return 0; 8117 } 8118 8119 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 8120 struct intel_crtc_state *crtc_state) 8121 { 8122 int refclk = 100000; 8123 const struct intel_limit *limit = &intel_limits_vlv; 8124 8125 memset(&crtc_state->dpll_hw_state, 0, 8126 sizeof(crtc_state->dpll_hw_state)); 8127 8128 if (!crtc_state->clock_set && 8129 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8130 refclk, NULL, &crtc_state->dpll)) { 8131 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8132 return -EINVAL; 8133 } 8134 8135 vlv_compute_dpll(crtc, crtc_state); 8136 8137 return 0; 8138 } 8139 8140 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 8141 struct intel_crtc_state *pipe_config) 8142 { 8143 struct drm_device *dev = crtc->base.dev; 8144 struct drm_i915_private *dev_priv = to_i915(dev); 8145 uint32_t tmp; 8146 8147 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 8148 return; 8149 8150 tmp = I915_READ(PFIT_CONTROL); 8151 if (!(tmp & PFIT_ENABLE)) 8152 return; 8153 8154 /* Check whether the pfit is attached to our pipe. */ 8155 if (INTEL_INFO(dev)->gen < 4) { 8156 if (crtc->pipe != PIPE_B) 8157 return; 8158 } else { 8159 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 8160 return; 8161 } 8162 8163 pipe_config->gmch_pfit.control = tmp; 8164 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 8165 } 8166 8167 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 8168 struct intel_crtc_state *pipe_config) 8169 { 8170 struct drm_device *dev = crtc->base.dev; 8171 struct drm_i915_private *dev_priv = to_i915(dev); 8172 int pipe = pipe_config->cpu_transcoder; 8173 struct dpll clock; 8174 u32 mdiv; 8175 int refclk = 100000; 8176 8177 /* In case of DSI, DPLL will not be used */ 8178 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8179 return; 8180 8181 mutex_lock(&dev_priv->sb_lock); 8182 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 8183 mutex_unlock(&dev_priv->sb_lock); 8184 8185 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 8186 clock.m2 = mdiv & DPIO_M2DIV_MASK; 8187 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 8188 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 8189 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 8190 8191 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 8192 } 8193 8194 static void 8195 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 8196 struct intel_initial_plane_config *plane_config) 8197 { 8198 struct drm_device *dev = crtc->base.dev; 8199 struct drm_i915_private *dev_priv = to_i915(dev); 8200 u32 val, base, offset; 8201 int pipe = crtc->pipe, plane = crtc->plane; 8202 int fourcc, pixel_format; 8203 unsigned int aligned_height; 8204 struct drm_framebuffer *fb; 8205 struct intel_framebuffer *intel_fb; 8206 8207 val = I915_READ(DSPCNTR(plane)); 8208 if (!(val & DISPLAY_PLANE_ENABLE)) 8209 return; 8210 8211 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8212 if (!intel_fb) { 8213 DRM_DEBUG_KMS("failed to alloc fb\n"); 8214 return; 8215 } 8216 8217 fb = &intel_fb->base; 8218 8219 if (INTEL_INFO(dev)->gen >= 4) { 8220 if (val & DISPPLANE_TILED) { 8221 plane_config->tiling = I915_TILING_X; 8222 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 8223 } 8224 } 8225 8226 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8227 fourcc = i9xx_format_to_fourcc(pixel_format); 8228 
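/* Describe the firmware-programmed framebuffer in drm terms. */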
fb->pixel_format = fourcc; 8229 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 8230 8231 if (INTEL_INFO(dev)->gen >= 4) { 8232 if (plane_config->tiling) 8233 offset = I915_READ(DSPTILEOFF(plane)); 8234 else 8235 offset = I915_READ(DSPLINOFF(plane)); 8236 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 8237 } else { 8238 base = I915_READ(DSPADDR(plane)); 8239 } 8240 plane_config->base = base; 8241 8242 val = I915_READ(PIPESRC(pipe)); 8243 fb->width = ((val >> 16) & 0xfff) + 1; 8244 fb->height = ((val >> 0) & 0xfff) + 1; 8245 8246 val = I915_READ(DSPSTRIDE(pipe)); 8247 fb->pitches[0] = val & 0xffffffc0; 8248 8249 aligned_height = intel_fb_align_height(dev, fb->height, 8250 fb->pixel_format, 8251 fb->modifier[0]); 8252 8253 plane_config->size = fb->pitches[0] * aligned_height; 8254 8255 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8256 pipe_name(pipe), plane, fb->width, fb->height, 8257 fb->bits_per_pixel, base, fb->pitches[0], 8258 plane_config->size); 8259 8260 plane_config->fb = intel_fb; 8261 } 8262 8263 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8264 struct intel_crtc_state *pipe_config) 8265 { 8266 struct drm_device *dev = crtc->base.dev; 8267 struct drm_i915_private *dev_priv = to_i915(dev); 8268 int pipe = pipe_config->cpu_transcoder; 8269 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8270 struct dpll clock; 8271 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8272 int refclk = 100000; 8273 8274 /* In case of DSI, DPLL will not be used */ 8275 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 8276 return; 8277 8278 mutex_lock(&dev_priv->sb_lock); 8279 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8280 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8281 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8282 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8283 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8284 mutex_unlock(&dev_priv->sb_lock); 8285 8286 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 8287 clock.m2 = (pll_dw0 & 0xff) << 22; 8288 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8289 clock.m2 |= pll_dw2 & 0x3fffff; 8290 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8291 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8292 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8293 8294 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8295 } 8296 8297 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8298 struct intel_crtc_state *pipe_config) 8299 { 8300 struct drm_device *dev = crtc->base.dev; 8301 struct drm_i915_private *dev_priv = to_i915(dev); 8302 enum intel_display_power_domain power_domain; 8303 uint32_t tmp; 8304 bool ret; 8305 8306 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8307 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8308 return false; 8309 8310 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8311 pipe_config->shared_dpll = NULL; 8312 8313 ret = false; 8314 8315 tmp = I915_READ(PIPECONF(crtc->pipe)); 8316 if (!(tmp & PIPECONF_ENABLE)) 8317 goto out; 8318 8319 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8320 switch (tmp & PIPECONF_BPC_MASK) { 8321 case PIPECONF_6BPC: 8322 pipe_config->pipe_bpp = 18; 8323 break; 8324 case PIPECONF_8BPC: 8325 pipe_config->pipe_bpp = 24; 8326 break; 8327 case PIPECONF_10BPC: 8328 pipe_config->pipe_bpp = 30; 8329 break; 8330 default: 8331 break; 8332 } 8333 } 8334 8335 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && 8336 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8337 pipe_config->limited_color_range = true; 8338 8339 if (INTEL_INFO(dev)->gen < 4) 8340 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8341 8342 intel_get_pipe_timings(crtc, pipe_config); 8343 intel_get_pipe_src_size(crtc, pipe_config); 8344 8345 i9xx_get_pfit_config(crtc, pipe_config); 8346 8347 if (INTEL_INFO(dev)->gen >= 4) { 8348 /* No way to read it out on pipes B and C */ 8349 if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A) 8350 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 8351 else 8352 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8353 pipe_config->pixel_multiplier = 8354 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8355 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8356 pipe_config->dpll_hw_state.dpll_md = tmp; 8357 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 8358 tmp = I915_READ(DPLL(crtc->pipe)); 8359 pipe_config->pixel_multiplier = 8360 ((tmp & SDVO_MULTIPLIER_MASK) 8361 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8362 } else { 8363 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8364 * port and will be fixed up in the encoder->get_config 8365 * function. */ 8366 pipe_config->pixel_multiplier = 1; 8367 } 8368 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8369 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 8370 /* 8371 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 8372 * on 830. Filter it out here so that we don't 8373 * report errors due to that. 8374 */ 8375 if (IS_I830(dev)) 8376 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 8377 8378 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8379 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8380 } else { 8381 /* Mask out read-only status bits. 
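 * (On vlv/chv the PLL lock and port ready bits reflect live hardware
 * status and would otherwise show up as a state checker mismatch.)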
*/ 8382 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8383 DPLL_PORTC_READY_MASK | 8384 DPLL_PORTB_READY_MASK); 8385 } 8386 8387 if (IS_CHERRYVIEW(dev)) 8388 chv_crtc_clock_get(crtc, pipe_config); 8389 else if (IS_VALLEYVIEW(dev)) 8390 vlv_crtc_clock_get(crtc, pipe_config); 8391 else 8392 i9xx_crtc_clock_get(crtc, pipe_config); 8393 8394 /* 8395 * Normally the dotclock is filled in by the encoder .get_config() 8396 * but in case the pipe is enabled w/o any ports we need a sane 8397 * default. 8398 */ 8399 pipe_config->base.adjusted_mode.crtc_clock = 8400 pipe_config->port_clock / pipe_config->pixel_multiplier; 8401 8402 ret = true; 8403 8404 out: 8405 intel_display_power_put(dev_priv, power_domain); 8406 8407 return ret; 8408 } 8409 8410 static void ironlake_init_pch_refclk(struct drm_device *dev) 8411 { 8412 struct drm_i915_private *dev_priv = to_i915(dev); 8413 struct intel_encoder *encoder; 8414 int i; 8415 u32 val, final; 8416 bool has_lvds = false; 8417 bool has_cpu_edp = false; 8418 bool has_panel = false; 8419 bool has_ck505 = false; 8420 bool can_ssc = false; 8421 bool using_ssc_source = false; 8422 8423 /* We need to take the global config into account */ 8424 for_each_intel_encoder(dev, encoder) { 8425 switch (encoder->type) { 8426 case INTEL_OUTPUT_LVDS: 8427 has_panel = true; 8428 has_lvds = true; 8429 break; 8430 case INTEL_OUTPUT_EDP: 8431 has_panel = true; 8432 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 8433 has_cpu_edp = true; 8434 break; 8435 default: 8436 break; 8437 } 8438 } 8439 8440 if (HAS_PCH_IBX(dev)) { 8441 has_ck505 = dev_priv->vbt.display_clock_mode; 8442 can_ssc = has_ck505; 8443 } else { 8444 has_ck505 = false; 8445 can_ssc = true; 8446 } 8447 8448 /* Check if any DPLLs are using the SSC source */ 8449 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 8450 u32 temp = I915_READ(PCH_DPLL(i)); 8451 8452 if (!(temp & DPLL_VCO_ENABLE)) 8453 continue; 8454 8455 if ((temp & PLL_REF_INPUT_MASK) == 8456 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 8457 using_ssc_source = true; 8458 break; 8459 } 8460 } 8461 8462 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 8463 has_panel, has_lvds, has_ck505, using_ssc_source); 8464 8465 /* Ironlake: try to setup display ref clock before DPLL 8466 * enabling. This is only under driver's control after 8467 * PCH B stepping, previous chipset stepping should be 8468 * ignoring this setting. 8469 */ 8470 val = I915_READ(PCH_DREF_CONTROL); 8471 8472 /* As we must carefully and slowly disable/enable each source in turn, 8473 * compute the final state we want first and check if we need to 8474 * make any changes at all. 
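 * (If the computed final value already matches the current register
 * value we return early below without touching the hardware.)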
8475 */ 8476 final = val; 8477 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8478 if (has_ck505) 8479 final |= DREF_NONSPREAD_CK505_ENABLE; 8480 else 8481 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8482 8483 final &= ~DREF_SSC_SOURCE_MASK; 8484 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8485 final &= ~DREF_SSC1_ENABLE; 8486 8487 if (has_panel) { 8488 final |= DREF_SSC_SOURCE_ENABLE; 8489 8490 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8491 final |= DREF_SSC1_ENABLE; 8492 8493 if (has_cpu_edp) { 8494 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8495 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8496 else 8497 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8498 } else 8499 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8500 } else if (using_ssc_source) { 8501 final |= DREF_SSC_SOURCE_ENABLE; 8502 final |= DREF_SSC1_ENABLE; 8503 } 8504 8505 if (final == val) 8506 return; 8507 8508 /* Always enable nonspread source */ 8509 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8510 8511 if (has_ck505) 8512 val |= DREF_NONSPREAD_CK505_ENABLE; 8513 else 8514 val |= DREF_NONSPREAD_SOURCE_ENABLE; 8515 8516 if (has_panel) { 8517 val &= ~DREF_SSC_SOURCE_MASK; 8518 val |= DREF_SSC_SOURCE_ENABLE; 8519 8520 /* SSC must be turned on before enabling the CPU output */ 8521 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8522 DRM_DEBUG_KMS("Using SSC on panel\n"); 8523 val |= DREF_SSC1_ENABLE; 8524 } else 8525 val &= ~DREF_SSC1_ENABLE; 8526 8527 /* Get SSC going before enabling the outputs */ 8528 I915_WRITE(PCH_DREF_CONTROL, val); 8529 POSTING_READ(PCH_DREF_CONTROL); 8530 udelay(200); 8531 8532 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8533 8534 /* Enable CPU source on CPU attached eDP */ 8535 if (has_cpu_edp) { 8536 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8537 DRM_DEBUG_KMS("Using SSC on eDP\n"); 8538 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8539 } else 8540 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8541 } else 8542 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8543 8544 I915_WRITE(PCH_DREF_CONTROL, val); 8545 POSTING_READ(PCH_DREF_CONTROL); 8546 udelay(200); 8547 } else { 8548 DRM_DEBUG_KMS("Disabling CPU source output\n"); 8549 8550 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8551 8552 /* Turn off CPU output */ 8553 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8554 8555 I915_WRITE(PCH_DREF_CONTROL, val); 8556 POSTING_READ(PCH_DREF_CONTROL); 8557 udelay(200); 8558 8559 if (!using_ssc_source) { 8560 DRM_DEBUG_KMS("Disabling SSC source\n"); 8561 8562 /* Turn off the SSC source */ 8563 val &= ~DREF_SSC_SOURCE_MASK; 8564 val |= DREF_SSC_SOURCE_DISABLE; 8565 8566 /* Turn off SSC1 */ 8567 val &= ~DREF_SSC1_ENABLE; 8568 8569 I915_WRITE(PCH_DREF_CONTROL, val); 8570 POSTING_READ(PCH_DREF_CONTROL); 8571 udelay(200); 8572 } 8573 } 8574 8575 BUG_ON(val != final); 8576 } 8577 8578 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 8579 { 8580 uint32_t tmp; 8581 8582 tmp = I915_READ(SOUTH_CHICKEN2); 8583 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 8584 I915_WRITE(SOUTH_CHICKEN2, tmp); 8585 8586 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 8587 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 8588 DRM_ERROR("FDI mPHY reset assert timeout\n"); 8589 8590 tmp = I915_READ(SOUTH_CHICKEN2); 8591 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 8592 I915_WRITE(SOUTH_CHICKEN2, tmp); 8593 8594 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 8595 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 8596 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 8597 } 8598 8599 /* WaMPhyProgramming:hsw */ 8600 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 8601 { 8602 uint32_t tmp; 8603 8604 tmp = 
intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 8605 tmp &= ~(0xFF << 24); 8606 tmp |= (0x12 << 24); 8607 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 8608 8609 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 8610 tmp |= (1 << 11); 8611 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 8612 8613 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 8614 tmp |= (1 << 11); 8615 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 8616 8617 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 8618 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 8619 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 8620 8621 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 8622 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 8623 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 8624 8625 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 8626 tmp &= ~(7 << 13); 8627 tmp |= (5 << 13); 8628 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 8629 8630 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 8631 tmp &= ~(7 << 13); 8632 tmp |= (5 << 13); 8633 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 8634 8635 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 8636 tmp &= ~0xFF; 8637 tmp |= 0x1C; 8638 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 8639 8640 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 8641 tmp &= ~0xFF; 8642 tmp |= 0x1C; 8643 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 8644 8645 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 8646 tmp &= ~(0xFF << 16); 8647 tmp |= (0x1C << 16); 8648 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 8649 8650 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 8651 tmp &= ~(0xFF << 16); 8652 tmp |= (0x1C << 16); 8653 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 8654 8655 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 8656 tmp |= (1 << 27); 8657 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 8658 8659 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 8660 tmp |= (1 << 27); 8661 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 8662 8663 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 8664 tmp &= ~(0xF << 28); 8665 tmp |= (4 << 28); 8666 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 8667 8668 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 8669 tmp &= ~(0xF << 28); 8670 tmp |= (4 << 28); 8671 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 8672 } 8673 8674 /* Implements 3 different sequences from BSpec chapter "Display iCLK 8675 * Programming" based on the parameters passed: 8676 * - Sequence to enable CLKOUT_DP 8677 * - Sequence to enable CLKOUT_DP without spread 8678 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 8679 */ 8680 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, 8681 bool with_fdi) 8682 { 8683 struct drm_i915_private *dev_priv = to_i915(dev); 8684 uint32_t reg, tmp; 8685 8686 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 8687 with_spread = true; 8688 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n")) 8689 with_fdi = false; 8690 8691 mutex_lock(&dev_priv->sb_lock); 8692 8693 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8694 tmp &= ~SBI_SSCCTL_DISABLE; 8695 tmp |= SBI_SSCCTL_PATHALT; 8696 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8697 8698 udelay(24); 8699 8700 if (with_spread) { 8701 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8702 tmp &= ~SBI_SSCCTL_PATHALT; 8703 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8704 8705 if (with_fdi) { 8706 lpt_reset_fdi_mphy(dev_priv); 8707 lpt_program_fdi_mphy(dev_priv); 
8708 } 8709 } 8710 8711 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; 8712 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8713 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8714 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8715 8716 mutex_unlock(&dev_priv->sb_lock); 8717 } 8718 8719 /* Sequence to disable CLKOUT_DP */ 8720 static void lpt_disable_clkout_dp(struct drm_device *dev) 8721 { 8722 struct drm_i915_private *dev_priv = to_i915(dev); 8723 uint32_t reg, tmp; 8724 8725 mutex_lock(&dev_priv->sb_lock); 8726 8727 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; 8728 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8729 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8730 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8731 8732 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8733 if (!(tmp & SBI_SSCCTL_DISABLE)) { 8734 if (!(tmp & SBI_SSCCTL_PATHALT)) { 8735 tmp |= SBI_SSCCTL_PATHALT; 8736 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8737 udelay(32); 8738 } 8739 tmp |= SBI_SSCCTL_DISABLE; 8740 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8741 } 8742 8743 mutex_unlock(&dev_priv->sb_lock); 8744 } 8745 8746 #define BEND_IDX(steps) ((50 + (steps)) / 5) 8747 8748 static const uint16_t sscdivintphase[] = { 8749 [BEND_IDX( 50)] = 0x3B23, 8750 [BEND_IDX( 45)] = 0x3B23, 8751 [BEND_IDX( 40)] = 0x3C23, 8752 [BEND_IDX( 35)] = 0x3C23, 8753 [BEND_IDX( 30)] = 0x3D23, 8754 [BEND_IDX( 25)] = 0x3D23, 8755 [BEND_IDX( 20)] = 0x3E23, 8756 [BEND_IDX( 15)] = 0x3E23, 8757 [BEND_IDX( 10)] = 0x3F23, 8758 [BEND_IDX( 5)] = 0x3F23, 8759 [BEND_IDX( 0)] = 0x0025, 8760 [BEND_IDX( -5)] = 0x0025, 8761 [BEND_IDX(-10)] = 0x0125, 8762 [BEND_IDX(-15)] = 0x0125, 8763 [BEND_IDX(-20)] = 0x0225, 8764 [BEND_IDX(-25)] = 0x0225, 8765 [BEND_IDX(-30)] = 0x0325, 8766 [BEND_IDX(-35)] = 0x0325, 8767 [BEND_IDX(-40)] = 0x0425, 8768 [BEND_IDX(-45)] = 0x0425, 8769 [BEND_IDX(-50)] = 0x0525, 8770 }; 8771 8772 /* 8773 * Bend CLKOUT_DP 8774 * steps -50 to 50 inclusive, in steps of 5 8775 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 8776 * change in clock period = -(steps / 10) * 5.787 ps 8777 */ 8778 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 8779 { 8780 uint32_t tmp; 8781 int idx = BEND_IDX(steps); 8782 8783 if (WARN_ON(steps % 5 != 0)) 8784 return; 8785 8786 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 8787 return; 8788 8789 mutex_lock(&dev_priv->sb_lock); 8790 8791 if (steps % 10 != 0) 8792 tmp = 0xAAAAAAAB; 8793 else 8794 tmp = 0x00000000; 8795 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 8796 8797 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 8798 tmp &= 0xffff0000; 8799 tmp |= sscdivintphase[idx]; 8800 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 8801 8802 mutex_unlock(&dev_priv->sb_lock); 8803 } 8804 8805 #undef BEND_IDX 8806 8807 static void lpt_init_pch_refclk(struct drm_device *dev) 8808 { 8809 struct intel_encoder *encoder; 8810 bool has_vga = false; 8811 8812 for_each_intel_encoder(dev, encoder) { 8813 switch (encoder->type) { 8814 case INTEL_OUTPUT_ANALOG: 8815 has_vga = true; 8816 break; 8817 default: 8818 break; 8819 } 8820 } 8821 8822 if (has_vga) { 8823 lpt_bend_clkout_dp(to_i915(dev), 0); 8824 lpt_enable_clkout_dp(dev, true, true); 8825 } else { 8826 lpt_disable_clkout_dp(dev); 8827 } 8828 } 8829 8830 /* 8831 * Initialize reference clocks when the driver loads 8832 */ 8833 void intel_init_pch_refclk(struct drm_device *dev) 8834 { 8835 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 8836 
ironlake_init_pch_refclk(dev); 8837 else if (HAS_PCH_LPT(dev)) 8838 lpt_init_pch_refclk(dev); 8839 } 8840 8841 static void ironlake_set_pipeconf(struct drm_crtc *crtc) 8842 { 8843 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8845 int pipe = intel_crtc->pipe; 8846 uint32_t val; 8847 8848 val = 0; 8849 8850 switch (intel_crtc->config->pipe_bpp) { 8851 case 18: 8852 val |= PIPECONF_6BPC; 8853 break; 8854 case 24: 8855 val |= PIPECONF_8BPC; 8856 break; 8857 case 30: 8858 val |= PIPECONF_10BPC; 8859 break; 8860 case 36: 8861 val |= PIPECONF_12BPC; 8862 break; 8863 default: 8864 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8865 BUG(); 8866 } 8867 8868 if (intel_crtc->config->dither) 8869 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8870 8871 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8872 val |= PIPECONF_INTERLACED_ILK; 8873 else 8874 val |= PIPECONF_PROGRESSIVE; 8875 8876 if (intel_crtc->config->limited_color_range) 8877 val |= PIPECONF_COLOR_RANGE_SELECT; 8878 8879 I915_WRITE(PIPECONF(pipe), val); 8880 POSTING_READ(PIPECONF(pipe)); 8881 } 8882 8883 static void haswell_set_pipeconf(struct drm_crtc *crtc) 8884 { 8885 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8887 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8888 u32 val = 0; 8889 8890 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) 8891 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8892 8893 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8894 val |= PIPECONF_INTERLACED_ILK; 8895 else 8896 val |= PIPECONF_PROGRESSIVE; 8897 8898 I915_WRITE(PIPECONF(cpu_transcoder), val); 8899 POSTING_READ(PIPECONF(cpu_transcoder)); 8900 } 8901 8902 static void haswell_set_pipemisc(struct drm_crtc *crtc) 8903 { 8904 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8905 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8906 8907 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8908 u32 val = 0; 8909 8910 switch (intel_crtc->config->pipe_bpp) { 8911 case 18: 8912 val |= PIPEMISC_DITHER_6_BPC; 8913 break; 8914 case 24: 8915 val |= PIPEMISC_DITHER_8_BPC; 8916 break; 8917 case 30: 8918 val |= PIPEMISC_DITHER_10_BPC; 8919 break; 8920 case 36: 8921 val |= PIPEMISC_DITHER_12_BPC; 8922 break; 8923 default: 8924 /* Case prevented by pipe_config_set_bpp. */ 8925 BUG(); 8926 } 8927 8928 if (intel_crtc->config->dither) 8929 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8930 8931 I915_WRITE(PIPEMISC(intel_crtc->pipe), val); 8932 } 8933 } 8934 8935 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 8936 { 8937 /* 8938 * Account for spread spectrum to avoid 8939 * oversubscribing the link. Max center spread 8940 * is 2.5%; use 5% for safety's sake. 
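 * E.g. a 148500 kHz dot clock at 24 bpp on a 270000 kHz link:
 * DIV_ROUND_UP(148500 * 24 * 21 / 20, 270000 * 8) = 2 lanes.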
8941 */ 8942 u32 bps = target_clock * bpp * 21 / 20; 8943 return DIV_ROUND_UP(bps, link_bw * 8); 8944 } 8945 8946 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 8947 { 8948 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 8949 } 8950 8951 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8952 struct intel_crtc_state *crtc_state, 8953 struct dpll *reduced_clock) 8954 { 8955 struct drm_crtc *crtc = &intel_crtc->base; 8956 struct drm_device *dev = crtc->dev; 8957 struct drm_i915_private *dev_priv = to_i915(dev); 8958 u32 dpll, fp, fp2; 8959 int factor; 8960 8961 /* Enable autotuning of the PLL clock (if permissible) */ 8962 factor = 21; 8963 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8964 if ((intel_panel_use_ssc(dev_priv) && 8965 dev_priv->vbt.lvds_ssc_freq == 100000) || 8966 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 8967 factor = 25; 8968 } else if (crtc_state->sdvo_tv_clock) 8969 factor = 20; 8970 8971 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8972 8973 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 8974 fp |= FP_CB_TUNE; 8975 8976 if (reduced_clock) { 8977 fp2 = i9xx_dpll_compute_fp(reduced_clock); 8978 8979 if (reduced_clock->m < factor * reduced_clock->n) 8980 fp2 |= FP_CB_TUNE; 8981 } else { 8982 fp2 = fp; 8983 } 8984 8985 dpll = 0; 8986 8987 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8988 dpll |= DPLLB_MODE_LVDS; 8989 else 8990 dpll |= DPLLB_MODE_DAC_SERIAL; 8991 8992 dpll |= (crtc_state->pixel_multiplier - 1) 8993 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8994 8995 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8996 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8997 dpll |= DPLL_SDVO_HIGH_SPEED; 8998 8999 if (intel_crtc_has_dp_encoder(crtc_state)) 9000 dpll |= DPLL_SDVO_HIGH_SPEED; 9001 9002 /* compute bitmask from p1 value */ 9003 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 9004 /* also FPA1 */ 9005 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 9006 9007 switch (crtc_state->dpll.p2) { 9008 case 5: 9009 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 9010 break; 9011 case 7: 9012 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 9013 break; 9014 case 10: 9015 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 9016 break; 9017 case 14: 9018 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 9019 break; 9020 } 9021 9022 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9023 intel_panel_use_ssc(dev_priv)) 9024 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 9025 else 9026 dpll |= PLL_REF_INPUT_DREFCLK; 9027 9028 dpll |= DPLL_VCO_ENABLE; 9029 9030 crtc_state->dpll_hw_state.dpll = dpll; 9031 crtc_state->dpll_hw_state.fp0 = fp; 9032 crtc_state->dpll_hw_state.fp1 = fp2; 9033 } 9034 9035 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 9036 struct intel_crtc_state *crtc_state) 9037 { 9038 struct drm_device *dev = crtc->base.dev; 9039 struct drm_i915_private *dev_priv = to_i915(dev); 9040 struct dpll reduced_clock; 9041 bool has_reduced_clock = false; 9042 struct intel_shared_dpll *pll; 9043 const struct intel_limit *limit; 9044 int refclk = 120000; 9045 9046 memset(&crtc_state->dpll_hw_state, 0, 9047 sizeof(crtc_state->dpll_hw_state)); 9048 9049 crtc->lowfreq_avail = false; 9050 9051 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
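 * Everything else goes through the FDI link and needs one, hence the
 * early return below when there is no PCH encoder.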
*/ 9052 if (!crtc_state->has_pch_encoder) 9053 return 0; 9054 9055 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 9056 if (intel_panel_use_ssc(dev_priv)) { 9057 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 9058 dev_priv->vbt.lvds_ssc_freq); 9059 refclk = dev_priv->vbt.lvds_ssc_freq; 9060 } 9061 9062 if (intel_is_dual_link_lvds(dev)) { 9063 if (refclk == 100000) 9064 limit = &intel_limits_ironlake_dual_lvds_100m; 9065 else 9066 limit = &intel_limits_ironlake_dual_lvds; 9067 } else { 9068 if (refclk == 100000) 9069 limit = &intel_limits_ironlake_single_lvds_100m; 9070 else 9071 limit = &intel_limits_ironlake_single_lvds; 9072 } 9073 } else { 9074 limit = &intel_limits_ironlake_dac; 9075 } 9076 9077 if (!crtc_state->clock_set && 9078 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 9079 refclk, NULL, &crtc_state->dpll)) { 9080 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 9081 return -EINVAL; 9082 } 9083 9084 ironlake_compute_dpll(crtc, crtc_state, 9085 has_reduced_clock ? &reduced_clock : NULL); 9086 9087 pll = intel_get_shared_dpll(crtc, crtc_state, NULL); 9088 if (pll == NULL) { 9089 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 9090 pipe_name(crtc->pipe)); 9091 return -EINVAL; 9092 } 9093 9094 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 9095 has_reduced_clock) 9096 crtc->lowfreq_avail = true; 9097 9098 return 0; 9099 } 9100 9101 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 9102 struct intel_link_m_n *m_n) 9103 { 9104 struct drm_device *dev = crtc->base.dev; 9105 struct drm_i915_private *dev_priv = to_i915(dev); 9106 enum i915_pipe pipe = crtc->pipe; 9107 9108 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9109 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 9110 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 9111 & ~TU_SIZE_MASK; 9112 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 9113 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 9114 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9115 } 9116 9117 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 9118 enum transcoder transcoder, 9119 struct intel_link_m_n *m_n, 9120 struct intel_link_m_n *m2_n2) 9121 { 9122 struct drm_device *dev = crtc->base.dev; 9123 struct drm_i915_private *dev_priv = to_i915(dev); 9124 enum i915_pipe pipe = crtc->pipe; 9125 9126 if (INTEL_INFO(dev)->gen >= 5) { 9127 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 9128 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 9129 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 9130 & ~TU_SIZE_MASK; 9131 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 9132 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 9133 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9134 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 9135 * gen < 8) and if DRRS is supported (to make sure the 9136 * registers are not unnecessarily read). 
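 * M2_N2 hold the alternate link timings used when DRRS downclocks
 * the panel.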
9137 */ 9138 if (m2_n2 && INTEL_INFO(dev)->gen < 8 && 9139 crtc->config->has_drrs) { 9140 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9141 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9142 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9143 & ~TU_SIZE_MASK; 9144 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9145 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9146 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9147 } 9148 } else { 9149 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9150 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9151 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9152 & ~TU_SIZE_MASK; 9153 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9154 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9155 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9156 } 9157 } 9158 9159 void intel_dp_get_m_n(struct intel_crtc *crtc, 9160 struct intel_crtc_state *pipe_config) 9161 { 9162 if (pipe_config->has_pch_encoder) 9163 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9164 else 9165 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9166 &pipe_config->dp_m_n, 9167 &pipe_config->dp_m2_n2); 9168 } 9169 9170 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9171 struct intel_crtc_state *pipe_config) 9172 { 9173 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9174 &pipe_config->fdi_m_n, NULL); 9175 } 9176 9177 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9178 struct intel_crtc_state *pipe_config) 9179 { 9180 struct drm_device *dev = crtc->base.dev; 9181 struct drm_i915_private *dev_priv = to_i915(dev); 9182 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9183 uint32_t ps_ctrl = 0; 9184 int id = -1; 9185 int i; 9186 9187 /* find scaler attached to this pipe */ 9188 for (i = 0; i < crtc->num_scalers; i++) { 9189 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9190 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9191 id = i; 9192 pipe_config->pch_pfit.enabled = true; 9193 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 9194 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 9195 break; 9196 } 9197 } 9198 9199 scaler_state->scaler_id = id; 9200 if (id >= 0) { 9201 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 9202 } else { 9203 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 9204 } 9205 } 9206 9207 static void 9208 skylake_get_initial_plane_config(struct intel_crtc *crtc, 9209 struct intel_initial_plane_config *plane_config) 9210 { 9211 struct drm_device *dev = crtc->base.dev; 9212 struct drm_i915_private *dev_priv = to_i915(dev); 9213 u32 val, base, offset, stride_mult, tiling; 9214 int pipe = crtc->pipe; 9215 int fourcc, pixel_format; 9216 unsigned int aligned_height; 9217 struct drm_framebuffer *fb; 9218 struct intel_framebuffer *intel_fb; 9219 9220 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9221 if (!intel_fb) { 9222 DRM_DEBUG_KMS("failed to alloc fb\n"); 9223 return; 9224 } 9225 9226 fb = &intel_fb->base; 9227 9228 val = I915_READ(PLANE_CTL(pipe, 0)); 9229 if (!(val & PLANE_CTL_ENABLE)) 9230 goto error; 9231 9232 pixel_format = val & PLANE_CTL_FORMAT_MASK; 9233 fourcc = skl_format_to_fourcc(pixel_format, 9234 val & PLANE_CTL_ORDER_RGBX, 9235 val & PLANE_CTL_ALPHA_MASK); 9236 fb->pixel_format = fourcc; 9237 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 9238 9239 tiling = val & PLANE_CTL_TILED_MASK; 9240 switch (tiling) { 9241 case PLANE_CTL_TILED_LINEAR: 9242 fb->modifier[0] = 
DRM_FORMAT_MOD_NONE; 9243 break; 9244 case PLANE_CTL_TILED_X: 9245 plane_config->tiling = I915_TILING_X; 9246 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 9247 break; 9248 case PLANE_CTL_TILED_Y: 9249 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED; 9250 break; 9251 case PLANE_CTL_TILED_YF: 9252 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED; 9253 break; 9254 default: 9255 MISSING_CASE(tiling); 9256 goto error; 9257 } 9258 9259 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; 9260 plane_config->base = base; 9261 9262 offset = I915_READ(PLANE_OFFSET(pipe, 0)); 9263 9264 val = I915_READ(PLANE_SIZE(pipe, 0)); 9265 fb->height = ((val >> 16) & 0xfff) + 1; 9266 fb->width = ((val >> 0) & 0x1fff) + 1; 9267 9268 val = I915_READ(PLANE_STRIDE(pipe, 0)); 9269 stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0], 9270 fb->pixel_format); 9271 fb->pitches[0] = (val & 0x3ff) * stride_mult; 9272 9273 aligned_height = intel_fb_align_height(dev, fb->height, 9274 fb->pixel_format, 9275 fb->modifier[0]); 9276 9277 plane_config->size = fb->pitches[0] * aligned_height; 9278 9279 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9280 pipe_name(pipe), fb->width, fb->height, 9281 fb->bits_per_pixel, base, fb->pitches[0], 9282 plane_config->size); 9283 9284 plane_config->fb = intel_fb; 9285 return; 9286 9287 error: 9288 kfree(fb); 9289 } 9290 9291 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 9292 struct intel_crtc_state *pipe_config) 9293 { 9294 struct drm_device *dev = crtc->base.dev; 9295 struct drm_i915_private *dev_priv = to_i915(dev); 9296 uint32_t tmp; 9297 9298 tmp = I915_READ(PF_CTL(crtc->pipe)); 9299 9300 if (tmp & PF_ENABLE) { 9301 pipe_config->pch_pfit.enabled = true; 9302 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 9303 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 9304 9305 /* We currently do not free assignments of panel fitters on 9306 * ivb/hsw (since we don't use the higher upscaling modes which 9307 * differentiate them) so just WARN about this case for now.
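 * (The WARN below fires if the fitter turns out to be bound to a
 * different pipe than ours.)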
*/ 9308 if (IS_GEN7(dev)) { 9309 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9310 PF_PIPE_SEL_IVB(crtc->pipe)); 9311 } 9312 } 9313 } 9314 9315 static void 9316 ironlake_get_initial_plane_config(struct intel_crtc *crtc, 9317 struct intel_initial_plane_config *plane_config) 9318 { 9319 struct drm_device *dev = crtc->base.dev; 9320 struct drm_i915_private *dev_priv = to_i915(dev); 9321 u32 val, base, offset; 9322 int pipe = crtc->pipe; 9323 int fourcc, pixel_format; 9324 unsigned int aligned_height; 9325 struct drm_framebuffer *fb; 9326 struct intel_framebuffer *intel_fb; 9327 9328 val = I915_READ(DSPCNTR(pipe)); 9329 if (!(val & DISPLAY_PLANE_ENABLE)) 9330 return; 9331 9332 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9333 if (!intel_fb) { 9334 DRM_DEBUG_KMS("failed to alloc fb\n"); 9335 return; 9336 } 9337 9338 fb = &intel_fb->base; 9339 9340 if (INTEL_INFO(dev)->gen >= 4) { 9341 if (val & DISPPLANE_TILED) { 9342 plane_config->tiling = I915_TILING_X; 9343 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 9344 } 9345 } 9346 9347 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9348 fourcc = i9xx_format_to_fourcc(pixel_format); 9349 fb->pixel_format = fourcc; 9350 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 9351 9352 base = I915_READ(DSPSURF(pipe)) & 0xfffff000; 9353 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 9354 offset = I915_READ(DSPOFFSET(pipe)); 9355 } else { 9356 if (plane_config->tiling) 9357 offset = I915_READ(DSPTILEOFF(pipe)); 9358 else 9359 offset = I915_READ(DSPLINOFF(pipe)); 9360 } 9361 plane_config->base = base; 9362 9363 val = I915_READ(PIPESRC(pipe)); 9364 fb->width = ((val >> 16) & 0xfff) + 1; 9365 fb->height = ((val >> 0) & 0xfff) + 1; 9366 9367 val = I915_READ(DSPSTRIDE(pipe)); 9368 fb->pitches[0] = val & 0xffffffc0; 9369 9370 aligned_height = intel_fb_align_height(dev, fb->height, 9371 fb->pixel_format, 9372 fb->modifier[0]); 9373 9374 plane_config->size = fb->pitches[0] * aligned_height; 9375 9376 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9377 pipe_name(pipe), fb->width, fb->height, 9378 fb->bits_per_pixel, base, fb->pitches[0], 9379 plane_config->size); 9380 9381 plane_config->fb = intel_fb; 9382 } 9383 9384 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9385 struct intel_crtc_state *pipe_config) 9386 { 9387 struct drm_device *dev = crtc->base.dev; 9388 struct drm_i915_private *dev_priv = to_i915(dev); 9389 enum intel_display_power_domain power_domain; 9390 uint32_t tmp; 9391 bool ret; 9392 9393 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9394 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9395 return false; 9396 9397 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9398 pipe_config->shared_dpll = NULL; 9399 9400 ret = false; 9401 tmp = I915_READ(PIPECONF(crtc->pipe)); 9402 if (!(tmp & PIPECONF_ENABLE)) 9403 goto out; 9404 9405 switch (tmp & PIPECONF_BPC_MASK) { 9406 case PIPECONF_6BPC: 9407 pipe_config->pipe_bpp = 18; 9408 break; 9409 case PIPECONF_8BPC: 9410 pipe_config->pipe_bpp = 24; 9411 break; 9412 case PIPECONF_10BPC: 9413 pipe_config->pipe_bpp = 30; 9414 break; 9415 case PIPECONF_12BPC: 9416 pipe_config->pipe_bpp = 36; 9417 break; 9418 default: 9419 break; 9420 } 9421 9422 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 9423 pipe_config->limited_color_range = true; 9424 9425 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 9426 struct intel_shared_dpll *pll; 9427 enum intel_dpll_id pll_id; 9428 9429 pipe_config->has_pch_encoder = true; 9430 9431 tmp = 
I915_READ(FDI_RX_CTL(crtc->pipe)); 9432 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9433 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9434 9435 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9436 9437 if (HAS_PCH_IBX(dev_priv)) { 9438 /* 9439 * The pipe->pch transcoder and pch transcoder->pll 9440 * mapping is fixed. 9441 */ 9442 pll_id = (enum intel_dpll_id) crtc->pipe; 9443 } else { 9444 tmp = I915_READ(PCH_DPLL_SEL); 9445 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 9446 pll_id = DPLL_ID_PCH_PLL_B; 9447 else 9448 pll_id = DPLL_ID_PCH_PLL_A; 9449 } 9450 9451 pipe_config->shared_dpll = 9452 intel_get_shared_dpll_by_id(dev_priv, pll_id); 9453 pll = pipe_config->shared_dpll; 9454 9455 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 9456 &pipe_config->dpll_hw_state)); 9457 9458 tmp = pipe_config->dpll_hw_state.dpll; 9459 pipe_config->pixel_multiplier = 9460 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 9461 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 9462 9463 ironlake_pch_clock_get(crtc, pipe_config); 9464 } else { 9465 pipe_config->pixel_multiplier = 1; 9466 } 9467 9468 intel_get_pipe_timings(crtc, pipe_config); 9469 intel_get_pipe_src_size(crtc, pipe_config); 9470 9471 ironlake_get_pfit_config(crtc, pipe_config); 9472 9473 ret = true; 9474 9475 out: 9476 intel_display_power_put(dev_priv, power_domain); 9477 9478 return ret; 9479 } 9480 9481 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9482 { 9483 struct drm_device *dev = &dev_priv->drm; 9484 struct intel_crtc *crtc; 9485 9486 for_each_intel_crtc(dev, crtc) 9487 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 9488 pipe_name(crtc->pipe)); 9489 9490 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 9491 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 9492 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 9493 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 9494 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 9495 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 9496 "CPU PWM1 enabled\n"); 9497 if (IS_HASWELL(dev)) 9498 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 9499 "CPU PWM2 enabled\n"); 9500 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 9501 "PCH PWM1 enabled\n"); 9502 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 9503 "Utility pin enabled\n"); 9504 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 9505 9506 /* 9507 * In theory we can still leave IRQs enabled, as long as only the HPD 9508 * interrupts remain enabled. We used to check for that, but since it's 9509 * gen-specific and since we only disable LCPLL after we fully disable 9510 * the interrupts, the check below should be enough.
9511 */ 9512 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 9513 } 9514 9515 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 9516 { 9517 struct drm_device *dev = &dev_priv->drm; 9518 9519 if (IS_HASWELL(dev)) 9520 return I915_READ(D_COMP_HSW); 9521 else 9522 return I915_READ(D_COMP_BDW); 9523 } 9524 9525 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 9526 { 9527 struct drm_device *dev = &dev_priv->drm; 9528 9529 if (IS_HASWELL(dev)) { 9530 mutex_lock(&dev_priv->rps.hw_lock); 9531 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 9532 val)) 9533 DRM_ERROR("Failed to write to D_COMP\n"); 9534 mutex_unlock(&dev_priv->rps.hw_lock); 9535 } else { 9536 I915_WRITE(D_COMP_BDW, val); 9537 POSTING_READ(D_COMP_BDW); 9538 } 9539 } 9540 9541 /* 9542 * This function implements pieces of two sequences from BSpec: 9543 * - Sequence for display software to disable LCPLL 9544 * - Sequence for display software to allow package C8+ 9545 * The steps implemented here are just the steps that actually touch the LCPLL 9546 * register. Callers should take care of disabling all the display engine 9547 * functions, doing the mode unset, fixing interrupts, etc. 9548 */ 9549 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 9550 bool switch_to_fclk, bool allow_power_down) 9551 { 9552 uint32_t val; 9553 9554 assert_can_disable_lcpll(dev_priv); 9555 9556 val = I915_READ(LCPLL_CTL); 9557 9558 if (switch_to_fclk) { 9559 val |= LCPLL_CD_SOURCE_FCLK; 9560 I915_WRITE(LCPLL_CTL, val); 9561 9562 if (wait_for_us(I915_READ(LCPLL_CTL) & 9563 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9564 DRM_ERROR("Switching to FCLK failed\n"); 9565 9566 val = I915_READ(LCPLL_CTL); 9567 } 9568 9569 val |= LCPLL_PLL_DISABLE; 9570 I915_WRITE(LCPLL_CTL, val); 9571 POSTING_READ(LCPLL_CTL); 9572 9573 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) 9574 DRM_ERROR("LCPLL still locked\n"); 9575 9576 val = hsw_read_dcomp(dev_priv); 9577 val |= D_COMP_COMP_DISABLE; 9578 hsw_write_dcomp(dev_priv, val); 9579 ndelay(100); 9580 9581 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 9582 1)) 9583 DRM_ERROR("D_COMP RCOMP still in progress\n"); 9584 9585 if (allow_power_down) { 9586 val = I915_READ(LCPLL_CTL); 9587 val |= LCPLL_POWER_DOWN_ALLOW; 9588 I915_WRITE(LCPLL_CTL, val); 9589 POSTING_READ(LCPLL_CTL); 9590 } 9591 } 9592 9593 /* 9594 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 9595 * source. 9596 */ 9597 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 9598 { 9599 uint32_t val; 9600 9601 val = I915_READ(LCPLL_CTL); 9602 9603 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 9604 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 9605 return; 9606 9607 /* 9608 * Make sure we're not on PC8 state before disabling PC8, otherwise 9609 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
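 *
 * Any forcewake reference is enough for this: as long as it is held
 * the hardware stays out of the deeper package C-states. The
 * reference taken below is dropped again at the end of this function,
 * after LCPLL is locked and the CD source is switched back.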
9610 */ 9611 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 9612 9613 if (val & LCPLL_POWER_DOWN_ALLOW) { 9614 val &= ~LCPLL_POWER_DOWN_ALLOW; 9615 I915_WRITE(LCPLL_CTL, val); 9616 POSTING_READ(LCPLL_CTL); 9617 } 9618 9619 val = hsw_read_dcomp(dev_priv); 9620 val |= D_COMP_COMP_FORCE; 9621 val &= ~D_COMP_COMP_DISABLE; 9622 hsw_write_dcomp(dev_priv, val); 9623 9624 val = I915_READ(LCPLL_CTL); 9625 val &= ~LCPLL_PLL_DISABLE; 9626 I915_WRITE(LCPLL_CTL, val); 9627 9628 if (intel_wait_for_register(dev_priv, 9629 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 9630 5)) 9631 DRM_ERROR("LCPLL not locked yet\n"); 9632 9633 if (val & LCPLL_CD_SOURCE_FCLK) { 9634 val = I915_READ(LCPLL_CTL); 9635 val &= ~LCPLL_CD_SOURCE_FCLK; 9636 I915_WRITE(LCPLL_CTL, val); 9637 9638 if (wait_for_us((I915_READ(LCPLL_CTL) & 9639 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9640 DRM_ERROR("Switching back to LCPLL failed\n"); 9641 } 9642 9643 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9644 intel_update_cdclk(&dev_priv->drm); 9645 } 9646 9647 /* 9648 * Package states C8 and deeper are really deep PC states that can only be 9649 * reached when all the devices on the system allow it, so even if the graphics 9650 * device allows PC8+, it doesn't mean the system will actually get to these 9651 * states. Our driver only allows PC8+ when going into runtime PM. 9652 * 9653 * The requirements for PC8+ are that all the outputs are disabled, the power 9654 * well is disabled and most interrupts are disabled, and these are also 9655 * requirements for runtime PM. When these conditions are met, we manually do 9656 * the remaining steps: disable the interrupts and clocks, and switch the LCPLL 9657 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can 9658 * hard hang the machine. 9659 * 9660 * When we really reach PC8 or deeper states (not just when we allow it) we lose 9661 * the state of some registers, so when we come back from PC8+ we need to 9662 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 9663 * need to take care of the registers kept by RC6. Notice that this happens even 9664 * if we don't put the device in PCI D3 state (which is what currently happens 9665 * because of the runtime PM support). 9666 * 9667 * For more, read "Display Sequences for Package C8" on the hardware 9668 * documentation.
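 *
 * As a rough sketch of the expected pairing (the actual callers live
 * in the runtime PM code in i915_drv.c, not here):
 *
 *	runtime suspend  ->  hsw_enable_pc8(dev_priv);
 *	runtime resume   ->  hsw_disable_pc8(dev_priv);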
9669 */ 9670 void hsw_enable_pc8(struct drm_i915_private *dev_priv) 9671 { 9672 struct drm_device *dev = &dev_priv->drm; 9673 uint32_t val; 9674 9675 DRM_DEBUG_KMS("Enabling package C8+\n"); 9676 9677 if (HAS_PCH_LPT_LP(dev)) { 9678 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9679 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 9680 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9681 } 9682 9683 lpt_disable_clkout_dp(dev); 9684 hsw_disable_lcpll(dev_priv, true, true); 9685 } 9686 9687 void hsw_disable_pc8(struct drm_i915_private *dev_priv) 9688 { 9689 struct drm_device *dev = &dev_priv->drm; 9690 uint32_t val; 9691 9692 DRM_DEBUG_KMS("Disabling package C8+\n"); 9693 9694 hsw_restore_lcpll(dev_priv); 9695 lpt_init_pch_refclk(dev); 9696 9697 if (HAS_PCH_LPT_LP(dev)) { 9698 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9699 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 9700 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9701 } 9702 } 9703 9704 static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9705 { 9706 struct drm_device *dev = old_state->dev; 9707 struct intel_atomic_state *old_intel_state = 9708 to_intel_atomic_state(old_state); 9709 unsigned int req_cdclk = old_intel_state->dev_cdclk; 9710 9711 bxt_set_cdclk(to_i915(dev), req_cdclk); 9712 } 9713 9714 /* compute the max rate for new configuration */ 9715 static int ilk_max_pixel_rate(struct drm_atomic_state *state) 9716 { 9717 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9718 struct drm_i915_private *dev_priv = to_i915(state->dev); 9719 struct drm_crtc *crtc; 9720 struct drm_crtc_state *cstate; 9721 struct intel_crtc_state *crtc_state; 9722 unsigned max_pixel_rate = 0, i; 9723 enum i915_pipe pipe; 9724 9725 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, 9726 sizeof(intel_state->min_pixclk)); 9727 9728 for_each_crtc_in_state(state, crtc, cstate, i) { 9729 int pixel_rate; 9730 9731 crtc_state = to_intel_crtc_state(cstate); 9732 if (!crtc_state->base.enable) { 9733 intel_state->min_pixclk[i] = 0; 9734 continue; 9735 } 9736 9737 pixel_rate = ilk_pipe_pixel_rate(crtc_state); 9738 9739 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 9740 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 9741 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 9742 9743 intel_state->min_pixclk[i] = pixel_rate; 9744 } 9745 9746 for_each_pipe(dev_priv, pipe) 9747 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); 9748 9749 return max_pixel_rate; 9750 } 9751 9752 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) 9753 { 9754 struct drm_i915_private *dev_priv = to_i915(dev); 9755 uint32_t val, data; 9756 int ret; 9757 9758 if (WARN((I915_READ(LCPLL_CTL) & 9759 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | 9760 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | 9761 LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | 9762 LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, 9763 "trying to change cdclk frequency with cdclk not enabled\n")) 9764 return; 9765 9766 mutex_lock(&dev_priv->rps.hw_lock); 9767 ret = sandybridge_pcode_write(dev_priv, 9768 BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); 9769 mutex_unlock(&dev_priv->rps.hw_lock); 9770 if (ret) { 9771 DRM_ERROR("failed to inform pcode about cdclk change\n"); 9772 return; 9773 } 9774 9775 val = I915_READ(LCPLL_CTL); 9776 val |= LCPLL_CD_SOURCE_FCLK; 9777 I915_WRITE(LCPLL_CTL, val); 9778 9779 if (wait_for_us(I915_READ(LCPLL_CTL) & 9780 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9781 DRM_ERROR("Switching to FCLK failed\n"); 9782 9783 val = I915_READ(LCPLL_CTL); 9784 val &= 
~LCPLL_CLK_FREQ_MASK; 9785 9786 switch (cdclk) { 9787 case 450000: 9788 val |= LCPLL_CLK_FREQ_450; 9789 data = 0; 9790 break; 9791 case 540000: 9792 val |= LCPLL_CLK_FREQ_54O_BDW; 9793 data = 1; 9794 break; 9795 case 337500: 9796 val |= LCPLL_CLK_FREQ_337_5_BDW; 9797 data = 2; 9798 break; 9799 case 675000: 9800 val |= LCPLL_CLK_FREQ_675_BDW; 9801 data = 3; 9802 break; 9803 default: 9804 WARN(1, "invalid cdclk frequency\n"); 9805 return; 9806 } 9807 9808 I915_WRITE(LCPLL_CTL, val); 9809 9810 val = I915_READ(LCPLL_CTL); 9811 val &= ~LCPLL_CD_SOURCE_FCLK; 9812 I915_WRITE(LCPLL_CTL, val); 9813 9814 if (wait_for_us((I915_READ(LCPLL_CTL) & 9815 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9816 DRM_ERROR("Switching back to LCPLL failed\n"); 9817 9818 mutex_lock(&dev_priv->rps.hw_lock); 9819 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); 9820 mutex_unlock(&dev_priv->rps.hw_lock); 9821 9822 I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); 9823 9824 intel_update_cdclk(dev); 9825 9826 WARN(cdclk != dev_priv->cdclk_freq, 9827 "cdclk requested %d kHz but got %d kHz\n", 9828 cdclk, dev_priv->cdclk_freq); 9829 } 9830 9831 static int broadwell_calc_cdclk(int max_pixclk) 9832 { 9833 if (max_pixclk > 540000) 9834 return 675000; 9835 else if (max_pixclk > 450000) 9836 return 540000; 9837 else if (max_pixclk > 337500) 9838 return 450000; 9839 else 9840 return 337500; 9841 } 9842 9843 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9844 { 9845 struct drm_i915_private *dev_priv = to_i915(state->dev); 9846 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9847 int max_pixclk = ilk_max_pixel_rate(state); 9848 int cdclk; 9849 9850 /* 9851 * FIXME should also account for plane ratio 9852 * once 64bpp pixel formats are supported. 9853 */ 9854 cdclk = broadwell_calc_cdclk(max_pixclk); 9855 9856 if (cdclk > dev_priv->max_cdclk_freq) { 9857 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9858 cdclk, dev_priv->max_cdclk_freq); 9859 return -EINVAL; 9860 } 9861 9862 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9863 if (!intel_state->active_crtcs) 9864 intel_state->dev_cdclk = broadwell_calc_cdclk(0); 9865 9866 return 0; 9867 } 9868 9869 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9870 { 9871 struct drm_device *dev = old_state->dev; 9872 struct intel_atomic_state *old_intel_state = 9873 to_intel_atomic_state(old_state); 9874 unsigned req_cdclk = old_intel_state->dev_cdclk; 9875 9876 broadwell_set_cdclk(dev, req_cdclk); 9877 } 9878 9879 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) 9880 { 9881 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9882 struct drm_i915_private *dev_priv = to_i915(state->dev); 9883 const int max_pixclk = ilk_max_pixel_rate(state); 9884 int vco = intel_state->cdclk_pll_vco; 9885 int cdclk; 9886 9887 /* 9888 * FIXME should also account for plane ratio 9889 * once 64bpp pixel formats are supported. 9890 */ 9891 cdclk = skl_calc_cdclk(max_pixclk, vco); 9892 9893 /* 9894 * FIXME move the cdclk calculation to 9895 * compute_config() so we can fail gracefully.
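 *
 * Until that happens we simply clamp to the maximum below and log an
 * error instead of failing the modeset outright.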
9896 */ 9897 if (cdclk > dev_priv->max_cdclk_freq) { 9898 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9899 cdclk, dev_priv->max_cdclk_freq); 9900 cdclk = dev_priv->max_cdclk_freq; 9901 } 9902 9903 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9904 if (!intel_state->active_crtcs) 9905 intel_state->dev_cdclk = skl_calc_cdclk(0, vco); 9906 9907 return 0; 9908 } 9909 9910 static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9911 { 9912 struct drm_i915_private *dev_priv = to_i915(old_state->dev); 9913 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); 9914 unsigned int req_cdclk = intel_state->dev_cdclk; 9915 unsigned int req_vco = intel_state->cdclk_pll_vco; 9916 9917 skl_set_cdclk(dev_priv, req_cdclk, req_vco); 9918 } 9919 9920 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9921 struct intel_crtc_state *crtc_state) 9922 { 9923 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 9924 if (!intel_ddi_pll_select(crtc, crtc_state)) 9925 return -EINVAL; 9926 } 9927 9928 crtc->lowfreq_avail = false; 9929 9930 return 0; 9931 } 9932 9933 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 9934 enum port port, 9935 struct intel_crtc_state *pipe_config) 9936 { 9937 enum intel_dpll_id id; 9938 9939 switch (port) { 9940 case PORT_A: 9941 pipe_config->ddi_pll_sel = SKL_DPLL0; 9942 id = DPLL_ID_SKL_DPLL0; 9943 break; 9944 case PORT_B: 9945 pipe_config->ddi_pll_sel = SKL_DPLL1; 9946 id = DPLL_ID_SKL_DPLL1; 9947 break; 9948 case PORT_C: 9949 pipe_config->ddi_pll_sel = SKL_DPLL2; 9950 id = DPLL_ID_SKL_DPLL2; 9951 break; 9952 default: 9953 DRM_ERROR("Incorrect port type\n"); 9954 return; 9955 } 9956 9957 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 9958 } 9959 9960 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 9961 enum port port, 9962 struct intel_crtc_state *pipe_config) 9963 { 9964 enum intel_dpll_id id; 9965 u32 temp; 9966 9967 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 9968 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); 9969 9970 switch (pipe_config->ddi_pll_sel) { 9971 case SKL_DPLL0: 9972 id = DPLL_ID_SKL_DPLL0; 9973 break; 9974 case SKL_DPLL1: 9975 id = DPLL_ID_SKL_DPLL1; 9976 break; 9977 case SKL_DPLL2: 9978 id = DPLL_ID_SKL_DPLL2; 9979 break; 9980 case SKL_DPLL3: 9981 id = DPLL_ID_SKL_DPLL3; 9982 break; 9983 default: 9984 MISSING_CASE(pipe_config->ddi_pll_sel); 9985 return; 9986 } 9987 9988 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 9989 } 9990 9991 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 9992 enum port port, 9993 struct intel_crtc_state *pipe_config) 9994 { 9995 enum intel_dpll_id id; 9996 9997 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 9998 9999 switch (pipe_config->ddi_pll_sel) { 10000 case PORT_CLK_SEL_WRPLL1: 10001 id = DPLL_ID_WRPLL1; 10002 break; 10003 case PORT_CLK_SEL_WRPLL2: 10004 id = DPLL_ID_WRPLL2; 10005 break; 10006 case PORT_CLK_SEL_SPLL: 10007 id = DPLL_ID_SPLL; 10008 break; 10009 case PORT_CLK_SEL_LCPLL_810: 10010 id = DPLL_ID_LCPLL_810; 10011 break; 10012 case PORT_CLK_SEL_LCPLL_1350: 10013 id = DPLL_ID_LCPLL_1350; 10014 break; 10015 case PORT_CLK_SEL_LCPLL_2700: 10016 id = DPLL_ID_LCPLL_2700; 10017 break; 10018 default: 10019 MISSING_CASE(pipe_config->ddi_pll_sel); 10020 /* fall through */ 10021 case PORT_CLK_SEL_NONE: 10022 return; 10023 } 10024 10025 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10026 } 10027 10028 
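/*
 * The read-out helpers below take an extra power_domain_mask argument: every
 * power domain they successfully grab via intel_display_power_get_if_enabled()
 * is recorded in the mask, and the caller is responsible for dropping each of
 * those references again, as haswell_get_pipe_config() does in its "out:"
 * path:
 *
 *	for_each_power_domain(power_domain, power_domain_mask)
 *		intel_display_power_put(dev_priv, power_domain);
 */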
static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10029 struct intel_crtc_state *pipe_config, 10030 unsigned long *power_domain_mask) 10031 { 10032 struct drm_device *dev = crtc->base.dev; 10033 struct drm_i915_private *dev_priv = to_i915(dev); 10034 enum intel_display_power_domain power_domain; 10035 u32 tmp; 10036 10037 /* 10038 * The pipe->transcoder mapping is fixed with the exception of the eDP 10039 * transcoder handled below. 10040 */ 10041 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10042 10043 /* 10044 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10045 * consistency and less surprising code; it's in always on power). 10046 */ 10047 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 10048 if (tmp & TRANS_DDI_FUNC_ENABLE) { 10049 enum i915_pipe trans_edp_pipe; 10050 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10051 default: 10052 WARN(1, "unknown pipe linked to edp transcoder\n"); 10053 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10054 case TRANS_DDI_EDP_INPUT_A_ON: 10055 trans_edp_pipe = PIPE_A; 10056 break; 10057 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10058 trans_edp_pipe = PIPE_B; 10059 break; 10060 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10061 trans_edp_pipe = PIPE_C; 10062 break; 10063 } 10064 10065 if (trans_edp_pipe == crtc->pipe) 10066 pipe_config->cpu_transcoder = TRANSCODER_EDP; 10067 } 10068 10069 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10070 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10071 return false; 10072 *power_domain_mask |= BIT(power_domain); 10073 10074 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10075 10076 return tmp & PIPECONF_ENABLE; 10077 } 10078 10079 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10080 struct intel_crtc_state *pipe_config, 10081 unsigned long *power_domain_mask) 10082 { 10083 struct drm_device *dev = crtc->base.dev; 10084 struct drm_i915_private *dev_priv = to_i915(dev); 10085 enum intel_display_power_domain power_domain; 10086 enum port port; 10087 enum transcoder cpu_transcoder; 10088 u32 tmp; 10089 10090 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10091 if (port == PORT_A) 10092 cpu_transcoder = TRANSCODER_DSI_A; 10093 else 10094 cpu_transcoder = TRANSCODER_DSI_C; 10095 10096 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10097 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10098 continue; 10099 *power_domain_mask |= BIT(power_domain); 10100 10101 /* 10102 * The PLL needs to be enabled with a valid divider 10103 * configuration, otherwise accessing DSI registers will hang 10104 * the machine. See BSpec North Display Engine 10105 * registers/MIPI[BXT]. We can break out here early, since we 10106 * need the same DSI PLL to be enabled for both DSI ports. 
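 *
 * (That is also why this check must stay ahead of the
 * BXT_MIPI_PORT_CTRL read below: with the PLL off, that register
 * access could hang the machine.)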
10107 */ 10108 if (!intel_dsi_pll_is_enabled(dev_priv)) 10109 break; 10110 10111 /* XXX: this works for video mode only */ 10112 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10113 if (!(tmp & DPI_ENABLE)) 10114 continue; 10115 10116 tmp = I915_READ(MIPI_CTRL(port)); 10117 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10118 continue; 10119 10120 pipe_config->cpu_transcoder = cpu_transcoder; 10121 break; 10122 } 10123 10124 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10125 } 10126 10127 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10128 struct intel_crtc_state *pipe_config) 10129 { 10130 struct drm_device *dev = crtc->base.dev; 10131 struct drm_i915_private *dev_priv = to_i915(dev); 10132 struct intel_shared_dpll *pll; 10133 enum port port; 10134 uint32_t tmp; 10135 10136 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 10137 10138 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 10139 10140 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 10141 skylake_get_ddi_pll(dev_priv, port, pipe_config); 10142 else if (IS_BROXTON(dev)) 10143 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10144 else 10145 haswell_get_ddi_pll(dev_priv, port, pipe_config); 10146 10147 pll = pipe_config->shared_dpll; 10148 if (pll) { 10149 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 10150 &pipe_config->dpll_hw_state)); 10151 } 10152 10153 /* 10154 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E, 10155 * so just check whether this pipe is wired to DDI E and whether 10156 * the PCH transcoder is on. 10157 */ 10158 if (INTEL_INFO(dev)->gen < 9 && 10159 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10160 pipe_config->has_pch_encoder = true; 10161 10162 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10163 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10164 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10165 10166 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10167 } 10168 } 10169 10170 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10171 struct intel_crtc_state *pipe_config) 10172 { 10173 struct drm_device *dev = crtc->base.dev; 10174 struct drm_i915_private *dev_priv = to_i915(dev); 10175 enum intel_display_power_domain power_domain; 10176 unsigned long power_domain_mask; 10177 bool active; 10178 10179 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10180 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10181 return false; 10182 power_domain_mask = BIT(power_domain); 10183 10184 pipe_config->shared_dpll = NULL; 10185 10186 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 10187 10188 if (IS_BROXTON(dev_priv) && 10189 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { 10190 WARN_ON(active); 10191 active = true; 10192 } 10193 10194 if (!active) 10195 goto out; 10196 10197 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10198 haswell_get_ddi_port_state(crtc, pipe_config); 10199 intel_get_pipe_timings(crtc, pipe_config); 10200 } 10201 10202 intel_get_pipe_src_size(crtc, pipe_config); 10203 10204 pipe_config->gamma_mode = 10205 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 10206 10207 if (INTEL_INFO(dev)->gen >= 9) { 10208 skl_init_scalers(dev, crtc, pipe_config); 10209 } 10210 10211 if (INTEL_INFO(dev)->gen >= 9) { 10212 pipe_config->scaler_state.scaler_id = -1; 10213 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 10214 } 10215 10216 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10217 if
(intel_display_power_get_if_enabled(dev_priv, power_domain)) { 10218 power_domain_mask |= BIT(power_domain); 10219 if (INTEL_INFO(dev)->gen >= 9) 10220 skylake_get_pfit_config(crtc, pipe_config); 10221 else 10222 ironlake_get_pfit_config(crtc, pipe_config); 10223 } 10224 10225 if (IS_HASWELL(dev)) 10226 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 10227 (I915_READ(IPS_CTL) & IPS_ENABLE); 10228 10229 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10230 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10231 pipe_config->pixel_multiplier = 10232 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10233 } else { 10234 pipe_config->pixel_multiplier = 1; 10235 } 10236 10237 out: 10238 for_each_power_domain(power_domain, power_domain_mask) 10239 intel_display_power_put(dev_priv, power_domain); 10240 10241 return active; 10242 } 10243 10244 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 10245 const struct intel_plane_state *plane_state) 10246 { 10247 struct drm_device *dev = crtc->dev; 10248 struct drm_i915_private *dev_priv = to_i915(dev); 10249 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10250 uint32_t cntl = 0, size = 0; 10251 10252 if (plane_state && plane_state->visible) { 10253 unsigned int width = plane_state->base.crtc_w; 10254 unsigned int height = plane_state->base.crtc_h; 10255 unsigned int stride = roundup_pow_of_two(width) * 4; 10256 10257 switch (stride) { 10258 default: 10259 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", 10260 width, stride); 10261 stride = 256; 10262 /* fallthrough */ 10263 case 256: 10264 case 512: 10265 case 1024: 10266 case 2048: 10267 break; 10268 } 10269 10270 cntl |= CURSOR_ENABLE | 10271 CURSOR_GAMMA_ENABLE | 10272 CURSOR_FORMAT_ARGB | 10273 CURSOR_STRIDE(stride); 10274 10275 size = (height << 12) | width; 10276 } 10277 10278 if (intel_crtc->cursor_cntl != 0 && 10279 (intel_crtc->cursor_base != base || 10280 intel_crtc->cursor_size != size || 10281 intel_crtc->cursor_cntl != cntl)) { 10282 /* On these chipsets we can only modify the base/size/stride 10283 * whilst the cursor is disabled. 
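 *
 * That ordering is what the code below implements: disable first,
 * then rewrite base and size, and finally re-enable the cursor with
 * the new control value.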
10284 */ 10285 I915_WRITE(CURCNTR(PIPE_A), 0); 10286 POSTING_READ(CURCNTR(PIPE_A)); 10287 intel_crtc->cursor_cntl = 0; 10288 } 10289 10290 if (intel_crtc->cursor_base != base) { 10291 I915_WRITE(CURBASE(PIPE_A), base); 10292 intel_crtc->cursor_base = base; 10293 } 10294 10295 if (intel_crtc->cursor_size != size) { 10296 I915_WRITE(CURSIZE, size); 10297 intel_crtc->cursor_size = size; 10298 } 10299 10300 if (intel_crtc->cursor_cntl != cntl) { 10301 I915_WRITE(CURCNTR(PIPE_A), cntl); 10302 POSTING_READ(CURCNTR(PIPE_A)); 10303 intel_crtc->cursor_cntl = cntl; 10304 } 10305 } 10306 10307 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, 10308 const struct intel_plane_state *plane_state) 10309 { 10310 struct drm_device *dev = crtc->dev; 10311 struct drm_i915_private *dev_priv = to_i915(dev); 10312 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10313 int pipe = intel_crtc->pipe; 10314 uint32_t cntl = 0; 10315 10316 if (plane_state && plane_state->visible) { 10317 cntl = MCURSOR_GAMMA_ENABLE; 10318 switch (plane_state->base.crtc_w) { 10319 case 64: 10320 cntl |= CURSOR_MODE_64_ARGB_AX; 10321 break; 10322 case 128: 10323 cntl |= CURSOR_MODE_128_ARGB_AX; 10324 break; 10325 case 256: 10326 cntl |= CURSOR_MODE_256_ARGB_AX; 10327 break; 10328 default: 10329 MISSING_CASE(plane_state->base.crtc_w); 10330 return; 10331 } 10332 cntl |= pipe << 28; /* Connect to correct pipe */ 10333 10334 if (HAS_DDI(dev)) 10335 cntl |= CURSOR_PIPE_CSC_ENABLE; 10336 10337 if (plane_state->base.rotation == DRM_ROTATE_180) 10338 cntl |= CURSOR_ROTATE_180; 10339 } 10340 10341 if (intel_crtc->cursor_cntl != cntl) { 10342 I915_WRITE(CURCNTR(pipe), cntl); 10343 POSTING_READ(CURCNTR(pipe)); 10344 intel_crtc->cursor_cntl = cntl; 10345 } 10346 10347 /* and commit changes on next vblank */ 10348 I915_WRITE(CURBASE(pipe), base); 10349 POSTING_READ(CURBASE(pipe)); 10350 10351 intel_crtc->cursor_base = base; 10352 } 10353 10354 /* If no part of the cursor is visible on the framebuffer, the GPU may hang,
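 * so a fully clipped or off-screen cursor must be passed in with
 * plane_state->visible == false, in which case the update helpers
 * above write a zero control value and disable the cursor entirely.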
*/ 10355 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 10356 const struct intel_plane_state *plane_state) 10357 { 10358 struct drm_device *dev = crtc->dev; 10359 struct drm_i915_private *dev_priv = to_i915(dev); 10360 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10361 int pipe = intel_crtc->pipe; 10362 u32 base = intel_crtc->cursor_addr; 10363 u32 pos = 0; 10364 10365 if (plane_state) { 10366 int x = plane_state->base.crtc_x; 10367 int y = plane_state->base.crtc_y; 10368 10369 if (x < 0) { 10370 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10371 x = -x; 10372 } 10373 pos |= x << CURSOR_X_SHIFT; 10374 10375 if (y < 0) { 10376 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10377 y = -y; 10378 } 10379 pos |= y << CURSOR_Y_SHIFT; 10380 10381 /* ILK+ do this automagically */ 10382 if (HAS_GMCH_DISPLAY(dev) && 10383 plane_state->base.rotation == DRM_ROTATE_180) { 10384 base += (plane_state->base.crtc_h * 10385 plane_state->base.crtc_w - 1) * 4; 10386 } 10387 } 10388 10389 I915_WRITE(CURPOS(pipe), pos); 10390 10391 if (IS_845G(dev) || IS_I865G(dev)) 10392 i845_update_cursor(crtc, base, plane_state); 10393 else 10394 i9xx_update_cursor(crtc, base, plane_state); 10395 } 10396 10397 static bool cursor_size_ok(struct drm_device *dev, 10398 uint32_t width, uint32_t height) 10399 { 10400 if (width == 0 || height == 0) 10401 return false; 10402 10403 /* 10404 * 845g/865g are special in that they are only limited by 10405 * the width of their cursors, the height is arbitrary up to 10406 * the precision of the register. Everything else requires 10407 * square cursors, limited to a few power-of-two sizes. 10408 */ 10409 if (IS_845G(dev) || IS_I865G(dev)) { 10410 if ((width & 63) != 0) 10411 return false; 10412 10413 if (width > (IS_845G(dev) ? 64 : 512)) 10414 return false; 10415 10416 if (height > 1023) 10417 return false; 10418 } else { 10419 switch (width | height) { 10420 case 256: 10421 case 128: 10422 if (IS_GEN2(dev)) 10423 return false; 10424 case 64: 10425 break; 10426 default: 10427 return false; 10428 } 10429 } 10430 10431 return true; 10432 } 10433 10434 /* VESA 640x480x72Hz mode to set on the pipe */ 10435 static struct drm_display_mode load_detect_mode = { 10436 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 10437 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 10438 }; 10439 10440 struct drm_framebuffer * 10441 __intel_framebuffer_create(struct drm_device *dev, 10442 struct drm_mode_fb_cmd2 *mode_cmd, 10443 struct drm_i915_gem_object *obj) 10444 { 10445 struct intel_framebuffer *intel_fb; 10446 int ret; 10447 10448 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10449 if (!intel_fb) 10450 return ERR_PTR(-ENOMEM); 10451 10452 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 10453 if (ret) 10454 goto err; 10455 10456 return &intel_fb->base; 10457 10458 err: 10459 kfree(intel_fb); 10460 return ERR_PTR(ret); 10461 } 10462 10463 static struct drm_framebuffer * 10464 intel_framebuffer_create(struct drm_device *dev, 10465 struct drm_mode_fb_cmd2 *mode_cmd, 10466 struct drm_i915_gem_object *obj) 10467 { 10468 struct drm_framebuffer *fb; 10469 int ret; 10470 10471 ret = i915_mutex_lock_interruptible(dev); 10472 if (ret) 10473 return ERR_PTR(ret); 10474 fb = __intel_framebuffer_create(dev, mode_cmd, obj); 10475 mutex_unlock(&dev->struct_mutex); 10476 10477 return fb; 10478 } 10479 10480 static u32 10481 intel_framebuffer_pitch_for_width(int width, int bpp) 10482 { 10483 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 10484 return 
ALIGN(pitch, 64); 10485 } 10486 10487 static u32 10488 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 10489 { 10490 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 10491 return PAGE_ALIGN(pitch * mode->vdisplay); 10492 } 10493 10494 static struct drm_framebuffer * 10495 intel_framebuffer_create_for_mode(struct drm_device *dev, 10496 struct drm_display_mode *mode, 10497 int depth, int bpp) 10498 { 10499 struct drm_framebuffer *fb; 10500 struct drm_i915_gem_object *obj; 10501 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10502 10503 obj = i915_gem_object_create(dev, 10504 intel_framebuffer_size_for_mode(mode, bpp)); 10505 if (IS_ERR(obj)) 10506 return ERR_CAST(obj); 10507 10508 mode_cmd.width = mode->hdisplay; 10509 mode_cmd.height = mode->vdisplay; 10510 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 10511 bpp); 10512 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 10513 10514 fb = intel_framebuffer_create(dev, &mode_cmd, obj); 10515 if (IS_ERR(fb)) 10516 i915_gem_object_put_unlocked(obj); 10517 10518 return fb; 10519 } 10520 10521 static struct drm_framebuffer * 10522 mode_fits_in_fbdev(struct drm_device *dev, 10523 struct drm_display_mode *mode) 10524 { 10525 #ifdef CONFIG_DRM_FBDEV_EMULATION 10526 struct drm_i915_private *dev_priv = to_i915(dev); 10527 struct drm_i915_gem_object *obj; 10528 struct drm_framebuffer *fb; 10529 10530 if (!dev_priv->fbdev) 10531 return NULL; 10532 10533 if (!dev_priv->fbdev->fb) 10534 return NULL; 10535 10536 obj = dev_priv->fbdev->fb->obj; 10537 BUG_ON(!obj); 10538 10539 fb = &dev_priv->fbdev->fb->base; 10540 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 10541 fb->bits_per_pixel)) 10542 return NULL; 10543 10544 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 10545 return NULL; 10546 10547 drm_framebuffer_reference(fb); 10548 return fb; 10549 #else 10550 return NULL; 10551 #endif 10552 } 10553 10554 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, 10555 struct drm_crtc *crtc, 10556 struct drm_display_mode *mode, 10557 struct drm_framebuffer *fb, 10558 int x, int y) 10559 { 10560 struct drm_plane_state *plane_state; 10561 int hdisplay, vdisplay; 10562 int ret; 10563 10564 plane_state = drm_atomic_get_plane_state(state, crtc->primary); 10565 if (IS_ERR(plane_state)) 10566 return PTR_ERR(plane_state); 10567 10568 if (mode) 10569 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); 10570 else 10571 hdisplay = vdisplay = 0; 10572 10573 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? 
crtc : NULL); 10574 if (ret) 10575 return ret; 10576 drm_atomic_set_fb_for_plane(plane_state, fb); 10577 plane_state->crtc_x = 0; 10578 plane_state->crtc_y = 0; 10579 plane_state->crtc_w = hdisplay; 10580 plane_state->crtc_h = vdisplay; 10581 plane_state->src_x = x << 16; 10582 plane_state->src_y = y << 16; 10583 plane_state->src_w = hdisplay << 16; 10584 plane_state->src_h = vdisplay << 16; 10585 10586 return 0; 10587 } 10588 10589 bool intel_get_load_detect_pipe(struct drm_connector *connector, 10590 struct drm_display_mode *mode, 10591 struct intel_load_detect_pipe *old, 10592 struct drm_modeset_acquire_ctx *ctx) 10593 { 10594 struct intel_crtc *intel_crtc; 10595 struct intel_encoder *intel_encoder = 10596 intel_attached_encoder(connector); 10597 struct drm_crtc *possible_crtc; 10598 struct drm_encoder *encoder = &intel_encoder->base; 10599 struct drm_crtc *crtc = NULL; 10600 struct drm_device *dev = encoder->dev; 10601 struct drm_framebuffer *fb; 10602 struct drm_mode_config *config = &dev->mode_config; 10603 struct drm_atomic_state *state = NULL, *restore_state = NULL; 10604 struct drm_connector_state *connector_state; 10605 struct intel_crtc_state *crtc_state; 10606 int ret, i = -1; 10607 10608 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10609 connector->base.id, connector->name, 10610 encoder->base.id, encoder->name); 10611 10612 old->restore_state = NULL; 10613 10614 retry: 10615 ret = drm_modeset_lock(&config->connection_mutex, ctx); 10616 if (ret) 10617 goto fail; 10618 10619 /* 10620 * Algorithm gets a little messy: 10621 * 10622 * - if the connector already has an assigned crtc, use it (but make 10623 * sure it's on first) 10624 * 10625 * - try to find the first unused crtc that can drive this connector, 10626 * and use that if we find one 10627 */ 10628 10629 /* See if we already have a CRTC for this connector */ 10630 if (connector->state->crtc) { 10631 crtc = connector->state->crtc; 10632 10633 ret = drm_modeset_lock(&crtc->mutex, ctx); 10634 if (ret) 10635 goto fail; 10636 10637 /* Make sure the crtc and connector are running */ 10638 goto found; 10639 } 10640 10641 /* Find an unused one (if possible) */ 10642 for_each_crtc(dev, possible_crtc) { 10643 i++; 10644 if (!(encoder->possible_crtcs & (1 << i))) 10645 continue; 10646 10647 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 10648 if (ret) 10649 goto fail; 10650 10651 if (possible_crtc->state->enable) { 10652 drm_modeset_unlock(&possible_crtc->mutex); 10653 continue; 10654 } 10655 10656 crtc = possible_crtc; 10657 break; 10658 } 10659 10660 /* 10661 * If we didn't find an unused CRTC, don't use any. 
10662 */ 10663 if (!crtc) { 10664 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 10665 goto fail; 10666 } 10667 10668 found: 10669 intel_crtc = to_intel_crtc(crtc); 10670 10671 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 10672 if (ret) 10673 goto fail; 10674 10675 state = drm_atomic_state_alloc(dev); 10676 restore_state = drm_atomic_state_alloc(dev); 10677 if (!state || !restore_state) { 10678 ret = -ENOMEM; 10679 goto fail; 10680 } 10681 10682 state->acquire_ctx = ctx; 10683 restore_state->acquire_ctx = ctx; 10684 10685 connector_state = drm_atomic_get_connector_state(state, connector); 10686 if (IS_ERR(connector_state)) { 10687 ret = PTR_ERR(connector_state); 10688 goto fail; 10689 } 10690 10691 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 10692 if (ret) 10693 goto fail; 10694 10695 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10696 if (IS_ERR(crtc_state)) { 10697 ret = PTR_ERR(crtc_state); 10698 goto fail; 10699 } 10700 10701 crtc_state->base.active = crtc_state->base.enable = true; 10702 10703 if (!mode) 10704 mode = &load_detect_mode; 10705 10706 /* We need a framebuffer large enough to accommodate all accesses 10707 * that the plane may generate whilst we perform load detection. 10708 * We can not rely on the fbcon either being present (we get called 10709 * during its initialisation to detect all boot displays, or it may 10710 * not even exist) or that it is large enough to satisfy the 10711 * requested mode. 10712 */ 10713 fb = mode_fits_in_fbdev(dev, mode); 10714 if (fb == NULL) { 10715 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 10716 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 10717 } else 10718 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 10719 if (IS_ERR(fb)) { 10720 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 10721 goto fail; 10722 } 10723 10724 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 10725 if (ret) 10726 goto fail; 10727 10728 drm_framebuffer_unreference(fb); 10729 10730 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 10731 if (ret) 10732 goto fail; 10733 10734 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 10735 if (!ret) 10736 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 10737 if (!ret) 10738 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary)); 10739 if (ret) { 10740 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 10741 goto fail; 10742 } 10743 10744 ret = drm_atomic_commit(state); 10745 if (ret) { 10746 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 10747 goto fail; 10748 } 10749 10750 old->restore_state = restore_state; 10751 10752 /* let the connector get through one full cycle before testing */ 10753 intel_wait_for_vblank(dev, intel_crtc->pipe); 10754 return true; 10755 10756 fail: 10757 drm_atomic_state_free(state); 10758 drm_atomic_state_free(restore_state); 10759 restore_state = state = NULL; 10760 10761 if (ret == -EDEADLK) { 10762 drm_modeset_backoff(ctx); 10763 goto retry; 10764 } 10765 10766 return false; 10767 } 10768 10769 void intel_release_load_detect_pipe(struct drm_connector *connector, 10770 struct intel_load_detect_pipe *old, 10771 struct drm_modeset_acquire_ctx *ctx) 10772 { 10773 struct intel_encoder *intel_encoder = 10774 intel_attached_encoder(connector); 10775 struct drm_encoder *encoder = &intel_encoder->base; 10776 struct drm_atomic_state *state = 
old->restore_state; 10777 int ret; 10778 10779 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10780 connector->base.id, connector->name, 10781 encoder->base.id, encoder->name); 10782 10783 if (!state) 10784 return; 10785 10786 ret = drm_atomic_commit(state); 10787 if (ret) { 10788 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 10789 drm_atomic_state_free(state); 10790 } 10791 } 10792 10793 static int i9xx_pll_refclk(struct drm_device *dev, 10794 const struct intel_crtc_state *pipe_config) 10795 { 10796 struct drm_i915_private *dev_priv = to_i915(dev); 10797 u32 dpll = pipe_config->dpll_hw_state.dpll; 10798 10799 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10800 return dev_priv->vbt.lvds_ssc_freq; 10801 else if (HAS_PCH_SPLIT(dev)) 10802 return 120000; 10803 else if (!IS_GEN2(dev)) 10804 return 96000; 10805 else 10806 return 48000; 10807 } 10808 10809 /* Returns the clock of the currently programmed mode of the given pipe. */ 10810 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 10811 struct intel_crtc_state *pipe_config) 10812 { 10813 struct drm_device *dev = crtc->base.dev; 10814 struct drm_i915_private *dev_priv = to_i915(dev); 10815 int pipe = pipe_config->cpu_transcoder; 10816 u32 dpll = pipe_config->dpll_hw_state.dpll; 10817 u32 fp; 10818 struct dpll clock; 10819 int port_clock; 10820 int refclk = i9xx_pll_refclk(dev, pipe_config); 10821 10822 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 10823 fp = pipe_config->dpll_hw_state.fp0; 10824 else 10825 fp = pipe_config->dpll_hw_state.fp1; 10826 10827 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 10828 if (IS_PINEVIEW(dev)) { 10829 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 10830 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 10831 } else { 10832 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 10833 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 10834 } 10835 10836 if (!IS_GEN2(dev)) { 10837 if (IS_PINEVIEW(dev)) 10838 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 10839 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 10840 else 10841 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 10842 DPLL_FPA01_P1_POST_DIV_SHIFT); 10843 10844 switch (dpll & DPLL_MODE_MASK) { 10845 case DPLLB_MODE_DAC_SERIAL: 10846 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 10847 5 : 10; 10848 break; 10849 case DPLLB_MODE_LVDS: 10850 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 10851 7 : 14; 10852 break; 10853 default: 10854 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 10855 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 10856 return; 10857 } 10858 10859 if (IS_PINEVIEW(dev)) 10860 port_clock = pnv_calc_dpll_params(refclk, &clock); 10861 else 10862 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10863 } else { 10864 u32 lvds = IS_I830(dev) ? 
0 : I915_READ(LVDS); 10865 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 10866 10867 if (is_lvds) { 10868 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 10869 DPLL_FPA01_P1_POST_DIV_SHIFT); 10870 10871 if (lvds & LVDS_CLKB_POWER_UP) 10872 clock.p2 = 7; 10873 else 10874 clock.p2 = 14; 10875 } else { 10876 if (dpll & PLL_P1_DIVIDE_BY_TWO) 10877 clock.p1 = 2; 10878 else { 10879 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 10880 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 10881 } 10882 if (dpll & PLL_P2_DIVIDE_BY_4) 10883 clock.p2 = 4; 10884 else 10885 clock.p2 = 2; 10886 } 10887 10888 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10889 } 10890 10891 /* 10892 * This value includes pixel_multiplier. We will use 10893 * port_clock to compute adjusted_mode.crtc_clock in the 10894 * encoder's get_config() function. 10895 */ 10896 pipe_config->port_clock = port_clock; 10897 } 10898 10899 int intel_dotclock_calculate(int link_freq, 10900 const struct intel_link_m_n *m_n) 10901 { 10902 /* 10903 * The calculation for the data clock is: 10904 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 10905 * But we want to avoid losing precision if possible, so: 10906 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 10907 * 10908 * and for the link M/N values used here the dot clock is simply: 10909 * dot_clock = (link_m * link_clock) / link_n 10910 */ 10911 10912 if (!m_n->link_n) 10913 return 0; 10914 10915 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); 10916 } 10917 10918 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 10919 struct intel_crtc_state *pipe_config) 10920 { 10921 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10922 10923 /* read out port_clock from the DPLL */ 10924 i9xx_crtc_clock_get(crtc, pipe_config); 10925 10926 /* 10927 * In case there is an active pipe without active ports, 10928 * we may need some idea for the dotclock anyway. 10929 * Calculate one based on the FDI configuration. 10930 */ 10931 pipe_config->base.adjusted_mode.crtc_clock = 10932 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 10933 &pipe_config->fdi_m_n); 10934 } 10935 10936 /** Returns the currently programmed mode of the given pipe. */ 10937 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 10938 struct drm_crtc *crtc) 10939 { 10940 struct drm_i915_private *dev_priv = to_i915(dev); 10941 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10942 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10943 struct drm_display_mode *mode; 10944 struct intel_crtc_state *pipe_config; 10945 int htot = I915_READ(HTOTAL(cpu_transcoder)); 10946 int hsync = I915_READ(HSYNC(cpu_transcoder)); 10947 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 10948 int vsync = I915_READ(VSYNC(cpu_transcoder)); 10949 enum i915_pipe pipe = intel_crtc->pipe; 10950 10951 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10952 if (!mode) 10953 return NULL; 10954 10955 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 10956 if (!pipe_config) { 10957 kfree(mode); 10958 return NULL; 10959 } 10960 10961 /* 10962 * Construct a pipe_config sufficient for getting the clock info 10963 * back out of crtc_clock_get. 10964 * 10965 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 10966 * to use a real value here instead.
10967 */ 10968 pipe_config->cpu_transcoder = (enum transcoder) pipe; 10969 pipe_config->pixel_multiplier = 1; 10970 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe)); 10971 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe)); 10972 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe)); 10973 i9xx_crtc_clock_get(intel_crtc, pipe_config); 10974 10975 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; 10976 mode->hdisplay = (htot & 0xffff) + 1; 10977 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 10978 mode->hsync_start = (hsync & 0xffff) + 1; 10979 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 10980 mode->vdisplay = (vtot & 0xffff) + 1; 10981 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 10982 mode->vsync_start = (vsync & 0xffff) + 1; 10983 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 10984 10985 drm_mode_set_name(mode); 10986 10987 kfree(pipe_config); 10988 10989 return mode; 10990 } 10991 10992 static void intel_crtc_destroy(struct drm_crtc *crtc) 10993 { 10994 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10995 struct drm_device *dev = crtc->dev; 10996 struct intel_flip_work *work; 10997 10998 spin_lock_irq(&dev->event_lock); 10999 work = intel_crtc->flip_work; 11000 intel_crtc->flip_work = NULL; 11001 spin_unlock_irq(&dev->event_lock); 11002 11003 if (work) { 11004 cancel_work_sync(&work->mmio_work); 11005 cancel_work_sync(&work->unpin_work); 11006 kfree(work); 11007 } 11008 11009 drm_crtc_cleanup(crtc); 11010 11011 kfree(intel_crtc); 11012 } 11013 11014 static void intel_unpin_work_fn(struct work_struct *__work) 11015 { 11016 struct intel_flip_work *work = 11017 container_of(__work, struct intel_flip_work, unpin_work); 11018 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 11019 struct drm_device *dev = crtc->base.dev; 11020 struct drm_plane *primary = crtc->base.primary; 11021 11022 if (is_mmio_work(work)) 11023 flush_work(&work->mmio_work); 11024 11025 mutex_lock(&dev->struct_mutex); 11026 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11027 i915_gem_object_put(work->pending_flip_obj); 11028 mutex_unlock(&dev->struct_mutex); 11029 11030 i915_gem_request_put(work->flip_queued_req); 11031 11032 intel_frontbuffer_flip_complete(to_i915(dev), 11033 to_intel_plane(primary)->frontbuffer_bit); 11034 intel_fbc_post_update(crtc); 11035 drm_framebuffer_unreference(work->old_fb); 11036 11037 BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); 11038 atomic_dec(&crtc->unpin_work_count); 11039 11040 kfree(work); 11041 } 11042 11043 /* Is 'a' after or equal to 'b'? Works across wraparound: e.g. for a = 1, b = 0xffffffff we get a - b = 2, the top bit is clear, so 'a' counts as being after 'b'. */ 11044 static bool g4x_flip_count_after_eq(u32 a, u32 b) 11045 { 11046 return !((a - b) & 0x80000000); 11047 } 11048 11049 static bool __pageflip_finished_cs(struct intel_crtc *crtc, 11050 struct intel_flip_work *work) 11051 { 11052 struct drm_device *dev = crtc->base.dev; 11053 struct drm_i915_private *dev_priv = to_i915(dev); 11054 unsigned reset_counter; 11055 11056 reset_counter = i915_reset_counter(&dev_priv->gpu_error); 11057 if (crtc->reset_counter != reset_counter) 11058 return true; 11059 11060 /* 11061 * The relevant registers don't exist on pre-ctg. 11062 * As the flip done interrupt doesn't trigger for mmio 11063 * flips on gmch platforms, a flip count check isn't 11064 * really needed there. But since ctg has the registers, 11065 * include it in the check anyway.
11066 */ 11067 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev)) 11068 return true; 11069 11070 /* 11071 * BDW signals flip done immediately if the plane 11072 * is disabled, even if the plane enable is already 11073 * armed to occur at the next vblank :( 11074 */ 11075 11076 /* 11077 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 11078 * used the same base address. In that case the mmio flip might 11079 * have completed, but the CS hasn't even executed the flip yet. 11080 * 11081 * A flip count check isn't enough as the CS might have updated 11082 * the base address just after start of vblank, but before we 11083 * managed to process the interrupt. This means we'd complete the 11084 * CS flip too soon. 11085 * 11086 * Combining both checks should get us a good enough result. It may 11087 * still happen that the CS flip has been executed, but has not 11088 * yet actually completed. But in case the base address is the same 11089 * anyway, we don't really care. 11090 */ 11091 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 11092 crtc->flip_work->gtt_offset && 11093 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 11094 crtc->flip_work->flip_count); 11095 } 11096 11097 static bool 11098 __pageflip_finished_mmio(struct intel_crtc *crtc, 11099 struct intel_flip_work *work) 11100 { 11101 /* 11102 * MMIO work completes when vblank is different from 11103 * flip_queued_vblank. 11104 * 11105 * Reset counter value doesn't matter, this is handled by 11106 * i915_wait_request finishing early, so no need to handle 11107 * reset here. 11108 */ 11109 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; 11110 } 11111 11112 11113 static bool pageflip_finished(struct intel_crtc *crtc, 11114 struct intel_flip_work *work) 11115 { 11116 if (!atomic_read(&work->pending)) 11117 return false; 11118 11119 smp_rmb(); 11120 11121 if (is_mmio_work(work)) 11122 return __pageflip_finished_mmio(crtc, work); 11123 else 11124 return __pageflip_finished_cs(crtc, work); 11125 } 11126 11127 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) 11128 { 11129 struct drm_device *dev = &dev_priv->drm; 11130 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11131 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11132 struct intel_flip_work *work; 11133 unsigned long flags; 11134 11135 /* Ignore early vblank irqs */ 11136 if (!crtc) 11137 return; 11138 11139 /* 11140 * This is called both by irq handlers and the reset code (to complete 11141 * lost pageflips) so needs the full irqsave spinlocks. 11142 */ 11143 spin_lock_irqsave(&dev->event_lock, flags); 11144 work = intel_crtc->flip_work; 11145 11146 if (work != NULL && 11147 !is_mmio_work(work) && 11148 pageflip_finished(intel_crtc, work)) 11149 page_flip_completed(intel_crtc); 11150 11151 spin_unlock_irqrestore(&dev->event_lock, flags); 11152 } 11153 11154 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) 11155 { 11156 struct drm_device *dev = &dev_priv->drm; 11157 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11159 struct intel_flip_work *work; 11160 unsigned long flags; 11161 11162 /* Ignore early vblank irqs */ 11163 if (!crtc) 11164 return; 11165 11166 /* 11167 * This is called both by irq handlers and the reset code (to complete 11168 * lost pageflips) so needs the full irqsave spinlocks. 
11169 */ 11170 spin_lock_irqsave(&dev->event_lock, flags); 11171 work = intel_crtc->flip_work; 11172 11173 if (work != NULL && 11174 is_mmio_work(work) && 11175 pageflip_finished(intel_crtc, work)) 11176 page_flip_completed(intel_crtc); 11177 11178 spin_unlock_irqrestore(&dev->event_lock, flags); 11179 } 11180 11181 static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, 11182 struct intel_flip_work *work) 11183 { 11184 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); 11185 11186 /* Ensure that the work item is consistent when activating it ... */ 11187 smp_mb__before_atomic(); 11188 atomic_set(&work->pending, 1); 11189 } 11190 11191 static int intel_gen2_queue_flip(struct drm_device *dev, 11192 struct drm_crtc *crtc, 11193 struct drm_framebuffer *fb, 11194 struct drm_i915_gem_object *obj, 11195 struct drm_i915_gem_request *req, 11196 uint32_t flags) 11197 { 11198 struct intel_ring *ring = req->ring; 11199 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11200 u32 flip_mask; 11201 int ret; 11202 11203 ret = intel_ring_begin(req, 6); 11204 if (ret) 11205 return ret; 11206 11207 /* Can't queue multiple flips, so wait for the previous 11208 * one to finish before executing the next. 11209 */ 11210 if (intel_crtc->plane) 11211 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 11212 else 11213 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 11214 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 11215 intel_ring_emit(ring, MI_NOOP); 11216 intel_ring_emit(ring, MI_DISPLAY_FLIP | 11217 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11218 intel_ring_emit(ring, fb->pitches[0]); 11219 intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); 11220 intel_ring_emit(ring, 0); /* aux display base address, unused */ 11221 11222 return 0; 11223 } 11224 11225 static int intel_gen3_queue_flip(struct drm_device *dev, 11226 struct drm_crtc *crtc, 11227 struct drm_framebuffer *fb, 11228 struct drm_i915_gem_object *obj, 11229 struct drm_i915_gem_request *req, 11230 uint32_t flags) 11231 { 11232 struct intel_ring *ring = req->ring; 11233 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11234 u32 flip_mask; 11235 int ret; 11236 11237 ret = intel_ring_begin(req, 6); 11238 if (ret) 11239 return ret; 11240 11241 if (intel_crtc->plane) 11242 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 11243 else 11244 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 11245 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 11246 intel_ring_emit(ring, MI_NOOP); 11247 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 11248 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11249 intel_ring_emit(ring, fb->pitches[0]); 11250 intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); 11251 intel_ring_emit(ring, MI_NOOP); 11252 11253 return 0; 11254 } 11255 11256 static int intel_gen4_queue_flip(struct drm_device *dev, 11257 struct drm_crtc *crtc, 11258 struct drm_framebuffer *fb, 11259 struct drm_i915_gem_object *obj, 11260 struct drm_i915_gem_request *req, 11261 uint32_t flags) 11262 { 11263 struct intel_ring *ring = req->ring; 11264 struct drm_i915_private *dev_priv = to_i915(dev); 11265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11266 uint32_t pf, pipesrc; 11267 int ret; 11268 11269 ret = intel_ring_begin(req, 4); 11270 if (ret) 11271 return ret; 11272 11273 /* i965+ uses the linear or tiled offsets from the 11274 * Display Registers (which do not change across a page-flip) 11275 * so we need only reprogram the base address. 
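* The object's tiling mode is OR'd into the low bits of the base address emitted below, so the flip packet also carries the new tiling selection (the address itself is page aligned, leaving those bits free).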
11276 */ 11277 intel_ring_emit(ring, MI_DISPLAY_FLIP | 11278 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11279 intel_ring_emit(ring, fb->pitches[0]); 11280 intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | 11281 i915_gem_object_get_tiling(obj)); 11282 11283 /* XXX Enabling the panel-fitter across page-flip is so far 11284 * untested on non-native modes, so ignore it for now. 11285 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 11286 */ 11287 pf = 0; 11288 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11289 intel_ring_emit(ring, pf | pipesrc); 11290 11291 return 0; 11292 } 11293 11294 static int intel_gen6_queue_flip(struct drm_device *dev, 11295 struct drm_crtc *crtc, 11296 struct drm_framebuffer *fb, 11297 struct drm_i915_gem_object *obj, 11298 struct drm_i915_gem_request *req, 11299 uint32_t flags) 11300 { 11301 struct intel_ring *ring = req->ring; 11302 struct drm_i915_private *dev_priv = to_i915(dev); 11303 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11304 uint32_t pf, pipesrc; 11305 int ret; 11306 11307 ret = intel_ring_begin(req, 4); 11308 if (ret) 11309 return ret; 11310 11311 intel_ring_emit(ring, MI_DISPLAY_FLIP | 11312 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11313 intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj)); 11314 intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); 11315 11316 /* Contrary to the suggestions in the documentation, 11317 * "Enable Panel Fitter" does not seem to be required when page 11318 * flipping with a non-native mode, and, worse, causes a normal 11319 * modeset to fail. 11320 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 11321 */ 11322 pf = 0; 11323 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11324 intel_ring_emit(ring, pf | pipesrc); 11325 11326 return 0; 11327 } 11328 11329 static int intel_gen7_queue_flip(struct drm_device *dev, 11330 struct drm_crtc *crtc, 11331 struct drm_framebuffer *fb, 11332 struct drm_i915_gem_object *obj, 11333 struct drm_i915_gem_request *req, 11334 uint32_t flags) 11335 { 11336 struct intel_ring *ring = req->ring; 11337 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11338 uint32_t plane_bit = 0; 11339 int len, ret; 11340 11341 switch (intel_crtc->plane) { 11342 case PLANE_A: 11343 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 11344 break; 11345 case PLANE_B: 11346 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 11347 break; 11348 case PLANE_C: 11349 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 11350 break; 11351 default: 11352 WARN_ONCE(1, "unknown plane in flip command\n"); 11353 return -ENODEV; 11354 } 11355 11356 len = 4; 11357 if (req->engine->id == RCS) { 11358 len += 6; 11359 /* 11360 * On Gen 8, SRM now takes an extra dword to accommodate 11361 * 48-bit addresses, and we need a NOOP for the batch size to 11362 * stay even. 11363 */ 11364 if (IS_GEN8(dev)) 11365 len += 2; 11366 } 11367 11368 /* 11369 * BSpec MI_DISPLAY_FLIP for IVB: 11370 * "The full packet must be contained within the same cache line." 11371 * 11372 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 11373 * cacheline; if we ever start emitting more commands before 11374 * the MI_DISPLAY_FLIP, we may need to first emit everything else, 11375 * then do the cacheline alignment, and finally emit the 11376 * MI_DISPLAY_FLIP. 11377 */ 11378 ret = intel_ring_cacheline_align(req); 11379 if (ret) 11380 return ret; 11381 11382 ret = intel_ring_begin(req, len); 11383 if (ret) 11384 return ret; 11385 11386 /* Unmask the flip-done completion message.
Note that the bspec says that 11387 * we should do this for both the BCS and RCS, and that we must not unmask 11388 * more than one flip event at any time (or ensure that one flip message 11389 * can be sent by waiting for flip-done prior to queueing new flips). 11390 * Experimentation says that BCS works despite DERRMR masking all 11391 * flip-done completion events and that unmasking all planes at once 11392 * for the RCS also doesn't appear to drop events. Setting the DERRMR 11393 * to zero does lead to lockups within MI_DISPLAY_FLIP. 11394 */ 11395 if (req->engine->id == RCS) { 11396 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 11397 intel_ring_emit_reg(ring, DERRMR); 11398 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 11399 DERRMR_PIPEB_PRI_FLIP_DONE | 11400 DERRMR_PIPEC_PRI_FLIP_DONE)); 11401 if (IS_GEN8(dev)) 11402 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | 11403 MI_SRM_LRM_GLOBAL_GTT); 11404 else 11405 intel_ring_emit(ring, MI_STORE_REGISTER_MEM | 11406 MI_SRM_LRM_GLOBAL_GTT); 11407 intel_ring_emit_reg(ring, DERRMR); 11408 intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256); 11409 if (IS_GEN8(dev)) { 11410 intel_ring_emit(ring, 0); 11411 intel_ring_emit(ring, MI_NOOP); 11412 } 11413 } 11414 11415 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 11416 intel_ring_emit(ring, fb->pitches[0] | i915_gem_object_get_tiling(obj)); 11417 intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); 11418 intel_ring_emit(ring, MI_NOOP); 11419 11420 return 0; 11421 } 11422 11423 static bool use_mmio_flip(struct intel_engine_cs *engine, 11424 struct drm_i915_gem_object *obj) 11425 { 11426 struct reservation_object *resv; 11427 11428 /* 11429 * MMIO flips are not used on older platforms because the lack 11430 * of a flip done interrupt forces us to use CS flips there. 11431 * Those platforms derive flip done using some clever 11432 * tricks involving the flip_pending status bits and vblank irqs, 11433 * so using MMIO flips there would disrupt this mechanism.
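* To summarize the checks below: with no engine we must use MMIO, gen4 and older never use MMIO, the i915.use_mmio_flip parameter can force either choice, execlists and an unsignaled dma-buf fence both select MMIO, and otherwise MMIO is used only when the last write came from a different engine than the one the CS flip would be queued on.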
11434 */ 11435 11436 if (engine == NULL) 11437 return true; 11438 11439 if (INTEL_GEN(engine->i915) < 5) 11440 return false; 11441 11442 if (i915.use_mmio_flip < 0) 11443 return false; 11444 else if (i915.use_mmio_flip > 0) 11445 return true; 11446 else if (i915.enable_execlists) 11447 return true; 11448 11449 resv = i915_gem_object_get_dmabuf_resv(obj); 11450 if (resv && !reservation_object_test_signaled_rcu(resv, false)) 11451 return true; 11452 11453 return engine != i915_gem_active_get_engine(&obj->last_write, 11454 &obj->base.dev->struct_mutex); 11455 } 11456 11457 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11458 unsigned int rotation, 11459 struct intel_flip_work *work) 11460 { 11461 struct drm_device *dev = intel_crtc->base.dev; 11462 struct drm_i915_private *dev_priv = to_i915(dev); 11463 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11464 const enum i915_pipe pipe = intel_crtc->pipe; 11465 u32 ctl, stride, tile_height; 11466 11467 ctl = I915_READ(PLANE_CTL(pipe, 0)); 11468 ctl &= ~PLANE_CTL_TILED_MASK; 11469 switch (fb->modifier[0]) { 11470 case DRM_FORMAT_MOD_NONE: 11471 break; 11472 case I915_FORMAT_MOD_X_TILED: 11473 ctl |= PLANE_CTL_TILED_X; 11474 break; 11475 case I915_FORMAT_MOD_Y_TILED: 11476 ctl |= PLANE_CTL_TILED_Y; 11477 break; 11478 case I915_FORMAT_MOD_Yf_TILED: 11479 ctl |= PLANE_CTL_TILED_YF; 11480 break; 11481 default: 11482 MISSING_CASE(fb->modifier[0]); 11483 } 11484 11485 /* 11486 * The stride is expressed either as a multiple of 64-byte chunks for 11487 * linear buffers or as a number of tiles for tiled buffers. 11488 */ 11489 if (intel_rotation_90_or_270(rotation)) { 11490 /* stride = Surface height in tiles */ 11491 tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); 11492 stride = DIV_ROUND_UP(fb->height, tile_height); 11493 } else { 11494 stride = fb->pitches[0] / 11495 intel_fb_stride_alignment(dev_priv, fb->modifier[0], 11496 fb->pixel_format); 11497 } 11498 11499 /* 11500 * Both PLANE_CTL and PLANE_STRIDE are latched not on vblank but on 11501 * PLANE_SURF updates, so the whole update is guaranteed to be atomic.
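* (The POSTING_READ of PLANE_SURF below just makes sure the surface write has actually reached the hardware before we return.)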
11502 */ 11503 I915_WRITE(PLANE_CTL(pipe, 0), ctl); 11504 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 11505 11506 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset); 11507 POSTING_READ(PLANE_SURF(pipe, 0)); 11508 } 11509 11510 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 11511 struct intel_flip_work *work) 11512 { 11513 struct drm_device *dev = intel_crtc->base.dev; 11514 struct drm_i915_private *dev_priv = to_i915(dev); 11515 struct intel_framebuffer *intel_fb = 11516 to_intel_framebuffer(intel_crtc->base.primary->fb); 11517 struct drm_i915_gem_object *obj = intel_fb->obj; 11518 i915_reg_t reg = DSPCNTR(intel_crtc->plane); 11519 u32 dspcntr; 11520 11521 dspcntr = I915_READ(reg); 11522 11523 if (i915_gem_object_is_tiled(obj)) 11524 dspcntr |= DISPPLANE_TILED; 11525 else 11526 dspcntr &= ~DISPPLANE_TILED; 11527 11528 I915_WRITE(reg, dspcntr); 11529 11530 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset); 11531 POSTING_READ(DSPSURF(intel_crtc->plane)); 11532 } 11533 11534 static void intel_mmio_flip_work_func(struct work_struct *w) 11535 { 11536 struct intel_flip_work *work = 11537 container_of(w, struct intel_flip_work, mmio_work); 11538 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 11539 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11540 struct intel_framebuffer *intel_fb = 11541 to_intel_framebuffer(crtc->base.primary->fb); 11542 struct drm_i915_gem_object *obj = intel_fb->obj; 11543 struct reservation_object *resv; 11544 11545 if (work->flip_queued_req) 11546 WARN_ON(i915_wait_request(work->flip_queued_req, 11547 false, NULL, 11548 NO_WAITBOOST)); 11549 11550 /* For a framebuffer backed by a dmabuf, wait for its fence */ 11551 resv = i915_gem_object_get_dmabuf_resv(obj); 11552 if (resv) 11553 WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, 11554 MAX_SCHEDULE_TIMEOUT) < 0); 11555 11556 intel_pipe_update_start(crtc); 11557 11558 if (INTEL_GEN(dev_priv) >= 9) 11559 skl_do_mmio_flip(crtc, work->rotation, work); 11560 else 11561 /* use_mmio_flip() restricts MMIO flips to ilk+ */ 11562 ilk_do_mmio_flip(crtc, work); 11563 11564 intel_pipe_update_end(crtc, work); 11565 } 11566 11567 static int intel_default_queue_flip(struct drm_device *dev, 11568 struct drm_crtc *crtc, 11569 struct drm_framebuffer *fb, 11570 struct drm_i915_gem_object *obj, 11571 struct drm_i915_gem_request *req, 11572 uint32_t flags) 11573 { 11574 return -ENODEV; 11575 } 11576 11577 static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, 11578 struct intel_crtc *intel_crtc, 11579 struct intel_flip_work *work) 11580 { 11581 u32 addr, vblank; 11582 11583 if (!atomic_read(&work->pending)) 11584 return false; 11585 11586 smp_rmb(); 11587 11588 vblank = intel_crtc_get_vblank_counter(intel_crtc); 11589 if (work->flip_ready_vblank == 0) { 11590 if (work->flip_queued_req && 11591 !i915_gem_request_completed(work->flip_queued_req)) 11592 return false; 11593 11594 work->flip_ready_vblank = vblank; 11595 } 11596 11597 if (vblank - work->flip_ready_vblank < 3) 11598 return false; 11599 11600 /* Potential stall - if we see that the flip has happened, 11601 * assume a missed interrupt. */ 11602 if (INTEL_GEN(dev_priv) >= 4) 11603 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11604 else 11605 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11606 11607 /* There is a potential issue here with a false positive after a flip 11608 * to the same address. We could address this by checking for a 11609 * non-incrementing frame counter.
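* (As with the CS flip completion check earlier, such a false positive is mostly harmless: if the base address is unchanged, the scanout contents are the same either way.)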
11610 */ 11611 return addr == work->gtt_offset; 11612 } 11613 11614 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) 11615 { 11616 struct drm_device *dev = &dev_priv->drm; 11617 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11618 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11619 struct intel_flip_work *work; 11620 11621 // WARN_ON(!in_interrupt()); 11622 11623 if (crtc == NULL) 11624 return; 11625 11626 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 11627 work = intel_crtc->flip_work; 11628 11629 if (work != NULL && !is_mmio_work(work) && 11630 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { 11631 WARN_ONCE(1, 11632 "Kicking stuck page flip: queued at %d, now %d\n", 11633 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); 11634 page_flip_completed(intel_crtc); 11635 work = NULL; 11636 } 11637 11638 if (work != NULL && !is_mmio_work(work) && 11639 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) 11640 intel_queue_rps_boost_for_request(work->flip_queued_req); 11641 lockmgr(&dev->event_lock, LK_RELEASE); 11642 } 11643 11644 static int intel_crtc_page_flip(struct drm_crtc *crtc, 11645 struct drm_framebuffer *fb, 11646 struct drm_pending_vblank_event *event, 11647 uint32_t page_flip_flags) 11648 { 11649 struct drm_device *dev = crtc->dev; 11650 struct drm_i915_private *dev_priv = to_i915(dev); 11651 struct drm_framebuffer *old_fb = crtc->primary->fb; 11652 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11653 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11654 struct drm_plane *primary = crtc->primary; 11655 enum i915_pipe pipe = intel_crtc->pipe; 11656 struct intel_flip_work *work; 11657 struct intel_engine_cs *engine; 11658 bool mmio_flip; 11659 struct drm_i915_gem_request *request; 11660 int ret; 11661 11662 /* 11663 * drm_mode_page_flip_ioctl() should already catch this, but double 11664 * check to be safe. In the future we may enable pageflipping from 11665 * a disabled primary plane. 11666 */ 11667 if (WARN_ON(intel_fb_obj(old_fb) == NULL)) 11668 return -EBUSY; 11669 11670 /* Can't change pixel format via MI display flips. */ 11671 if (fb->pixel_format != crtc->primary->fb->pixel_format) 11672 return -EINVAL; 11673 11674 /* 11675 * TILEOFF/LINOFF registers can't be changed via MI display flips. 11676 * Note that pitch changes could also affect these registers. 11677 */ 11678 if (INTEL_INFO(dev)->gen > 3 && 11679 (fb->offsets[0] != crtc->primary->fb->offsets[0] || 11680 fb->pitches[0] != crtc->primary->fb->pitches[0])) 11681 return -EINVAL; 11682 11683 if (i915_terminally_wedged(&dev_priv->gpu_error)) 11684 goto out_hang; 11685 11686 work = kzalloc(sizeof(*work), GFP_KERNEL); 11687 if (work == NULL) 11688 return -ENOMEM; 11689 11690 work->event = event; 11691 work->crtc = crtc; 11692 work->old_fb = old_fb; 11693 INIT_WORK(&work->unpin_work, intel_unpin_work_fn); 11694 11695 ret = drm_crtc_vblank_get(crtc); 11696 if (ret) 11697 goto free_work; 11698 11699 /* We borrow the event spin lock for protecting flip_work */ 11700 spin_lock_irq(&dev->event_lock); 11701 if (intel_crtc->flip_work) { 11702 /* Before declaring the flip queue wedged, check if 11703 * the hardware completed the operation behind our backs.
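* Completing it here rather than bailing out avoids failing the ioctl with -EBUSY when the flip-done interrupt was merely late or lost.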
11704 */ 11705 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { 11706 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 11707 page_flip_completed(intel_crtc); 11708 } else { 11709 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 11710 spin_unlock_irq(&dev->event_lock); 11711 11712 drm_crtc_vblank_put(crtc); 11713 kfree(work); 11714 return -EBUSY; 11715 } 11716 } 11717 intel_crtc->flip_work = work; 11718 spin_unlock_irq(&dev->event_lock); 11719 11720 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 11721 flush_workqueue(dev_priv->wq); 11722 11723 /* Reference the objects for the scheduled work. */ 11724 drm_framebuffer_reference(work->old_fb); 11725 11726 crtc->primary->fb = fb; 11727 update_state_fb(crtc->primary); 11728 11729 intel_fbc_pre_update(intel_crtc, intel_crtc->config, 11730 to_intel_plane_state(primary->state)); 11731 11732 work->pending_flip_obj = i915_gem_object_get(obj); 11733 11734 ret = i915_mutex_lock_interruptible(dev); 11735 if (ret) 11736 goto cleanup; 11737 11738 intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error); 11739 if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) { 11740 ret = -EIO; 11741 goto cleanup; 11742 } 11743 11744 atomic_inc(&intel_crtc->unpin_work_count); 11745 11746 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 11747 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; 11748 11749 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 11750 engine = &dev_priv->engine[BCS]; 11751 if (i915_gem_object_get_tiling(obj) != 11752 i915_gem_object_get_tiling(intel_fb_obj(work->old_fb))) 11753 /* vlv: DISPLAY_FLIP fails to change tiling */ 11754 engine = NULL; 11755 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 11756 engine = &dev_priv->engine[BCS]; 11757 } else if (INTEL_INFO(dev)->gen >= 7) { 11758 engine = i915_gem_active_get_engine(&obj->last_write, 11759 &obj->base.dev->struct_mutex); 11760 if (engine == NULL || engine->id != RCS) 11761 engine = &dev_priv->engine[BCS]; 11762 } else { 11763 engine = &dev_priv->engine[RCS]; 11764 } 11765 11766 mmio_flip = use_mmio_flip(engine, obj); 11767 11768 ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 11769 if (ret) 11770 goto cleanup_pending; 11771 11772 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), 11773 obj, 0); 11774 work->gtt_offset += intel_crtc->dspaddr_offset; 11775 work->rotation = crtc->primary->state->rotation; 11776 11777 if (mmio_flip) { 11778 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); 11779 11780 work->flip_queued_req = i915_gem_active_get(&obj->last_write, 11781 &obj->base.dev->struct_mutex); 11782 schedule_work(&work->mmio_work); 11783 } else { 11784 request = i915_gem_request_alloc(engine, engine->last_context); 11785 if (IS_ERR(request)) { 11786 ret = PTR_ERR(request); 11787 goto cleanup_unpin; 11788 } 11789 11790 ret = i915_gem_object_sync(obj, request); 11791 if (ret) 11792 goto cleanup_request; 11793 11794 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11795 page_flip_flags); 11796 if (ret) 11797 goto cleanup_request; 11798 11799 intel_mark_page_flip_active(intel_crtc, work); 11800 11801 work->flip_queued_req = i915_gem_request_get(request); 11802 i915_add_request_no_flush(request); 11803 } 11804 11805 i915_gem_track_fb(intel_fb_obj(old_fb), obj, 11806 to_intel_plane(primary)->frontbuffer_bit); 11807 mutex_unlock(&dev->struct_mutex); 11808 11809 intel_frontbuffer_flip_prepare(to_i915(dev), 11810 to_intel_plane(primary)->frontbuffer_bit); 11811 11812 
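/* Paired with the i915_flip_complete tracepoint emitted on completion, so tooling can measure flip latency. */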
trace_i915_flip_request(intel_crtc->plane, obj); 11813 11814 return 0; 11815 11816 cleanup_request: 11817 i915_add_request_no_flush(request); 11818 cleanup_unpin: 11819 intel_unpin_fb_obj(fb, crtc->primary->state->rotation); 11820 cleanup_pending: 11821 atomic_dec(&intel_crtc->unpin_work_count); 11822 mutex_unlock(&dev->struct_mutex); 11823 cleanup: 11824 crtc->primary->fb = old_fb; 11825 update_state_fb(crtc->primary); 11826 11827 i915_gem_object_put_unlocked(obj); 11828 drm_framebuffer_unreference(work->old_fb); 11829 11830 spin_lock_irq(&dev->event_lock); 11831 intel_crtc->flip_work = NULL; 11832 spin_unlock_irq(&dev->event_lock); 11833 11834 drm_crtc_vblank_put(crtc); 11835 free_work: 11836 kfree(work); 11837 11838 if (ret == -EIO) { 11839 struct drm_atomic_state *state; 11840 struct drm_plane_state *plane_state; 11841 11842 out_hang: 11843 state = drm_atomic_state_alloc(dev); 11844 if (!state) 11845 return -ENOMEM; 11846 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 11847 11848 retry: 11849 plane_state = drm_atomic_get_plane_state(state, primary); 11850 ret = PTR_ERR_OR_ZERO(plane_state); 11851 if (!ret) { 11852 drm_atomic_set_fb_for_plane(plane_state, fb); 11853 11854 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 11855 if (!ret) 11856 ret = drm_atomic_commit(state); 11857 } 11858 11859 if (ret == -EDEADLK) { 11860 drm_modeset_backoff(state->acquire_ctx); 11861 drm_atomic_state_clear(state); 11862 goto retry; 11863 } 11864 11865 if (ret) 11866 drm_atomic_state_free(state); 11867 11868 if (ret == 0 && event) { 11869 spin_lock_irq(&dev->event_lock); 11870 drm_crtc_send_vblank_event(crtc, event); 11871 spin_unlock_irq(&dev->event_lock); 11872 } 11873 } 11874 return ret; 11875 } 11876 11877 11878 /** 11879 * intel_wm_need_update - Check whether watermarks need updating 11880 * @plane: drm plane 11881 * @state: new plane state 11882 * 11883 * Check current plane state versus the new one to determine whether 11884 * watermarks need to be recalculated. 11885 * 11886 * Returns: true if the watermarks need to be recalculated, false otherwise. 11887 */ 11888 static bool intel_wm_need_update(struct drm_plane *plane, 11889 struct drm_plane_state *state) 11890 { 11891 struct intel_plane_state *new = to_intel_plane_state(state); 11892 struct intel_plane_state *cur = to_intel_plane_state(plane->state); 11893 11894 /* Update watermarks on tiling or size changes.
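A visibility change always requires a recalculation; tiling (fb modifier), rotation, and src/dst size changes matter only when both the old and new state have a framebuffer, as checked below.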
*/ 11895 if (new->visible != cur->visible) 11896 return true; 11897 11898 if (!cur->base.fb || !new->base.fb) 11899 return false; 11900 11901 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || 11902 cur->base.rotation != new->base.rotation || 11903 drm_rect_width(&new->src) != drm_rect_width(&cur->src) || 11904 drm_rect_height(&new->src) != drm_rect_height(&cur->src) || 11905 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || 11906 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) 11907 return true; 11908 11909 return false; 11910 } 11911 11912 static bool needs_scaling(struct intel_plane_state *state) 11913 { 11914 int src_w = drm_rect_width(&state->src) >> 16; 11915 int src_h = drm_rect_height(&state->src) >> 16; 11916 int dst_w = drm_rect_width(&state->dst); 11917 int dst_h = drm_rect_height(&state->dst); 11918 11919 return (src_w != dst_w || src_h != dst_h); 11920 } 11921 11922 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, 11923 struct drm_plane_state *plane_state) 11924 { 11925 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); 11926 struct drm_crtc *crtc = crtc_state->crtc; 11927 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11928 struct drm_plane *plane = plane_state->plane; 11929 struct drm_device *dev = crtc->dev; 11930 struct drm_i915_private *dev_priv = to_i915(dev); 11931 struct intel_plane_state *old_plane_state = 11932 to_intel_plane_state(plane->state); 11933 bool mode_changed = needs_modeset(crtc_state); 11934 bool was_crtc_enabled = crtc->state->active; 11935 bool is_crtc_enabled = crtc_state->active; 11936 bool turn_off, turn_on, visible, was_visible; 11937 struct drm_framebuffer *fb = plane_state->fb; 11938 int ret; 11939 11940 if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { 11941 ret = skl_update_scaler_plane( 11942 to_intel_crtc_state(crtc_state), 11943 to_intel_plane_state(plane_state)); 11944 if (ret) 11945 return ret; 11946 } 11947 11948 was_visible = old_plane_state->visible; 11949 visible = to_intel_plane_state(plane_state)->visible; 11950 11951 if (!was_crtc_enabled && WARN_ON(was_visible)) 11952 was_visible = false; 11953 11954 /* 11955 * Visibility is calculated as if the crtc was on, but 11956 * after scaler setup everything depends on it being off 11957 * when the crtc isn't active. 11958 * 11959 * FIXME this is wrong for watermarks. Watermarks should also 11960 * be computed as if the pipe would be active. Perhaps move 11961 * per-plane wm computation to the .check_plane() hook, and 11962 * only combine the results from all planes in the current place? 11963 */ 11964 if (!is_crtc_enabled) 11965 to_intel_plane_state(plane_state)->visible = visible = false; 11966 11967 if (!was_visible && !visible) 11968 return 0; 11969 11970 if (fb != old_plane_state->base.fb) 11971 pipe_config->fb_changed = true; 11972 11973 turn_off = was_visible && (!visible || mode_changed); 11974 turn_on = visible && (!was_visible || mode_changed); 11975 11976 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", 11977 intel_crtc->base.base.id, 11978 intel_crtc->base.name, 11979 plane->base.id, plane->name, 11980 fb ? 
fb->base.id : -1); 11981 11982 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 11983 plane->base.id, plane->name, 11984 was_visible, visible, 11985 turn_off, turn_on, mode_changed); 11986 11987 if (turn_on) { 11988 pipe_config->update_wm_pre = true; 11989 11990 /* must disable cxsr around plane enable/disable */ 11991 if (plane->type != DRM_PLANE_TYPE_CURSOR) 11992 pipe_config->disable_cxsr = true; 11993 } else if (turn_off) { 11994 pipe_config->update_wm_post = true; 11995 11996 /* must disable cxsr around plane enable/disable */ 11997 if (plane->type != DRM_PLANE_TYPE_CURSOR) 11998 pipe_config->disable_cxsr = true; 11999 } else if (intel_wm_need_update(plane, plane_state)) { 12000 /* FIXME bollocks */ 12001 pipe_config->update_wm_pre = true; 12002 pipe_config->update_wm_post = true; 12003 } 12004 12005 /* Pre-gen9 platforms need two-step watermark updates */ 12006 if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) && 12007 INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks) 12008 to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true; 12009 12010 if (visible || was_visible) 12011 pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit; 12012 12013 /* 12014 * WaCxSRDisabledForSpriteScaling:ivb 12015 * 12016 * cstate->update_wm was already set above, so this flag will 12017 * take effect when we commit and program watermarks. 12018 */ 12019 if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) && 12020 needs_scaling(to_intel_plane_state(plane_state)) && 12021 !needs_scaling(old_plane_state)) 12022 pipe_config->disable_lp_wm = true; 12023 12024 return 0; 12025 } 12026 12027 static bool encoders_cloneable(const struct intel_encoder *a, 12028 const struct intel_encoder *b) 12029 { 12030 /* masks could be asymmetric, so check both ways */ 12031 return a == b || (a->cloneable & (1 << b->type) && 12032 b->cloneable & (1 << a->type)); 12033 } 12034 12035 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 12036 struct intel_crtc *crtc, 12037 struct intel_encoder *encoder) 12038 { 12039 struct intel_encoder *source_encoder; 12040 struct drm_connector *connector; 12041 struct drm_connector_state *connector_state; 12042 int i; 12043 12044 for_each_connector_in_state(state, connector, connector_state, i) { 12045 if (connector_state->crtc != &crtc->base) 12046 continue; 12047 12048 source_encoder = 12049 to_intel_encoder(connector_state->best_encoder); 12050 if (!encoders_cloneable(encoder, source_encoder)) 12051 return false; 12052 } 12053 12054 return true; 12055 } 12056 12057 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 12058 struct drm_crtc_state *crtc_state) 12059 { 12060 struct drm_device *dev = crtc->dev; 12061 struct drm_i915_private *dev_priv = to_i915(dev); 12062 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12063 struct intel_crtc_state *pipe_config = 12064 to_intel_crtc_state(crtc_state); 12065 struct drm_atomic_state *state = crtc_state->state; 12066 int ret; 12067 bool mode_changed = needs_modeset(crtc_state); 12068 12069 if (mode_changed && !crtc_state->active) 12070 pipe_config->update_wm_post = true; 12071 12072 if (mode_changed && crtc_state->enable && 12073 dev_priv->display.crtc_compute_clock && 12074 !WARN_ON(pipe_config->shared_dpll)) { 12075 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 12076 pipe_config); 12077 if (ret) 12078 return ret; 12079 } 12080 12081 if (crtc_state->color_mgmt_changed) { 12082 ret = intel_color_check(crtc, crtc_state); 12083 if 
(ret) 12084 return ret; 12085 12086 /* 12087 * Changing color management on Intel hardware is 12088 * handled as part of the plane update. 12089 */ 12090 crtc_state->planes_changed = true; 12091 } 12092 12093 ret = 0; 12094 if (dev_priv->display.compute_pipe_wm) { 12095 ret = dev_priv->display.compute_pipe_wm(pipe_config); 12096 if (ret) { 12097 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 12098 return ret; 12099 } 12100 } 12101 12102 if (dev_priv->display.compute_intermediate_wm && 12103 !to_intel_atomic_state(state)->skip_intermediate_wm) { 12104 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 12105 return 0; 12106 12107 /* 12108 * Calculate 'intermediate' watermarks that satisfy both the 12109 * old state and the new state. We can program these 12110 * immediately. 12111 */ 12112 ret = dev_priv->display.compute_intermediate_wm(crtc->dev, 12113 intel_crtc, 12114 pipe_config); 12115 if (ret) { 12116 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12117 return ret; 12118 } 12119 } else if (dev_priv->display.compute_intermediate_wm) { 12120 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 12121 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; 12122 } 12123 12124 if (INTEL_INFO(dev)->gen >= 9) { 12125 if (mode_changed) 12126 ret = skl_update_scaler_crtc(pipe_config); 12127 12128 if (!ret) 12129 ret = intel_atomic_setup_scalers(dev, intel_crtc, 12130 pipe_config); 12131 } 12132 12133 return ret; 12134 } 12135 12136 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 12137 .mode_set_base_atomic = intel_pipe_set_base_atomic, 12138 .atomic_begin = intel_begin_crtc_commit, 12139 .atomic_flush = intel_finish_crtc_commit, 12140 .atomic_check = intel_crtc_atomic_check, 12141 }; 12142 12143 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12144 { 12145 struct intel_connector *connector; 12146 12147 for_each_intel_connector(dev, connector) { 12148 if (connector->base.state->crtc) 12149 drm_connector_unreference(&connector->base); 12150 12151 if (connector->base.encoder) { 12152 connector->base.state->best_encoder = 12153 connector->base.encoder; 12154 connector->base.state->crtc = 12155 connector->base.encoder->crtc; 12156 12157 drm_connector_reference(&connector->base); 12158 } else { 12159 connector->base.state->best_encoder = NULL; 12160 connector->base.state->crtc = NULL; 12161 } 12162 } 12163 } 12164 12165 static void 12166 connected_sink_compute_bpp(struct intel_connector *connector, 12167 struct intel_crtc_state *pipe_config) 12168 { 12169 int bpp = pipe_config->pipe_bpp; 12170 12171 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n", 12172 connector->base.base.id, 12173 connector->base.name); 12174 12175 /* Don't use an invalid EDID bpc value */ 12176 if (connector->base.display_info.bpc && 12177 connector->base.display_info.bpc * 3 < bpp) { 12178 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 12179 bpp, connector->base.display_info.bpc*3); 12180 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 12181 } 12182 12183 /* Clamp bpp to default limit on screens without EDID 1.4 */ 12184 if (connector->base.display_info.bpc == 0) { 12185 int type = connector->base.connector_type; 12186 int clamp_bpp = 24; 12187 12188 /* Fall back to 18 bpp when DP sink capability is unknown.
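* 18 bpp (6 bpc) is the lowest common denominator among DP sinks, so it is the conservative choice when the EDID reports nothing.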
*/ 12189 if (type == DRM_MODE_CONNECTOR_DisplayPort || 12190 type == DRM_MODE_CONNECTOR_eDP) 12191 clamp_bpp = 18; 12192 12193 if (bpp > clamp_bpp) { 12194 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", 12195 bpp, clamp_bpp); 12196 pipe_config->pipe_bpp = clamp_bpp; 12197 } 12198 } 12199 } 12200 12201 static int 12202 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 12203 struct intel_crtc_state *pipe_config) 12204 { 12205 struct drm_device *dev = crtc->base.dev; 12206 struct drm_atomic_state *state; 12207 struct drm_connector *connector; 12208 struct drm_connector_state *connector_state; 12209 int bpp, i; 12210 12211 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) 12212 bpp = 10*3; 12213 else if (INTEL_INFO(dev)->gen >= 5) 12214 bpp = 12*3; 12215 else 12216 bpp = 8*3; 12217 12218 12219 pipe_config->pipe_bpp = bpp; 12220 12221 state = pipe_config->base.state; 12222 12223 /* Clamp display bpp to EDID value */ 12224 for_each_connector_in_state(state, connector, connector_state, i) { 12225 if (connector_state->crtc != &crtc->base) 12226 continue; 12227 12228 connected_sink_compute_bpp(to_intel_connector(connector), 12229 pipe_config); 12230 } 12231 12232 return bpp; 12233 } 12234 12235 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 12236 { 12237 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 12238 "type: 0x%x flags: 0x%x\n", 12239 mode->crtc_clock, 12240 mode->crtc_hdisplay, mode->crtc_hsync_start, 12241 mode->crtc_hsync_end, mode->crtc_htotal, 12242 mode->crtc_vdisplay, mode->crtc_vsync_start, 12243 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 12244 } 12245 12246 static void intel_dump_pipe_config(struct intel_crtc *crtc, 12247 struct intel_crtc_state *pipe_config, 12248 const char *context) 12249 { 12250 struct drm_device *dev = crtc->base.dev; 12251 struct drm_plane *plane; 12252 struct intel_plane *intel_plane; 12253 struct intel_plane_state *state; 12254 struct drm_framebuffer *fb; 12255 12256 DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", 12257 crtc->base.base.id, crtc->base.name, 12258 context, pipe_config, pipe_name(crtc->pipe)); 12259 12260 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); 12261 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n", 12262 pipe_config->pipe_bpp, pipe_config->dither); 12263 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12264 pipe_config->has_pch_encoder, 12265 pipe_config->fdi_lanes, 12266 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 12267 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 12268 pipe_config->fdi_m_n.tu); 12269 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12270 intel_crtc_has_dp_encoder(pipe_config), 12271 pipe_config->lane_count, 12272 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 12273 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 12274 pipe_config->dp_m_n.tu); 12275 12276 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", 12277 intel_crtc_has_dp_encoder(pipe_config), 12278 pipe_config->lane_count, 12279 pipe_config->dp_m2_n2.gmch_m, 12280 pipe_config->dp_m2_n2.gmch_n, 12281 pipe_config->dp_m2_n2.link_m, 12282 pipe_config->dp_m2_n2.link_n, 12283 pipe_config->dp_m2_n2.tu); 12284 12285 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", 12286 pipe_config->has_audio, 12287 pipe_config->has_infoframe); 12288 12289 
DRM_DEBUG_KMS("requested mode:\n"); 12290 drm_mode_debug_printmodeline(&pipe_config->base.mode); 12291 DRM_DEBUG_KMS("adjusted mode:\n"); 12292 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 12293 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 12294 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock); 12295 DRM_DEBUG_KMS("pipe src size: %dx%d\n", 12296 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 12297 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 12298 crtc->num_scalers, 12299 pipe_config->scaler_state.scaler_users, 12300 pipe_config->scaler_state.scaler_id); 12301 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 12302 pipe_config->gmch_pfit.control, 12303 pipe_config->gmch_pfit.pgm_ratios, 12304 pipe_config->gmch_pfit.lvds_border_bits); 12305 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", 12306 pipe_config->pch_pfit.pos, 12307 pipe_config->pch_pfit.size, 12308 pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 12309 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 12310 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); 12311 12312 if (IS_BROXTON(dev)) { 12313 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," 12314 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " 12315 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", 12316 pipe_config->ddi_pll_sel, 12317 pipe_config->dpll_hw_state.ebb0, 12318 pipe_config->dpll_hw_state.ebb4, 12319 pipe_config->dpll_hw_state.pll0, 12320 pipe_config->dpll_hw_state.pll1, 12321 pipe_config->dpll_hw_state.pll2, 12322 pipe_config->dpll_hw_state.pll3, 12323 pipe_config->dpll_hw_state.pll6, 12324 pipe_config->dpll_hw_state.pll8, 12325 pipe_config->dpll_hw_state.pll9, 12326 pipe_config->dpll_hw_state.pll10, 12327 pipe_config->dpll_hw_state.pcsdw12); 12328 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 12329 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " 12330 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 12331 pipe_config->ddi_pll_sel, 12332 pipe_config->dpll_hw_state.ctrl1, 12333 pipe_config->dpll_hw_state.cfgcr1, 12334 pipe_config->dpll_hw_state.cfgcr2); 12335 } else if (HAS_DDI(dev)) { 12336 DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", 12337 pipe_config->ddi_pll_sel, 12338 pipe_config->dpll_hw_state.wrpll, 12339 pipe_config->dpll_hw_state.spll); 12340 } else { 12341 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 12342 "fp0: 0x%x, fp1: 0x%x\n", 12343 pipe_config->dpll_hw_state.dpll, 12344 pipe_config->dpll_hw_state.dpll_md, 12345 pipe_config->dpll_hw_state.fp0, 12346 pipe_config->dpll_hw_state.fp1); 12347 } 12348 12349 DRM_DEBUG_KMS("planes on this crtc\n"); 12350 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 12351 intel_plane = to_intel_plane(plane); 12352 if (intel_plane->pipe != crtc->pipe) 12353 continue; 12354 12355 state = to_intel_plane_state(plane->state); 12356 fb = state->base.fb; 12357 if (!fb) { 12358 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", 12359 plane->base.id, plane->name, state->scaler_id); 12360 continue; 12361 } 12362 12363 DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", 12364 plane->base.id, plane->name); 12365 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", 12366 fb->base.id, fb->width, fb->height, 12367 drm_get_format_name(fb->pixel_format)); 12368 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", 12369 state->scaler_id, 12370 state->src.x1 >> 16, state->src.y1 >> 16, 12371 
drm_rect_width(&state->src) >> 16, 12372 drm_rect_height(&state->src) >> 16, 12373 state->dst.x1, state->dst.y1, 12374 drm_rect_width(&state->dst), 12375 drm_rect_height(&state->dst)); 12376 } 12377 } 12378 12379 static bool check_digital_port_conflicts(struct drm_atomic_state *state) 12380 { 12381 struct drm_device *dev = state->dev; 12382 struct drm_connector *connector; 12383 unsigned int used_ports = 0; 12384 unsigned int used_mst_ports = 0; 12385 12386 /* 12387 * Walk the connector list instead of the encoder 12388 * list to detect the problem on ddi platforms 12389 * where there's just one encoder per digital port. 12390 */ 12391 drm_for_each_connector(connector, dev) { 12392 struct drm_connector_state *connector_state; 12393 struct intel_encoder *encoder; 12394 12395 connector_state = drm_atomic_get_existing_connector_state(state, connector); 12396 if (!connector_state) 12397 connector_state = connector->state; 12398 12399 if (!connector_state->best_encoder) 12400 continue; 12401 12402 encoder = to_intel_encoder(connector_state->best_encoder); 12403 12404 WARN_ON(!connector_state->crtc); 12405 12406 switch (encoder->type) { 12407 unsigned int port_mask; 12408 case INTEL_OUTPUT_UNKNOWN: 12409 if (WARN_ON(!HAS_DDI(dev))) 12410 break; 12411 case INTEL_OUTPUT_DP: 12412 case INTEL_OUTPUT_HDMI: 12413 case INTEL_OUTPUT_EDP: 12414 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 12415 12416 /* the same port mustn't appear more than once */ 12417 if (used_ports & port_mask) 12418 return false; 12419 12420 used_ports |= port_mask; 12421 break; 12422 case INTEL_OUTPUT_DP_MST: 12423 used_mst_ports |= 12424 1 << enc_to_mst(&encoder->base)->primary->port; 12425 break; 12426 default: 12427 break; 12428 } 12429 } 12430 12431 /* can't mix MST and SST/HDMI on the same port */ 12432 if (used_ports & used_mst_ports) 12433 return false; 12434 12435 return true; 12436 } 12437 12438 static void 12439 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 12440 { 12441 struct drm_crtc_state tmp_state; 12442 struct intel_crtc_scaler_state scaler_state; 12443 struct intel_dpll_hw_state dpll_hw_state; 12444 struct intel_shared_dpll *shared_dpll; 12445 uint32_t ddi_pll_sel; 12446 bool force_thru; 12447 12448 /* FIXME: before the switch to atomic started, a new pipe_config was 12449 * kzalloc'd. Code that depends on any field being zero should be 12450 * fixed, so that the crtc_state can be safely duplicated. For now, 12451 * only fields that are known not to cause problems are preserved.
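* The save/memset/restore sequence below does exactly that: any field not explicitly saved and restored ends up zeroed.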
*/ 12452 12453 tmp_state = crtc_state->base; 12454 scaler_state = crtc_state->scaler_state; 12455 shared_dpll = crtc_state->shared_dpll; 12456 dpll_hw_state = crtc_state->dpll_hw_state; 12457 ddi_pll_sel = crtc_state->ddi_pll_sel; 12458 force_thru = crtc_state->pch_pfit.force_thru; 12459 12460 memset(crtc_state, 0, sizeof *crtc_state); 12461 12462 crtc_state->base = tmp_state; 12463 crtc_state->scaler_state = scaler_state; 12464 crtc_state->shared_dpll = shared_dpll; 12465 crtc_state->dpll_hw_state = dpll_hw_state; 12466 crtc_state->ddi_pll_sel = ddi_pll_sel; 12467 crtc_state->pch_pfit.force_thru = force_thru; 12468 } 12469 12470 static int 12471 intel_modeset_pipe_config(struct drm_crtc *crtc, 12472 struct intel_crtc_state *pipe_config) 12473 { 12474 struct drm_atomic_state *state = pipe_config->base.state; 12475 struct intel_encoder *encoder; 12476 struct drm_connector *connector; 12477 struct drm_connector_state *connector_state; 12478 int base_bpp, ret = -EINVAL; 12479 int i; 12480 bool retry = true; 12481 12482 clear_intel_crtc_state(pipe_config); 12483 12484 pipe_config->cpu_transcoder = 12485 (enum transcoder) to_intel_crtc(crtc)->pipe; 12486 12487 /* 12488 * Sanitize sync polarity flags based on requested ones. If neither 12489 * positive nor negative polarity is requested, treat this as meaning 12490 * negative polarity. 12491 */ 12492 if (!(pipe_config->base.adjusted_mode.flags & 12493 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12494 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12495 12496 if (!(pipe_config->base.adjusted_mode.flags & 12497 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12498 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12499 12500 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12501 pipe_config); 12502 if (base_bpp < 0) 12503 goto fail; 12504 12505 /* 12506 * Determine the real pipe dimensions. Note that stereo modes can 12507 * increase the actual pipe size due to the frame doubling and 12508 * insertion of additional space for blanks between the frames. This 12509 * is stored in the crtc timings. We use the requested mode to do this 12510 * computation to clearly distinguish it from the adjusted mode, which 12511 * can be changed by the connectors in the retry loop below. 12512 */ 12513 drm_crtc_get_hv_timing(&pipe_config->base.mode, 12514 &pipe_config->pipe_src_w, 12515 &pipe_config->pipe_src_h); 12516 12517 for_each_connector_in_state(state, connector, connector_state, i) { 12518 if (connector_state->crtc != crtc) 12519 continue; 12520 12521 encoder = to_intel_encoder(connector_state->best_encoder); 12522 12523 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 12524 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 12525 goto fail; 12526 } 12527 12528 /* 12529 * Determine output_types before calling the .compute_config() 12530 * hooks so that the hooks can use this information safely. 12531 */ 12532 pipe_config->output_types |= 1 << encoder->type; 12533 } 12534 12535 encoder_retry: 12536 /* Ensure the port clock defaults are reset when retrying. */ 12537 pipe_config->port_clock = 0; 12538 pipe_config->pixel_multiplier = 1; 12539 12540 /* Fill in default crtc timings, allow encoders to overwrite them.
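The CRTC_STEREO_DOUBLE flag passed below asks drm_mode_set_crtcinfo() to also apply the stereo frame doubling mentioned earlier to the crtc timing fields.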
*/ 12541 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 12542 CRTC_STEREO_DOUBLE); 12543 12544 /* Pass our mode to the connectors and the CRTC to give them a chance to 12545 * adjust it according to limitations or connector properties, and also 12546 * a chance to reject the mode entirely. 12547 */ 12548 for_each_connector_in_state(state, connector, connector_state, i) { 12549 if (connector_state->crtc != crtc) 12550 continue; 12551 12552 encoder = to_intel_encoder(connector_state->best_encoder); 12553 12554 if (!(encoder->compute_config(encoder, pipe_config))) { 12555 DRM_DEBUG_KMS("Encoder config failure\n"); 12556 goto fail; 12557 } 12558 } 12559 12560 /* Set default port clock if not overwritten by the encoder. Needs to be 12561 * done afterwards in case the encoder adjusts the mode. */ 12562 if (!pipe_config->port_clock) 12563 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 12564 * pipe_config->pixel_multiplier; 12565 12566 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 12567 if (ret < 0) { 12568 DRM_DEBUG_KMS("CRTC fixup failed\n"); 12569 goto fail; 12570 } 12571 12572 if (ret == RETRY) { 12573 if (WARN(!retry, "loop in pipe configuration computation\n")) { 12574 ret = -EINVAL; 12575 goto fail; 12576 } 12577 12578 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 12579 retry = false; 12580 goto encoder_retry; 12581 } 12582 12583 /* Dithering does not seem to pass bits through correctly when it should, so 12584 * only enable it on 6bpc panels. */ 12585 pipe_config->dither = pipe_config->pipe_bpp == 6*3; 12586 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 12587 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12588 12589 fail: 12590 return ret; 12591 } 12592 12593 static void 12594 intel_modeset_update_crtc_state(struct drm_atomic_state *state) 12595 { 12596 struct drm_crtc *crtc; 12597 struct drm_crtc_state *crtc_state; 12598 int i; 12599 12600 /* Double check state. */ 12601 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12602 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state); 12603 12604 /* Update hwmode for vblank functions */ 12605 if (crtc->state->active) 12606 crtc->hwmode = crtc->state->adjusted_mode; 12607 else 12608 crtc->hwmode.crtc_clock = 0; 12609 12610 /* 12611 * Update legacy state to satisfy fbc code. This can 12612 * be removed when fbc uses the atomic state.
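* Only the primary plane's fb pointer and the x/y panning offsets need mirroring, as done below.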
12613 */ 12614 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 12615 struct drm_plane_state *plane_state = crtc->primary->state; 12616 12617 crtc->primary->fb = plane_state->fb; 12618 crtc->x = plane_state->src_x >> 16; 12619 crtc->y = plane_state->src_y >> 16; 12620 } 12621 } 12622 } 12623 12624 static bool intel_fuzzy_clock_check(int clock1, int clock2) 12625 { 12626 int diff; 12627 12628 if (clock1 == clock2) 12629 return true; 12630 12631 if (!clock1 || !clock2) 12632 return false; 12633 12634 diff = abs(clock1 - clock2); 12635 12636 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12637 return true; 12638 12639 return false; 12640 } 12641 12642 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 12643 list_for_each_entry((intel_crtc), \ 12644 &(dev)->mode_config.crtc_list, \ 12645 base.head) \ 12646 for_each_if (mask & (1 <<(intel_crtc)->pipe)) 12647 12648 static bool 12649 intel_compare_m_n(unsigned int m, unsigned int n, 12650 unsigned int m2, unsigned int n2, 12651 bool exact) 12652 { 12653 if (m == m2 && n == n2) 12654 return true; 12655 12656 if (exact || !m || !n || !m2 || !n2) 12657 return false; 12658 12659 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12660 12661 if (n > n2) { 12662 while (n > n2) { 12663 m2 <<= 1; 12664 n2 <<= 1; 12665 } 12666 } else if (n < n2) { 12667 while (n < n2) { 12668 m <<= 1; 12669 n <<= 1; 12670 } 12671 } 12672 12673 if (n != n2) 12674 return false; 12675 12676 return intel_fuzzy_clock_check(m, m2); 12677 } 12678 12679 static bool 12680 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12681 struct intel_link_m_n *m2_n2, 12682 bool adjust) 12683 { 12684 if (m_n->tu == m2_n2->tu && 12685 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12686 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 12687 intel_compare_m_n(m_n->link_m, m_n->link_n, 12688 m2_n2->link_m, m2_n2->link_n, !adjust)) { 12689 if (adjust) 12690 *m2_n2 = *m_n; 12691 12692 return true; 12693 } 12694 12695 return false; 12696 } 12697 12698 static bool 12699 intel_pipe_config_compare(struct drm_device *dev, 12700 struct intel_crtc_state *current_config, 12701 struct intel_crtc_state *pipe_config, 12702 bool adjust) 12703 { 12704 bool ret = true; 12705 12706 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) 
\ 12707 do { \ 12708 if (!adjust) \ 12709 DRM_ERROR(fmt, ##__VA_ARGS__); \ 12710 else \ 12711 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ 12712 } while (0) 12713 12714 #define PIPE_CONF_CHECK_X(name) \ 12715 if (current_config->name != pipe_config->name) { \ 12716 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12717 "(expected 0x%08x, found 0x%08x)\n", \ 12718 current_config->name, \ 12719 pipe_config->name); \ 12720 ret = false; \ 12721 } 12722 12723 #define PIPE_CONF_CHECK_I(name) \ 12724 if (current_config->name != pipe_config->name) { \ 12725 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12726 "(expected %i, found %i)\n", \ 12727 current_config->name, \ 12728 pipe_config->name); \ 12729 ret = false; \ 12730 } 12731 12732 #define PIPE_CONF_CHECK_P(name) \ 12733 if (current_config->name != pipe_config->name) { \ 12734 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12735 "(expected %p, found %p)\n", \ 12736 current_config->name, \ 12737 pipe_config->name); \ 12738 ret = false; \ 12739 } 12740 12741 #define PIPE_CONF_CHECK_M_N(name) \ 12742 if (!intel_compare_link_m_n(&current_config->name, \ 12743 &pipe_config->name,\ 12744 adjust)) { \ 12745 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12746 "(expected tu %i gmch %i/%i link %i/%i, " \ 12747 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12748 current_config->name.tu, \ 12749 current_config->name.gmch_m, \ 12750 current_config->name.gmch_n, \ 12751 current_config->name.link_m, \ 12752 current_config->name.link_n, \ 12753 pipe_config->name.tu, \ 12754 pipe_config->name.gmch_m, \ 12755 pipe_config->name.gmch_n, \ 12756 pipe_config->name.link_m, \ 12757 pipe_config->name.link_n); \ 12758 ret = false; \ 12759 } 12760 12761 /* This is required for BDW+ where there is only one set of registers for 12762 * switching between high and low RR. 12763 * This macro can be used whenever a comparison has to be made between one 12764 * hw state and multiple sw state variables.
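* Below it is used as PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2): the hw M/N values pass if they match either software copy.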
12765 */ 12766 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 12767 if (!intel_compare_link_m_n(&current_config->name, \ 12768 &pipe_config->name, adjust) && \ 12769 !intel_compare_link_m_n(&current_config->alt_name, \ 12770 &pipe_config->name, adjust)) { \ 12771 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12772 "(expected tu %i gmch %i/%i link %i/%i, " \ 12773 "or tu %i gmch %i/%i link %i/%i, " \ 12774 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12775 current_config->name.tu, \ 12776 current_config->name.gmch_m, \ 12777 current_config->name.gmch_n, \ 12778 current_config->name.link_m, \ 12779 current_config->name.link_n, \ 12780 current_config->alt_name.tu, \ 12781 current_config->alt_name.gmch_m, \ 12782 current_config->alt_name.gmch_n, \ 12783 current_config->alt_name.link_m, \ 12784 current_config->alt_name.link_n, \ 12785 pipe_config->name.tu, \ 12786 pipe_config->name.gmch_m, \ 12787 pipe_config->name.gmch_n, \ 12788 pipe_config->name.link_m, \ 12789 pipe_config->name.link_n); \ 12790 ret = false; \ 12791 } 12792 12793 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 12794 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12795 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ 12796 "(expected %i, found %i)\n", \ 12797 current_config->name & (mask), \ 12798 pipe_config->name & (mask)); \ 12799 ret = false; \ 12800 } 12801 12802 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 12803 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12804 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12805 "(expected %i, found %i)\n", \ 12806 current_config->name, \ 12807 pipe_config->name); \ 12808 ret = false; \ 12809 } 12810 12811 #define PIPE_CONF_QUIRK(quirk) \ 12812 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12813 12814 PIPE_CONF_CHECK_I(cpu_transcoder); 12815 12816 PIPE_CONF_CHECK_I(has_pch_encoder); 12817 PIPE_CONF_CHECK_I(fdi_lanes); 12818 PIPE_CONF_CHECK_M_N(fdi_m_n); 12819 12820 PIPE_CONF_CHECK_I(lane_count); 12821 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 12822 12823 if (INTEL_INFO(dev)->gen < 8) { 12824 PIPE_CONF_CHECK_M_N(dp_m_n); 12825 12826 if (current_config->has_drrs) 12827 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12828 } else 12829 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12830 12831 PIPE_CONF_CHECK_X(output_types); 12832 12833 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12834 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12835 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12836 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12837 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12838 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12839 12840 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12841 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12842 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12843 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12844 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12845 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12846 12847 PIPE_CONF_CHECK_I(pixel_multiplier); 12848 PIPE_CONF_CHECK_I(has_hdmi_sink); 12849 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 12850 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 12851 PIPE_CONF_CHECK_I(limited_color_range); 12852 PIPE_CONF_CHECK_I(has_infoframe); 12853 12854 PIPE_CONF_CHECK_I(has_audio); 12855 12856 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12857 DRM_MODE_FLAG_INTERLACE); 12858 12859 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12861 DRM_MODE_FLAG_PHSYNC); 12862 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12863 DRM_MODE_FLAG_NHSYNC); 12864 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12865 DRM_MODE_FLAG_PVSYNC); 12866 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12867 DRM_MODE_FLAG_NVSYNC); 12868 } 12869 12870 PIPE_CONF_CHECK_X(gmch_pfit.control); 12871 /* pfit ratios are autocomputed by the hw on gen4+ */ 12872 if (INTEL_INFO(dev)->gen < 4) 12873 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 12874 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 12875 12876 if (!adjust) { 12877 PIPE_CONF_CHECK_I(pipe_src_w); 12878 PIPE_CONF_CHECK_I(pipe_src_h); 12879 12880 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12881 if (current_config->pch_pfit.enabled) { 12882 PIPE_CONF_CHECK_X(pch_pfit.pos); 12883 PIPE_CONF_CHECK_X(pch_pfit.size); 12884 } 12885 12886 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12887 } 12888 12889 /* BDW+ don't expose a synchronous way to read the state */ 12890 if (IS_HASWELL(dev)) 12891 PIPE_CONF_CHECK_I(ips_enabled); 12892 12893 PIPE_CONF_CHECK_I(double_wide); 12894 12895 PIPE_CONF_CHECK_X(ddi_pll_sel); 12896 12897 PIPE_CONF_CHECK_P(shared_dpll); 12898 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12899 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12900 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12901 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12902 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12903 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12904 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12905 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12906 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12907 12908 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 12909 PIPE_CONF_CHECK_X(dsi_pll.div); 12910 12911 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 12912 PIPE_CONF_CHECK_I(pipe_bpp); 12913 12914 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12915 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12916 12917 #undef PIPE_CONF_CHECK_X 12918 #undef PIPE_CONF_CHECK_I 12919 #undef PIPE_CONF_CHECK_P 12920 #undef PIPE_CONF_CHECK_FLAGS 12921 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12922 #undef PIPE_CONF_QUIRK 12923 #undef INTEL_ERR_OR_DBG_KMS 12924 12925 return ret; 12926 } 12927 12928 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 12929 const struct intel_crtc_state *pipe_config) 12930 { 12931 if (pipe_config->has_pch_encoder) { 12932 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12933 &pipe_config->fdi_m_n); 12934 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 12935 12936 /* 12937 * FDI already provided one idea for the dotclock. 12938 * Yell if the encoder disagrees. 
12939 */ 12940 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 12941 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12942 fdi_dotclock, dotclock); 12943 } 12944 } 12945 12946 static void verify_wm_state(struct drm_crtc *crtc, 12947 struct drm_crtc_state *new_state) 12948 { 12949 struct drm_device *dev = crtc->dev; 12950 struct drm_i915_private *dev_priv = to_i915(dev); 12951 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12952 struct skl_ddb_entry *hw_entry, *sw_entry; 12953 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12954 const enum i915_pipe pipe = intel_crtc->pipe; 12955 int plane; 12956 12957 if (INTEL_INFO(dev)->gen < 9 || !new_state->active) 12958 return; 12959 12960 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 12961 sw_ddb = &dev_priv->wm.skl_hw.ddb; 12962 12963 /* planes */ 12964 for_each_plane(dev_priv, pipe, plane) { 12965 hw_entry = &hw_ddb.plane[pipe][plane]; 12966 sw_entry = &sw_ddb->plane[pipe][plane]; 12967 12968 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12969 continue; 12970 12971 DRM_ERROR("mismatch in DDB state pipe %c plane %d " 12972 "(expected (%u,%u), found (%u,%u))\n", 12973 pipe_name(pipe), plane + 1, 12974 sw_entry->start, sw_entry->end, 12975 hw_entry->start, hw_entry->end); 12976 } 12977 12978 /* cursor */ 12979 hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; 12980 sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; 12981 12982 if (!skl_ddb_entry_equal(hw_entry, sw_entry)) { 12983 DRM_ERROR("mismatch in DDB state pipe %c cursor " 12984 "(expected (%u,%u), found (%u,%u))\n", 12985 pipe_name(pipe), 12986 sw_entry->start, sw_entry->end, 12987 hw_entry->start, hw_entry->end); 12988 } 12989 } 12990 12991 static void 12992 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc) 12993 { 12994 struct drm_connector *connector; 12995 12996 drm_for_each_connector(connector, dev) { 12997 struct drm_encoder *encoder = connector->encoder; 12998 struct drm_connector_state *state = connector->state; 12999 13000 if (state->crtc != crtc) 13001 continue; 13002 13003 intel_connector_verify_state(to_intel_connector(connector)); 13004 13005 I915_STATE_WARN(state->best_encoder != encoder, 13006 "connector's atomic encoder doesn't match legacy encoder\n"); 13007 } 13008 } 13009 13010 static void 13011 verify_encoder_state(struct drm_device *dev) 13012 { 13013 struct intel_encoder *encoder; 13014 struct intel_connector *connector; 13015 13016 for_each_intel_encoder(dev, encoder) { 13017 bool enabled = false; 13018 enum i915_pipe pipe; 13019 13020 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 13021 encoder->base.base.id, 13022 encoder->base.name); 13023 13024 for_each_intel_connector(dev, connector) { 13025 if (connector->base.state->best_encoder != &encoder->base) 13026 continue; 13027 enabled = true; 13028 13029 I915_STATE_WARN(connector->base.state->crtc != 13030 encoder->base.crtc, 13031 "connector's crtc doesn't match encoder crtc\n"); 13032 } 13033 13034 I915_STATE_WARN(!!encoder->base.crtc != enabled, 13035 "encoder's enabled state mismatch " 13036 "(expected %i, found %i)\n", 13037 !!encoder->base.crtc, enabled); 13038 13039 if (!encoder->base.crtc) { 13040 bool active; 13041 13042 active = encoder->get_hw_state(encoder, &pipe); 13043 I915_STATE_WARN(active, 13044 "encoder detached but still enabled on pipe %c.\n", 13045 pipe_name(pipe)); 13046 } 13047 } 13048 } 13049 13050 static void 13051 verify_crtc_state(struct drm_crtc *crtc, 13052 struct drm_crtc_state *old_crtc_state, 13053 struct drm_crtc_state *new_crtc_state) 13054 { 13055 struct 
drm_device *dev = crtc->dev; 13056 struct drm_i915_private *dev_priv = to_i915(dev); 13057 struct intel_encoder *encoder; 13058 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13059 struct intel_crtc_state *pipe_config, *sw_config; 13060 struct drm_atomic_state *old_state; 13061 bool active; 13062 13063 old_state = old_crtc_state->state; 13064 __drm_atomic_helper_crtc_destroy_state(old_crtc_state); 13065 pipe_config = to_intel_crtc_state(old_crtc_state); 13066 memset(pipe_config, 0, sizeof(*pipe_config)); 13067 pipe_config->base.crtc = crtc; 13068 pipe_config->base.state = old_state; 13069 13070 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 13071 13072 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 13073 13074 /* hw state is inconsistent with the pipe quirk */ 13075 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 13076 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 13077 active = new_crtc_state->active; 13078 13079 I915_STATE_WARN(new_crtc_state->active != active, 13080 "crtc active state doesn't match with hw state " 13081 "(expected %i, found %i)\n", new_crtc_state->active, active); 13082 13083 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, 13084 "transitional active state does not match atomic hw state " 13085 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); 13086 13087 for_each_encoder_on_crtc(dev, crtc, encoder) { 13088 enum i915_pipe pipe; 13089 13090 active = encoder->get_hw_state(encoder, &pipe); 13091 I915_STATE_WARN(active != new_crtc_state->active, 13092 "[ENCODER:%i] active %i with crtc active %i\n", 13093 encoder->base.base.id, active, new_crtc_state->active); 13094 13095 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 13096 "Encoder connected to wrong pipe %c\n", 13097 pipe_name(pipe)); 13098 13099 if (active) { 13100 pipe_config->output_types |= 1 << encoder->type; 13101 encoder->get_config(encoder, pipe_config); 13102 } 13103 } 13104 13105 if (!new_crtc_state->active) 13106 return; 13107 13108 intel_pipe_config_sanity_check(dev_priv, pipe_config); 13109 13110 sw_config = to_intel_crtc_state(crtc->state); 13111 if (!intel_pipe_config_compare(dev, sw_config, 13112 pipe_config, false)) { 13113 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13114 intel_dump_pipe_config(intel_crtc, pipe_config, 13115 "[hw state]"); 13116 intel_dump_pipe_config(intel_crtc, sw_config, 13117 "[sw state]"); 13118 } 13119 } 13120 13121 static void 13122 verify_single_dpll_state(struct drm_i915_private *dev_priv, 13123 struct intel_shared_dpll *pll, 13124 struct drm_crtc *crtc, 13125 struct drm_crtc_state *new_state) 13126 { 13127 struct intel_dpll_hw_state dpll_hw_state; 13128 unsigned crtc_mask; 13129 bool active; 13130 13131 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13132 13133 DRM_DEBUG_KMS("%s\n", pll->name); 13134 13135 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); 13136 13137 if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) { 13138 I915_STATE_WARN(!pll->on && pll->active_mask, 13139 "pll in active use but not on in sw tracking\n"); 13140 I915_STATE_WARN(pll->on && !pll->active_mask, 13141 "pll is on but not used by any active crtc\n"); 13142 I915_STATE_WARN(pll->on != active, 13143 "pll on state mismatch (expected %i, found %i)\n", 13144 pll->on, active); 13145 } 13146 13147 if (!crtc) { 13148 I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask, 13149 "more active pll users than references: %x vs %x\n", 13150 pll->active_mask, 
pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

static void
verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);

	if (new_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);

	if (old_state->shared_dpll &&
	    old_state->shared_dpll != new_state->shared_dpll) {
		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
		struct intel_shared_dpll *pll = old_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(drm_crtc_index(crtc)));
		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(crtc)));
	}
}

static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
			  struct drm_crtc_state *old_state,
			  struct drm_crtc_state *new_state)
{
	if (!needs_modeset(new_state) &&
	    !to_intel_crtc_state(new_state)->update_pipe)
		return;

	verify_wm_state(crtc, new_state);
	verify_connector_state(crtc->dev, crtc);
	verify_crtc_state(crtc, old_state, new_state);
	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}

static void
verify_disabled_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}

static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
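	 *
	 * E.g. with vblank_start == 1084, the counter reads 1083 just as
	 * vblank begins, so an offset of one must be added to recover the
	 * logical scanline.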
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled.
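	 * With exactly one pipe already enabled, the newly enabled pipe must
	 * do its vblank waits against that pipe; with two or more already
	 * enabled, the waits have happened before and no w/a is needed.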
*/ 13342 for_each_intel_crtc(state->dev, intel_crtc) { 13343 struct intel_crtc_state *pipe_config; 13344 13345 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); 13346 if (IS_ERR(pipe_config)) 13347 return PTR_ERR(pipe_config); 13348 13349 pipe_config->hsw_workaround_pipe = INVALID_PIPE; 13350 13351 if (!pipe_config->base.active || 13352 needs_modeset(&pipe_config->base)) 13353 continue; 13354 13355 /* 2 or more enabled crtcs means no need for w/a */ 13356 if (enabled_pipe != INVALID_PIPE) 13357 return 0; 13358 13359 enabled_pipe = intel_crtc->pipe; 13360 } 13361 13362 if (enabled_pipe != INVALID_PIPE) 13363 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 13364 else if (other_crtc_state) 13365 other_crtc_state->hsw_workaround_pipe = first_pipe; 13366 13367 return 0; 13368 } 13369 13370 static int intel_modeset_all_pipes(struct drm_atomic_state *state) 13371 { 13372 struct drm_crtc *crtc; 13373 struct drm_crtc_state *crtc_state; 13374 int ret = 0; 13375 13376 /* add all active pipes to the state */ 13377 for_each_crtc(state->dev, crtc) { 13378 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13379 if (IS_ERR(crtc_state)) 13380 return PTR_ERR(crtc_state); 13381 13382 if (!crtc_state->active || needs_modeset(crtc_state)) 13383 continue; 13384 13385 crtc_state->mode_changed = true; 13386 13387 ret = drm_atomic_add_affected_connectors(state, crtc); 13388 if (ret) 13389 break; 13390 13391 ret = drm_atomic_add_affected_planes(state, crtc); 13392 if (ret) 13393 break; 13394 } 13395 13396 return ret; 13397 } 13398 13399 static int intel_modeset_checks(struct drm_atomic_state *state) 13400 { 13401 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13402 struct drm_i915_private *dev_priv = to_i915(state->dev); 13403 struct drm_crtc *crtc; 13404 struct drm_crtc_state *crtc_state; 13405 int ret = 0, i; 13406 13407 if (!check_digital_port_conflicts(state)) { 13408 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 13409 return -EINVAL; 13410 } 13411 13412 intel_state->modeset = true; 13413 intel_state->active_crtcs = dev_priv->active_crtcs; 13414 13415 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13416 if (crtc_state->active) 13417 intel_state->active_crtcs |= 1 << i; 13418 else 13419 intel_state->active_crtcs &= ~(1 << i); 13420 13421 if (crtc_state->active != crtc->state->active) 13422 intel_state->active_pipe_changes |= drm_crtc_mask(crtc); 13423 } 13424 13425 /* 13426 * See if the config requires any additional preparation, e.g. 13427 * to adjust global state with pipes off. We need to do this 13428 * here so we can get the modeset_pipe updated config for the new 13429 * mode set on this crtc. For other crtcs we need to use the 13430 * adjusted_mode bits in the crtc directly. 
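	 * (In practice this means recomputing cdclk, and on SKL the DPLL0
	 * VCO; when either changes, every active pipe is added to the
	 * state below.)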
13431 */ 13432 if (dev_priv->display.modeset_calc_cdclk) { 13433 if (!intel_state->cdclk_pll_vco) 13434 intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco; 13435 if (!intel_state->cdclk_pll_vco) 13436 intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq; 13437 13438 ret = dev_priv->display.modeset_calc_cdclk(state); 13439 if (ret < 0) 13440 return ret; 13441 13442 if (intel_state->dev_cdclk != dev_priv->cdclk_freq || 13443 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) 13444 ret = intel_modeset_all_pipes(state); 13445 13446 if (ret < 0) 13447 return ret; 13448 13449 DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", 13450 intel_state->cdclk, intel_state->dev_cdclk); 13451 } else 13452 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; 13453 13454 intel_modeset_clear_plls(state); 13455 13456 if (IS_HASWELL(dev_priv)) 13457 return haswell_mode_set_planes_workaround(state); 13458 13459 return 0; 13460 } 13461 13462 /* 13463 * Handle calculation of various watermark data at the end of the atomic check 13464 * phase. The code here should be run after the per-crtc and per-plane 'check' 13465 * handlers to ensure that all derived state has been updated. 13466 */ 13467 static int calc_watermark_data(struct drm_atomic_state *state) 13468 { 13469 struct drm_device *dev = state->dev; 13470 struct drm_i915_private *dev_priv = to_i915(dev); 13471 13472 /* Is there platform-specific watermark information to calculate? */ 13473 if (dev_priv->display.compute_global_watermarks) 13474 return dev_priv->display.compute_global_watermarks(state); 13475 13476 return 0; 13477 } 13478 13479 /** 13480 * intel_atomic_check - validate state object 13481 * @dev: drm device 13482 * @state: state to validate 13483 */ 13484 static int intel_atomic_check(struct drm_device *dev, 13485 struct drm_atomic_state *state) 13486 { 13487 struct drm_i915_private *dev_priv = to_i915(dev); 13488 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13489 struct drm_crtc *crtc; 13490 struct drm_crtc_state *crtc_state; 13491 int ret, i; 13492 bool any_ms = false; 13493 13494 ret = drm_atomic_helper_check_modeset(dev, state); 13495 if (ret) 13496 return ret; 13497 13498 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13499 struct intel_crtc_state *pipe_config = 13500 to_intel_crtc_state(crtc_state); 13501 13502 /* Catch I915_MODE_FLAG_INHERITED */ 13503 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13504 crtc_state->mode_changed = true; 13505 13506 if (!needs_modeset(crtc_state)) 13507 continue; 13508 13509 if (!crtc_state->enable) { 13510 any_ms = true; 13511 continue; 13512 } 13513 13514 /* FIXME: For only active_changed we shouldn't need to do any 13515 * state recomputation at all. 
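		 * (A DPMS toggle only flips crtc_state->active while the
		 * mode itself is unchanged, so the full pipe config
		 * computation below is redundant in that case.)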
*/ 13516 13517 ret = drm_atomic_add_affected_connectors(state, crtc); 13518 if (ret) 13519 return ret; 13520 13521 ret = intel_modeset_pipe_config(crtc, pipe_config); 13522 if (ret) { 13523 intel_dump_pipe_config(to_intel_crtc(crtc), 13524 pipe_config, "[failed]"); 13525 return ret; 13526 } 13527 13528 if (i915.fastboot && 13529 intel_pipe_config_compare(dev, 13530 to_intel_crtc_state(crtc->state), 13531 pipe_config, true)) { 13532 crtc_state->mode_changed = false; 13533 to_intel_crtc_state(crtc_state)->update_pipe = true; 13534 } 13535 13536 if (needs_modeset(crtc_state)) 13537 any_ms = true; 13538 13539 ret = drm_atomic_add_affected_planes(state, crtc); 13540 if (ret) 13541 return ret; 13542 13543 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13544 needs_modeset(crtc_state) ? 13545 "[modeset]" : "[fastset]"); 13546 } 13547 13548 if (any_ms) { 13549 ret = intel_modeset_checks(state); 13550 13551 if (ret) 13552 return ret; 13553 } else 13554 intel_state->cdclk = dev_priv->cdclk_freq; 13555 13556 ret = drm_atomic_helper_check_planes(dev, state); 13557 if (ret) 13558 return ret; 13559 13560 intel_fbc_choose_crtc(dev_priv, state); 13561 return calc_watermark_data(state); 13562 } 13563 13564 static int intel_atomic_prepare_commit(struct drm_device *dev, 13565 struct drm_atomic_state *state, 13566 bool nonblock) 13567 { 13568 struct drm_i915_private *dev_priv = to_i915(dev); 13569 struct drm_plane_state *plane_state; 13570 struct drm_crtc_state *crtc_state; 13571 struct drm_plane *plane; 13572 struct drm_crtc *crtc; 13573 int i, ret; 13574 13575 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13576 if (state->legacy_cursor_update) 13577 continue; 13578 13579 ret = intel_crtc_wait_for_pending_flips(crtc); 13580 if (ret) 13581 return ret; 13582 13583 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2) 13584 flush_workqueue(dev_priv->wq); 13585 } 13586 13587 ret = mutex_lock_interruptible(&dev->struct_mutex); 13588 if (ret) 13589 return ret; 13590 13591 ret = drm_atomic_helper_prepare_planes(dev, state); 13592 mutex_unlock(&dev->struct_mutex); 13593 13594 if (!ret && !nonblock) { 13595 for_each_plane_in_state(state, plane, plane_state, i) { 13596 struct intel_plane_state *intel_plane_state = 13597 to_intel_plane_state(plane_state); 13598 13599 if (!intel_plane_state->wait_req) 13600 continue; 13601 13602 ret = i915_wait_request(intel_plane_state->wait_req, 13603 true, NULL, NULL); 13604 if (ret) { 13605 /* Any hang should be swallowed by the wait */ 13606 WARN_ON(ret == -EIO); 13607 mutex_lock(&dev->struct_mutex); 13608 drm_atomic_helper_cleanup_planes(dev, state); 13609 mutex_unlock(&dev->struct_mutex); 13610 break; 13611 } 13612 } 13613 } 13614 13615 return ret; 13616 } 13617 13618 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 13619 { 13620 struct drm_device *dev = crtc->base.dev; 13621 13622 if (!dev->max_vblank_count) 13623 return drm_accurate_vblank_count(&crtc->base); 13624 13625 return dev->driver->get_vblank_counter(dev, crtc->pipe); 13626 } 13627 13628 static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 13629 struct drm_i915_private *dev_priv, 13630 unsigned crtc_mask) 13631 { 13632 unsigned last_vblank_count[I915_MAX_PIPES]; 13633 enum i915_pipe pipe; 13634 int ret; 13635 13636 if (!crtc_mask) 13637 return; 13638 13639 for_each_pipe(dev_priv, pipe) { 13640 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 13641 13642 if (!((1 << pipe) & crtc_mask)) 13643 continue; 13644 13645 ret = drm_crtc_vblank_get(crtc); 13646 if 
(WARN_ON(ret != 0)) { 13647 crtc_mask &= ~(1 << pipe); 13648 continue; 13649 } 13650 13651 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc); 13652 } 13653 13654 for_each_pipe(dev_priv, pipe) { 13655 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 13656 long lret; 13657 13658 if (!((1 << pipe) & crtc_mask)) 13659 continue; 13660 13661 lret = wait_event_timeout(dev->vblank[pipe].queue, 13662 last_vblank_count[pipe] != 13663 drm_crtc_vblank_count(crtc), 13664 msecs_to_jiffies(50)); 13665 13666 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); 13667 13668 drm_crtc_vblank_put(crtc); 13669 } 13670 } 13671 13672 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) 13673 { 13674 /* fb updated, need to unpin old fb */ 13675 if (crtc_state->fb_changed) 13676 return true; 13677 13678 /* wm changes, need vblank before final wm's */ 13679 if (crtc_state->update_wm_post) 13680 return true; 13681 13682 /* 13683 * cxsr is re-enabled after vblank. 13684 * This is already handled by crtc_state->update_wm_post, 13685 * but added for clarity. 13686 */ 13687 if (crtc_state->disable_cxsr) 13688 return true; 13689 13690 return false; 13691 } 13692 13693 static void intel_atomic_commit_tail(struct drm_atomic_state *state) 13694 { 13695 struct drm_device *dev = state->dev; 13696 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13697 struct drm_i915_private *dev_priv = to_i915(dev); 13698 struct drm_crtc_state *old_crtc_state; 13699 struct drm_crtc *crtc; 13700 struct intel_crtc_state *intel_cstate; 13701 struct drm_plane *plane; 13702 struct drm_plane_state *plane_state; 13703 bool hw_check = intel_state->modeset; 13704 unsigned long put_domains[I915_MAX_PIPES] = {}; 13705 unsigned crtc_vblank_mask = 0; 13706 int i, ret; 13707 13708 for_each_plane_in_state(state, plane, plane_state, i) { 13709 struct intel_plane_state *intel_plane_state = 13710 to_intel_plane_state(plane_state); 13711 13712 if (!intel_plane_state->wait_req) 13713 continue; 13714 13715 ret = i915_wait_request(intel_plane_state->wait_req, 13716 true, NULL, NULL); 13717 /* EIO should be eaten, and we can't get interrupted in the 13718 * worker, and blocking commits have waited already. */ 13719 WARN_ON(ret); 13720 } 13721 13722 drm_atomic_helper_wait_for_dependencies(state); 13723 13724 if (intel_state->modeset) { 13725 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, 13726 sizeof(intel_state->min_pixclk)); 13727 dev_priv->active_crtcs = intel_state->active_crtcs; 13728 dev_priv->atomic_cdclk_freq = intel_state->cdclk; 13729 13730 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 13731 } 13732 13733 for_each_crtc_in_state(state, crtc, old_crtc_state, i) { 13734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13735 13736 if (needs_modeset(crtc->state) || 13737 to_intel_crtc_state(crtc->state)->update_pipe) { 13738 hw_check = true; 13739 13740 put_domains[to_intel_crtc(crtc)->pipe] = 13741 modeset_get_crtc_power_domains(crtc, 13742 to_intel_crtc_state(crtc->state)); 13743 } 13744 13745 if (!needs_modeset(crtc->state)) 13746 continue; 13747 13748 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); 13749 13750 if (old_crtc_state->active) { 13751 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); 13752 dev_priv->display.crtc_disable(crtc); 13753 intel_crtc->active = false; 13754 intel_fbc_disable(intel_crtc); 13755 intel_disable_shared_dpll(intel_crtc); 13756 13757 /* 13758 * Underruns don't always raise 13759 * interrupts, so check manually. 
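			 * An underrun hitting during the disable sequence
			 * would otherwise go unnoticed here.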
 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		if (dev_priv->display.modeset_commit_cdclk &&
		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
			dev_priv->display.modeset_commit_cdclk(state);

		intel_modeset_verify_disabled(dev);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Complete events for now-disabled pipes here. */
		if (modeset && !crtc->state->active && crtc->state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&dev->event_lock);

			crtc->state->event = NULL;
		}

		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (crtc->state->active &&
		    drm_atomic_get_existing_plane_state(state, crtc->primary))
			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));

		if (crtc->state->active)
			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
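	 *
	 * The intermediate watermarks programmed before the plane updates
	 * had to be safe for both the old and the new state; only now, after
	 * the vblank, can the tighter optimal values be written.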
13837 */ 13838 for_each_crtc_in_state(state, crtc, old_crtc_state, i) { 13839 intel_cstate = to_intel_crtc_state(crtc->state); 13840 13841 if (dev_priv->display.optimize_watermarks) 13842 dev_priv->display.optimize_watermarks(intel_cstate); 13843 } 13844 13845 for_each_crtc_in_state(state, crtc, old_crtc_state, i) { 13846 intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); 13847 13848 if (put_domains[i]) 13849 modeset_put_power_domains(dev_priv, put_domains[i]); 13850 13851 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); 13852 } 13853 13854 drm_atomic_helper_commit_hw_done(state); 13855 13856 if (intel_state->modeset) 13857 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 13858 13859 mutex_lock(&dev->struct_mutex); 13860 drm_atomic_helper_cleanup_planes(dev, state); 13861 mutex_unlock(&dev->struct_mutex); 13862 13863 drm_atomic_helper_commit_cleanup_done(state); 13864 13865 drm_atomic_state_free(state); 13866 13867 /* As one of the primary mmio accessors, KMS has a high likelihood 13868 * of triggering bugs in unclaimed access. After we finish 13869 * modesetting, see if an error has been flagged, and if so 13870 * enable debugging for the next modeset - and hope we catch 13871 * the culprit. 13872 * 13873 * XXX note that we assume display power is on at this point. 13874 * This might hold true now but we need to add pm helper to check 13875 * unclaimed only when the hardware is on, as atomic commits 13876 * can happen also when the device is completely off. 13877 */ 13878 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 13879 } 13880 13881 static void intel_atomic_commit_work(struct work_struct *work) 13882 { 13883 struct drm_atomic_state *state = container_of(work, 13884 struct drm_atomic_state, 13885 commit_work); 13886 intel_atomic_commit_tail(state); 13887 } 13888 13889 static void intel_atomic_track_fbs(struct drm_atomic_state *state) 13890 { 13891 struct drm_plane_state *old_plane_state; 13892 struct drm_plane *plane; 13893 int i; 13894 13895 for_each_plane_in_state(state, plane, old_plane_state, i) 13896 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), 13897 intel_fb_obj(plane->state->fb), 13898 to_intel_plane(plane)->frontbuffer_bit); 13899 } 13900 13901 /** 13902 * intel_atomic_commit - commit validated state object 13903 * @dev: DRM device 13904 * @state: the top-level driver state object 13905 * @nonblock: nonblocking commit 13906 * 13907 * This function commits a top-level state object that has been validated 13908 * with drm_atomic_helper_check(). 13909 * 13910 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment 13911 * nonblocking commits are only safe for pure plane updates. Everything else 13912 * should work though. 13913 * 13914 * RETURNS 13915 * Zero for success or -errno. 
13916 */ 13917 static int intel_atomic_commit(struct drm_device *dev, 13918 struct drm_atomic_state *state, 13919 bool nonblock) 13920 { 13921 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13922 struct drm_i915_private *dev_priv = to_i915(dev); 13923 int ret = 0; 13924 13925 if (intel_state->modeset && nonblock) { 13926 DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); 13927 return -EINVAL; 13928 } 13929 13930 ret = drm_atomic_helper_setup_commit(state, nonblock); 13931 if (ret) 13932 return ret; 13933 13934 INIT_WORK(&state->commit_work, intel_atomic_commit_work); 13935 13936 ret = intel_atomic_prepare_commit(dev, state, nonblock); 13937 if (ret) { 13938 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 13939 return ret; 13940 } 13941 13942 drm_atomic_helper_swap_state(state, true); 13943 dev_priv->wm.distrust_bios_wm = false; 13944 dev_priv->wm.skl_results = intel_state->wm_results; 13945 intel_shared_dpll_commit(state); 13946 intel_atomic_track_fbs(state); 13947 13948 if (nonblock) 13949 queue_work(system_unbound_wq, &state->commit_work); 13950 else 13951 intel_atomic_commit_tail(state); 13952 13953 return 0; 13954 } 13955 13956 void intel_crtc_restore_mode(struct drm_crtc *crtc) 13957 { 13958 struct drm_device *dev = crtc->dev; 13959 struct drm_atomic_state *state; 13960 struct drm_crtc_state *crtc_state; 13961 int ret; 13962 13963 state = drm_atomic_state_alloc(dev); 13964 if (!state) { 13965 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", 13966 crtc->base.id, crtc->name); 13967 return; 13968 } 13969 13970 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 13971 13972 retry: 13973 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13974 ret = PTR_ERR_OR_ZERO(crtc_state); 13975 if (!ret) { 13976 if (!crtc_state->active) 13977 goto out; 13978 13979 crtc_state->mode_changed = true; 13980 ret = drm_atomic_commit(state); 13981 } 13982 13983 if (ret == -EDEADLK) { 13984 drm_atomic_state_clear(state); 13985 drm_modeset_backoff(state->acquire_ctx); 13986 goto retry; 13987 } 13988 13989 if (ret) 13990 out: 13991 drm_atomic_state_free(state); 13992 } 13993 13994 #undef for_each_intel_crtc_masked 13995 13996 /* 13997 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling 13998 * drm_atomic_helper_legacy_gamma_set() directly. 13999 */ 14000 static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc, 14001 u16 *red, u16 *green, u16 *blue, 14002 uint32_t size) 14003 { 14004 struct drm_device *dev = crtc->dev; 14005 struct drm_mode_config *config = &dev->mode_config; 14006 struct drm_crtc_state *state; 14007 int ret; 14008 14009 ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size); 14010 if (ret) 14011 return ret; 14012 14013 /* 14014 * Make sure we update the legacy properties so this works when 14015 * atomic is not enabled. 14016 */ 14017 14018 state = crtc->state; 14019 14020 drm_object_property_set_value(&crtc->base, 14021 config->degamma_lut_property, 14022 (state->degamma_lut) ? 14023 state->degamma_lut->base.id : 0); 14024 14025 drm_object_property_set_value(&crtc->base, 14026 config->ctm_property, 14027 (state->ctm) ? 14028 state->ctm->base.id : 0); 14029 14030 drm_object_property_set_value(&crtc->base, 14031 config->gamma_lut_property, 14032 (state->gamma_lut) ? 
				      state->gamma_lut->base.id : 0);

	return 0;
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_atomic_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	struct reservation_object *resv;
	int ret = 0;

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * pointing to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);
		if (ret) {
			/* GPU hangs should have been swallowed by the wait */
			WARN_ON(ret == -EIO);
			return ret;
		}
	}

	if (!obj)
		return 0;

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(resv, false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		int align = IS_I830(dev) ?
			    16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
	}

	if (ret == 0) {
		to_intel_plane_state(new_state)->wait_req =
			i915_gem_active_get(&obj->last_write,
					    &obj->base.dev->struct_mutex);
	}

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane_state *old_intel_state;
	struct intel_plane_state *intel_state = to_intel_plane_state(plane->state);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);

	i915_gem_request_assign(&intel_state->wait_req, NULL);
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}

int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is the lower of:
	 * - just under 3.0 in 16.16 fixed point (the -1 keeps it below 3.0)
	 * - cdclk/crtc_clock, also in 16.16 fixed point
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     state->base.rotation,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state
*old_intel_state = 14227 to_intel_crtc_state(old_crtc_state); 14228 bool modeset = needs_modeset(crtc->state); 14229 14230 /* Perform vblank evasion around commit operation */ 14231 intel_pipe_update_start(intel_crtc); 14232 14233 if (modeset) 14234 return; 14235 14236 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { 14237 intel_color_set_csc(crtc->state); 14238 intel_color_load_luts(crtc->state); 14239 } 14240 14241 if (to_intel_crtc_state(crtc->state)->update_pipe) 14242 intel_update_pipe_config(intel_crtc, old_intel_state); 14243 else if (INTEL_INFO(dev)->gen >= 9) 14244 skl_detach_scalers(intel_crtc); 14245 } 14246 14247 static void intel_finish_crtc_commit(struct drm_crtc *crtc, 14248 struct drm_crtc_state *old_crtc_state) 14249 { 14250 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 14251 14252 intel_pipe_update_end(intel_crtc, NULL); 14253 } 14254 14255 /** 14256 * intel_plane_destroy - destroy a plane 14257 * @plane: plane to destroy 14258 * 14259 * Common destruction function for all types of planes (primary, cursor, 14260 * sprite). 14261 */ 14262 void intel_plane_destroy(struct drm_plane *plane) 14263 { 14264 if (!plane) 14265 return; 14266 14267 drm_plane_cleanup(plane); 14268 kfree(to_intel_plane(plane)); 14269 } 14270 14271 const struct drm_plane_funcs intel_plane_funcs = { 14272 .update_plane = drm_atomic_helper_update_plane, 14273 .disable_plane = drm_atomic_helper_disable_plane, 14274 .destroy = intel_plane_destroy, 14275 .set_property = drm_atomic_helper_plane_set_property, 14276 .atomic_get_property = intel_plane_atomic_get_property, 14277 .atomic_set_property = intel_plane_atomic_set_property, 14278 .atomic_duplicate_state = intel_plane_duplicate_state, 14279 .atomic_destroy_state = intel_plane_destroy_state, 14280 14281 }; 14282 14283 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, 14284 int pipe) 14285 { 14286 struct intel_plane *primary = NULL; 14287 struct intel_plane_state *state = NULL; 14288 const uint32_t *intel_primary_formats; 14289 unsigned int num_formats; 14290 int ret; 14291 14292 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 14293 if (!primary) 14294 goto fail; 14295 14296 state = intel_create_plane_state(&primary->base); 14297 if (!state) 14298 goto fail; 14299 primary->base.state = &state->base; 14300 14301 primary->can_scale = false; 14302 primary->max_downscale = 1; 14303 if (INTEL_INFO(dev)->gen >= 9) { 14304 primary->can_scale = true; 14305 state->scaler_id = -1; 14306 } 14307 primary->pipe = pipe; 14308 primary->plane = pipe; 14309 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 14310 primary->check_plane = intel_check_primary_plane; 14311 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) 14312 primary->plane = !pipe; 14313 14314 if (INTEL_INFO(dev)->gen >= 9) { 14315 intel_primary_formats = skl_primary_formats; 14316 num_formats = ARRAY_SIZE(skl_primary_formats); 14317 14318 primary->update_plane = skylake_update_primary_plane; 14319 primary->disable_plane = skylake_disable_primary_plane; 14320 } else if (HAS_PCH_SPLIT(dev)) { 14321 intel_primary_formats = i965_primary_formats; 14322 num_formats = ARRAY_SIZE(i965_primary_formats); 14323 14324 primary->update_plane = ironlake_update_primary_plane; 14325 primary->disable_plane = i9xx_disable_primary_plane; 14326 } else if (INTEL_INFO(dev)->gen >= 4) { 14327 intel_primary_formats = i965_primary_formats; 14328 num_formats = ARRAY_SIZE(i965_primary_formats); 14329 14330 primary->update_plane = i9xx_update_primary_plane; 
14331 primary->disable_plane = i9xx_disable_primary_plane; 14332 } else { 14333 intel_primary_formats = i8xx_primary_formats; 14334 num_formats = ARRAY_SIZE(i8xx_primary_formats); 14335 14336 primary->update_plane = i9xx_update_primary_plane; 14337 primary->disable_plane = i9xx_disable_primary_plane; 14338 } 14339 14340 if (INTEL_INFO(dev)->gen >= 9) 14341 ret = drm_universal_plane_init(dev, &primary->base, 0, 14342 &intel_plane_funcs, 14343 intel_primary_formats, num_formats, 14344 DRM_PLANE_TYPE_PRIMARY, 14345 "plane 1%c", pipe_name(pipe)); 14346 else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 14347 ret = drm_universal_plane_init(dev, &primary->base, 0, 14348 &intel_plane_funcs, 14349 intel_primary_formats, num_formats, 14350 DRM_PLANE_TYPE_PRIMARY, 14351 "primary %c", pipe_name(pipe)); 14352 else 14353 ret = drm_universal_plane_init(dev, &primary->base, 0, 14354 &intel_plane_funcs, 14355 intel_primary_formats, num_formats, 14356 DRM_PLANE_TYPE_PRIMARY, 14357 "plane %c", plane_name(primary->plane)); 14358 if (ret) 14359 goto fail; 14360 14361 if (INTEL_INFO(dev)->gen >= 4) 14362 intel_create_rotation_property(dev, primary); 14363 14364 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 14365 14366 return &primary->base; 14367 14368 fail: 14369 kfree(state); 14370 kfree(primary); 14371 14372 return NULL; 14373 } 14374 14375 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) 14376 { 14377 if (!dev->mode_config.rotation_property) { 14378 unsigned long flags = DRM_ROTATE_0 | 14379 DRM_ROTATE_180; 14380 14381 if (INTEL_INFO(dev)->gen >= 9) 14382 flags |= DRM_ROTATE_90 | DRM_ROTATE_270; 14383 14384 dev->mode_config.rotation_property = 14385 drm_mode_create_rotation_property(dev, flags); 14386 } 14387 if (dev->mode_config.rotation_property) 14388 drm_object_attach_property(&plane->base.base, 14389 dev->mode_config.rotation_property, 14390 plane->base.state->rotation); 14391 } 14392 14393 static int 14394 intel_check_cursor_plane(struct drm_plane *plane, 14395 struct intel_crtc_state *crtc_state, 14396 struct intel_plane_state *state) 14397 { 14398 struct drm_crtc *crtc = crtc_state->base.crtc; 14399 struct drm_framebuffer *fb = state->base.fb; 14400 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14401 enum i915_pipe pipe = to_intel_plane(plane)->pipe; 14402 unsigned stride; 14403 int ret; 14404 14405 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, 14406 &state->dst, &state->clip, 14407 state->base.rotation, 14408 DRM_PLANE_HELPER_NO_SCALING, 14409 DRM_PLANE_HELPER_NO_SCALING, 14410 true, true, &state->visible); 14411 if (ret) 14412 return ret; 14413 14414 /* if we want to turn off the cursor ignore width and height */ 14415 if (!obj) 14416 return 0; 14417 14418 /* Check for which cursor types we support */ 14419 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { 14420 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 14421 state->base.crtc_w, state->base.crtc_h); 14422 return -EINVAL; 14423 } 14424 14425 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 14426 if (obj->base.size < stride * state->base.crtc_h) { 14427 DRM_DEBUG_KMS("buffer is too small\n"); 14428 return -ENOMEM; 14429 } 14430 14431 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { 14432 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 14433 return -EINVAL; 14434 } 14435 14436 /* 14437 * There's something wrong with the cursor on CHV pipe C. 
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}

static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_addr = 0;
	intel_crtc_update_cursor(crtc, NULL);
}

static void
intel_update_cursor_plane(struct drm_plane *plane,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = plane->dev;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc_update_cursor(crtc, state);
}

static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor)
		goto fail;

	state = intel_create_plane_state(&cursor->base);
	if (!state)
		goto fail;
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(dev, &cursor->base, 0,
				       &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
								  DRM_ROTATE_0 |
								  DRM_ROTATE_180);
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
						   dev->mode_config.rotation_property,
						   state->base.rotation);
	}

	if (INTEL_INFO(dev)->gen >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;

fail:
	kfree(state);
	kfree(cursor);

	return NULL;
}

static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state)
{
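	/* Mark every scaler unused; a scaler_id of -1 means none assigned. */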
14550 int i; 14551 struct intel_scaler *intel_scaler; 14552 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 14553 14554 for (i = 0; i < intel_crtc->num_scalers; i++) { 14555 intel_scaler = &scaler_state->scalers[i]; 14556 intel_scaler->in_use = 0; 14557 intel_scaler->mode = PS_SCALER_MODE_DYN; 14558 } 14559 14560 scaler_state->scaler_id = -1; 14561 } 14562 14563 static void intel_crtc_init(struct drm_device *dev, int pipe) 14564 { 14565 struct drm_i915_private *dev_priv = to_i915(dev); 14566 struct intel_crtc *intel_crtc; 14567 struct intel_crtc_state *crtc_state = NULL; 14568 struct drm_plane *primary = NULL; 14569 struct drm_plane *cursor = NULL; 14570 int ret; 14571 14572 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 14573 if (intel_crtc == NULL) 14574 return; 14575 14576 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 14577 if (!crtc_state) 14578 goto fail; 14579 intel_crtc->config = crtc_state; 14580 intel_crtc->base.state = &crtc_state->base; 14581 crtc_state->base.crtc = &intel_crtc->base; 14582 14583 /* initialize shared scalers */ 14584 if (INTEL_INFO(dev)->gen >= 9) { 14585 if (pipe == PIPE_C) 14586 intel_crtc->num_scalers = 1; 14587 else 14588 intel_crtc->num_scalers = SKL_NUM_SCALERS; 14589 14590 skl_init_scalers(dev, intel_crtc, crtc_state); 14591 } 14592 14593 primary = intel_primary_plane_create(dev, pipe); 14594 if (!primary) 14595 goto fail; 14596 14597 cursor = intel_cursor_plane_create(dev, pipe); 14598 if (!cursor) 14599 goto fail; 14600 14601 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14602 cursor, &intel_crtc_funcs, 14603 "pipe %c", pipe_name(pipe)); 14604 if (ret) 14605 goto fail; 14606 14607 /* 14608 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port 14609 * is hooked to pipe B. Hence we want plane A feeding pipe B. 
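	 * The primary plane created in intel_primary_plane_create() performs
	 * the same swap, keeping the crtc consistent with its plane.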
14610 */ 14611 intel_crtc->pipe = pipe; 14612 intel_crtc->plane = pipe; 14613 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { 14614 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 14615 intel_crtc->plane = !pipe; 14616 } 14617 14618 intel_crtc->cursor_base = ~0; 14619 intel_crtc->cursor_cntl = ~0; 14620 intel_crtc->cursor_size = ~0; 14621 14622 intel_crtc->wm.cxsr_allowed = true; 14623 14624 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 14625 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 14626 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 14627 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 14628 14629 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 14630 14631 intel_color_init(&intel_crtc->base); 14632 14633 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 14634 return; 14635 14636 fail: 14637 intel_plane_destroy(primary); 14638 intel_plane_destroy(cursor); 14639 kfree(crtc_state); 14640 kfree(intel_crtc); 14641 } 14642 14643 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 14644 { 14645 struct drm_encoder *encoder = connector->base.encoder; 14646 struct drm_device *dev = connector->base.dev; 14647 14648 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 14649 14650 if (!encoder || WARN_ON(!encoder->crtc)) 14651 return INVALID_PIPE; 14652 14653 return to_intel_crtc(encoder->crtc)->pipe; 14654 } 14655 14656 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 14657 struct drm_file *file) 14658 { 14659 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 14660 struct drm_crtc *drmmode_crtc; 14661 struct intel_crtc *crtc; 14662 14663 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14664 if (!drmmode_crtc) 14665 return -ENOENT; 14666 14667 crtc = to_intel_crtc(drmmode_crtc); 14668 pipe_from_crtc_id->pipe = crtc->pipe; 14669 14670 return 0; 14671 } 14672 14673 static int intel_encoder_clones(struct intel_encoder *encoder) 14674 { 14675 struct drm_device *dev = encoder->base.dev; 14676 struct intel_encoder *source_encoder; 14677 int index_mask = 0; 14678 int entry = 0; 14679 14680 for_each_intel_encoder(dev, source_encoder) { 14681 if (encoders_cloneable(encoder, source_encoder)) 14682 index_mask |= (1 << entry); 14683 14684 entry++; 14685 } 14686 14687 return index_mask; 14688 } 14689 14690 static bool has_edp_a(struct drm_device *dev) 14691 { 14692 struct drm_i915_private *dev_priv = to_i915(dev); 14693 14694 if (!IS_MOBILE(dev)) 14695 return false; 14696 14697 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 14698 return false; 14699 14700 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 14701 return false; 14702 14703 return true; 14704 } 14705 14706 static bool intel_crt_present(struct drm_device *dev) 14707 { 14708 struct drm_i915_private *dev_priv = to_i915(dev); 14709 14710 if (INTEL_INFO(dev)->gen >= 9) 14711 return false; 14712 14713 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 14714 return false; 14715 14716 if (IS_CHERRYVIEW(dev)) 14717 return false; 14718 14719 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 14720 return false; 14721 14722 /* DDI E can't be used if DDI A requires 4 lanes */ 14723 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 14724 return false; 14725 14726 if (!dev_priv->vbt.int_crt_support) 14727 return false; 14728 14729 return true; 14730 } 14731 14732 static void intel_setup_outputs(struct drm_device *dev) 
{
14734 struct drm_i915_private *dev_priv = to_i915(dev);
14735 struct intel_encoder *encoder;
14736 bool dpd_is_edp = false;
14737
14738 /*
14739 * intel_edp_init_connector() depends on this completing first, to
14740 * prevent the registration of both eDP and LVDS and the incorrect
14741 * sharing of the PPS.
14742 */
14743 intel_lvds_init(dev);
14744
14745 if (intel_crt_present(dev))
14746 intel_crt_init(dev);
14747
14748 if (IS_BROXTON(dev)) {
14749 /*
14750 * FIXME: Broxton doesn't support port detection via the
14751 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14752 * detect the ports.
14753 */
14754 intel_ddi_init(dev, PORT_A);
14755 intel_ddi_init(dev, PORT_B);
14756 intel_ddi_init(dev, PORT_C);
14757
14758 intel_dsi_init(dev);
14759 } else if (HAS_DDI(dev)) {
14760 int found;
14761
14762 /*
14763 * Haswell uses DDI functions to detect digital outputs.
14764 * On SKL pre-D0 the strap isn't connected, so we assume
14765 * it's there.
14766 */
14767 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14768 /* WaIgnoreDDIAStrap: skl */
14769 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14770 intel_ddi_init(dev, PORT_A);
14771
14772 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
14773 * register */
14774 found = I915_READ(SFUSE_STRAP);
14775
14776 if (found & SFUSE_STRAP_DDIB_DETECTED)
14777 intel_ddi_init(dev, PORT_B);
14778 if (found & SFUSE_STRAP_DDIC_DETECTED)
14779 intel_ddi_init(dev, PORT_C);
14780 if (found & SFUSE_STRAP_DDID_DETECTED)
14781 intel_ddi_init(dev, PORT_D);
14782 /*
14783 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14784 */
14785 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14786 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14787 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14788 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14789 intel_ddi_init(dev, PORT_E);
14790
14791 } else if (HAS_PCH_SPLIT(dev)) {
14792 int found;
14793 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14794
14795 if (has_edp_a(dev))
14796 intel_dp_init(dev, DP_A, PORT_A);
14797
14798 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14799 /* PCH SDVOB is multiplexed with HDMIB */
14800 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14801 if (!found)
14802 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14803 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14804 intel_dp_init(dev, PCH_DP_B, PORT_B);
14805 }
14806
14807 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14808 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14809
14810 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14811 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14812
14813 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14814 intel_dp_init(dev, PCH_DP_C, PORT_C);
14815
14816 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14817 intel_dp_init(dev, PCH_DP_D, PORT_D);
14818 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14819 bool has_edp, has_port;
14820
14821 /*
14822 * The DP_DETECTED bit is the latched state of the DDC
14823 * SDA pin at boot. However since eDP doesn't require DDC
14824 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14825 * eDP ports may have been muxed to an alternate function.
14826 * Thus we can't rely on the DP_DETECTED bit alone to detect
14827 * eDP ports. Consult the VBT as well as DP_DETECTED to
14828 * detect eDP ports.
14829 *
14830 * Sadly the straps seem to be missing sometimes even for HDMI
14831 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14832 * and VBT for the presence of the port.
Additionally we can't
14833 * trust the port type the VBT declares as we've seen at least
14834 * HDMI ports that the VBT claims are DP or eDP.
14835 */
14836 has_edp = intel_dp_is_edp(dev, PORT_B);
14837 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14838 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14839 has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14840 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14841 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14842
14843 has_edp = intel_dp_is_edp(dev, PORT_C);
14844 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14845 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14846 has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14847 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14848 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14849
14850 if (IS_CHERRYVIEW(dev)) {
14851 /*
14852 * eDP not supported on port D,
14853 * so no need to worry about it
14854 */
14855 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14856 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14857 intel_dp_init(dev, CHV_DP_D, PORT_D);
14858 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14859 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14860 }
14861
14862 intel_dsi_init(dev);
14863 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14864 bool found = false;
14865
14866 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14867 DRM_DEBUG_KMS("probing SDVOB\n");
14868 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14869 if (!found && IS_G4X(dev)) {
14870 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14871 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14872 }
14873
14874 if (!found && IS_G4X(dev))
14875 intel_dp_init(dev, DP_B, PORT_B);
14876 }
14877
14878 /* Before G4X, SDVOC doesn't have its own detect register */
14879
14880 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14881 DRM_DEBUG_KMS("probing SDVOC\n");
14882 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14883 }
14884
14885 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14886
14887 if (IS_G4X(dev)) {
14888 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14889 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14890 }
14891 if (IS_G4X(dev))
14892 intel_dp_init(dev, DP_C, PORT_C);
14893 }
14894
14895 if (IS_G4X(dev) &&
14896 (I915_READ(DP_D) & DP_DETECTED))
14897 intel_dp_init(dev, DP_D, PORT_D);
14898 } else if (IS_GEN2(dev))
14899 intel_dvo_init(dev);
14900
14901 if (SUPPORTS_TV(dev))
14902 intel_tv_init(dev);
14903
14904 intel_psr_init(dev);
14905
14906 for_each_intel_encoder(dev, encoder) {
14907 encoder->base.possible_crtcs = encoder->crtc_mask;
14908 encoder->base.possible_clones =
14909 intel_encoder_clones(encoder);
14910 }
14911
14912 intel_init_pch_refclk(dev);
14913
14914 drm_helper_move_panel_connectors_to_head(dev);
14915 }
14916
14917 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14918 {
14919 struct drm_device *dev = fb->dev;
14920 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14921
14922 drm_framebuffer_cleanup(fb);
14923 mutex_lock(&dev->struct_mutex);
14924 WARN_ON(!intel_fb->obj->framebuffer_references--);
14925 i915_gem_object_put(intel_fb->obj);
14926 mutex_unlock(&dev->struct_mutex);
14927 kfree(intel_fb);
14928 }
14929
14930 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14931 struct drm_file *file,
14932 unsigned int *handle)
14933 {
14934 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14935 struct drm_i915_gem_object *obj = intel_fb->obj;
14936
14937 if
(obj->userptr.mm) {
14938 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14939 return -EINVAL;
14940 }
14941
14942 return drm_gem_handle_create(file, &obj->base, handle);
14943 }
14944
14945 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14946 struct drm_file *file,
14947 unsigned flags, unsigned color,
14948 struct drm_clip_rect *clips,
14949 unsigned num_clips)
14950 {
14951 struct drm_device *dev = fb->dev;
14952 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14953 struct drm_i915_gem_object *obj = intel_fb->obj;
14954
14955 mutex_lock(&dev->struct_mutex);
14956 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14957 mutex_unlock(&dev->struct_mutex);
14958
14959 return 0;
14960 }
14961
14962 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14963 .destroy = intel_user_framebuffer_destroy,
14964 .create_handle = intel_user_framebuffer_create_handle,
14965 .dirty = intel_user_framebuffer_dirty,
14966 };
14967
14968 static
14969 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14970 uint32_t pixel_format)
14971 {
14972 u32 gen = INTEL_INFO(dev)->gen;
14973
14974 if (gen >= 9) {
14975 int cpp = drm_format_plane_cpp(pixel_format, 0);
14976
14977 /* "The stride in bytes must not exceed the size of 8K
14978 * pixels and 32K bytes."
14979 */
14980 return min(8192 * cpp, 32768);
14981 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14982 return 32*1024;
14983 } else if (gen >= 4) {
14984 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14985 return 16*1024;
14986 else
14987 return 32*1024;
14988 } else if (gen >= 3) {
14989 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14990 return 8*1024;
14991 else
14992 return 16*1024;
14993 } else {
14994 /* XXX DSPC is limited to 4k tiled */
14995 return 8*1024;
14996 }
14997 }
14998
14999 static int intel_framebuffer_init(struct drm_device *dev,
15000 struct intel_framebuffer *intel_fb,
15001 struct drm_mode_fb_cmd2 *mode_cmd,
15002 struct drm_i915_gem_object *obj)
15003 {
15004 struct drm_i915_private *dev_priv = to_i915(dev);
15005 unsigned int aligned_height;
15006 int ret;
15007 u32 pitch_limit, stride_alignment;
15008
15009 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
15010
15011 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15012 /* Enforce that fb modifier and tiling mode match, but only for
15013 * X-tiled. This is needed for FBC. */
15014 if (!!(i915_gem_object_get_tiling(obj) == I915_TILING_X) !=
15015 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
15016 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
15017 return -EINVAL;
15018 }
15019 } else {
15020 if (i915_gem_object_get_tiling(obj) == I915_TILING_X)
15021 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15022 else if (i915_gem_object_get_tiling(obj) == I915_TILING_Y) {
15023 DRM_DEBUG("No Y tiling for legacy addfb\n");
15024 return -EINVAL;
15025 }
15026 }
15027
15028 /* Sanity check the passed-in modifier.
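 * Y and Yf tiling require gen9+; X-tiled and linear are accepted on
 * all platforms.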
*/
15029 switch (mode_cmd->modifier[0]) {
15030 case I915_FORMAT_MOD_Y_TILED:
15031 case I915_FORMAT_MOD_Yf_TILED:
15032 if (INTEL_INFO(dev)->gen < 9) {
15033 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
15034 mode_cmd->modifier[0]);
15035 return -EINVAL;
15036 } /* fall through */
15037 case DRM_FORMAT_MOD_NONE:
15038 case I915_FORMAT_MOD_X_TILED:
15039 break;
15040 default:
15041 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
15042 mode_cmd->modifier[0]);
15043 return -EINVAL;
15044 }
15045
15046 stride_alignment = intel_fb_stride_alignment(dev_priv,
15047 mode_cmd->modifier[0],
15048 mode_cmd->pixel_format);
15049 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
15050 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
15051 mode_cmd->pitches[0], stride_alignment);
15052 return -EINVAL;
15053 }
15054
15055 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
15056 mode_cmd->pixel_format);
15057 if (mode_cmd->pitches[0] > pitch_limit) {
15058 DRM_DEBUG("%s pitch (%u) must be less than %d\n",
15059 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
15060 "tiled" : "linear",
15061 mode_cmd->pitches[0], pitch_limit);
15062 return -EINVAL;
15063 }
15064
15065 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
15066 mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
15067 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
15068 mode_cmd->pitches[0],
15069 i915_gem_object_get_stride(obj));
15070 return -EINVAL;
15071 }
15072
15073 /* Reject formats not supported by any plane early. */
15074 switch (mode_cmd->pixel_format) {
15075 case DRM_FORMAT_C8:
15076 case DRM_FORMAT_RGB565:
15077 case DRM_FORMAT_XRGB8888:
15078 case DRM_FORMAT_ARGB8888:
15079 break;
15080 case DRM_FORMAT_XRGB1555:
15081 if (INTEL_INFO(dev)->gen > 3) {
15082 DRM_DEBUG("unsupported pixel format: %s\n",
15083 drm_get_format_name(mode_cmd->pixel_format));
15084 return -EINVAL;
15085 }
15086 break;
15087 case DRM_FORMAT_ABGR8888:
15088 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
15089 INTEL_INFO(dev)->gen < 9) {
15090 DRM_DEBUG("unsupported pixel format: %s\n",
15091 drm_get_format_name(mode_cmd->pixel_format));
15092 return -EINVAL;
15093 }
15094 break;
15095 case DRM_FORMAT_XBGR8888:
15096 case DRM_FORMAT_XRGB2101010:
15097 case DRM_FORMAT_XBGR2101010:
15098 if (INTEL_INFO(dev)->gen < 4) {
15099 DRM_DEBUG("unsupported pixel format: %s\n",
15100 drm_get_format_name(mode_cmd->pixel_format));
15101 return -EINVAL;
15102 }
15103 break;
15104 case DRM_FORMAT_ABGR2101010:
15105 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
15106 DRM_DEBUG("unsupported pixel format: %s\n",
15107 drm_get_format_name(mode_cmd->pixel_format));
15108 return -EINVAL;
15109 }
15110 break;
15111 case DRM_FORMAT_YUYV:
15112 case DRM_FORMAT_UYVY:
15113 case DRM_FORMAT_YVYU:
15114 case DRM_FORMAT_VYUY:
15115 if (INTEL_INFO(dev)->gen < 5) {
15116 DRM_DEBUG("unsupported pixel format: %s\n",
15117 drm_get_format_name(mode_cmd->pixel_format));
15118 return -EINVAL;
15119 }
15120 break;
15121 default:
15122 DRM_DEBUG("unsupported pixel format: %s\n",
15123 drm_get_format_name(mode_cmd->pixel_format));
15124 return -EINVAL;
15125 }
15126
15127 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15128 if (mode_cmd->offsets[0] != 0)
15129 return -EINVAL;
15130
15131 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
15132 mode_cmd->pixel_format,
15133 mode_cmd->modifier[0]);
15134 /* FIXME drm helper for size checks (especially planar formats)?
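 * For now only plane 0 is validated against the object size below.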
*/ 15135 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 15136 return -EINVAL; 15137 15138 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 15139 intel_fb->obj = obj; 15140 15141 intel_fill_fb_info(dev_priv, &intel_fb->base); 15142 15143 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 15144 if (ret) { 15145 DRM_ERROR("framebuffer init failed %d\n", ret); 15146 return ret; 15147 } 15148 15149 intel_fb->obj->framebuffer_references++; 15150 15151 return 0; 15152 } 15153 15154 static struct drm_framebuffer * 15155 intel_user_framebuffer_create(struct drm_device *dev, 15156 struct drm_file *filp, 15157 const struct drm_mode_fb_cmd2 *user_mode_cmd) 15158 { 15159 struct drm_framebuffer *fb; 15160 struct drm_i915_gem_object *obj; 15161 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 15162 15163 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 15164 if (!obj) 15165 return ERR_PTR(-ENOENT); 15166 15167 fb = intel_framebuffer_create(dev, &mode_cmd, obj); 15168 if (IS_ERR(fb)) 15169 i915_gem_object_put_unlocked(obj); 15170 15171 return fb; 15172 } 15173 15174 #ifndef CONFIG_DRM_FBDEV_EMULATION 15175 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 15176 { 15177 } 15178 #endif 15179 15180 static const struct drm_mode_config_funcs intel_mode_funcs = { 15181 .fb_create = intel_user_framebuffer_create, 15182 .output_poll_changed = intel_fbdev_output_poll_changed, 15183 .atomic_check = intel_atomic_check, 15184 .atomic_commit = intel_atomic_commit, 15185 .atomic_state_alloc = intel_atomic_state_alloc, 15186 .atomic_state_clear = intel_atomic_state_clear, 15187 }; 15188 15189 /** 15190 * intel_init_display_hooks - initialize the display modesetting hooks 15191 * @dev_priv: device private 15192 */ 15193 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 15194 { 15195 if (INTEL_INFO(dev_priv)->gen >= 9) { 15196 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15197 dev_priv->display.get_initial_plane_config = 15198 skylake_get_initial_plane_config; 15199 dev_priv->display.crtc_compute_clock = 15200 haswell_crtc_compute_clock; 15201 dev_priv->display.crtc_enable = haswell_crtc_enable; 15202 dev_priv->display.crtc_disable = haswell_crtc_disable; 15203 } else if (HAS_DDI(dev_priv)) { 15204 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15205 dev_priv->display.get_initial_plane_config = 15206 ironlake_get_initial_plane_config; 15207 dev_priv->display.crtc_compute_clock = 15208 haswell_crtc_compute_clock; 15209 dev_priv->display.crtc_enable = haswell_crtc_enable; 15210 dev_priv->display.crtc_disable = haswell_crtc_disable; 15211 } else if (HAS_PCH_SPLIT(dev_priv)) { 15212 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 15213 dev_priv->display.get_initial_plane_config = 15214 ironlake_get_initial_plane_config; 15215 dev_priv->display.crtc_compute_clock = 15216 ironlake_crtc_compute_clock; 15217 dev_priv->display.crtc_enable = ironlake_crtc_enable; 15218 dev_priv->display.crtc_disable = ironlake_crtc_disable; 15219 } else if (IS_CHERRYVIEW(dev_priv)) { 15220 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15221 dev_priv->display.get_initial_plane_config = 15222 i9xx_get_initial_plane_config; 15223 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 15224 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15225 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15226 } else if (IS_VALLEYVIEW(dev_priv)) { 15227 dev_priv->display.get_pipe_config = 
i9xx_get_pipe_config; 15228 dev_priv->display.get_initial_plane_config = 15229 i9xx_get_initial_plane_config; 15230 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 15231 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15232 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15233 } else if (IS_G4X(dev_priv)) { 15234 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15235 dev_priv->display.get_initial_plane_config = 15236 i9xx_get_initial_plane_config; 15237 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 15238 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15239 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15240 } else if (IS_PINEVIEW(dev_priv)) { 15241 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15242 dev_priv->display.get_initial_plane_config = 15243 i9xx_get_initial_plane_config; 15244 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 15245 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15246 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15247 } else if (!IS_GEN2(dev_priv)) { 15248 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15249 dev_priv->display.get_initial_plane_config = 15250 i9xx_get_initial_plane_config; 15251 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 15252 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15253 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15254 } else { 15255 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15256 dev_priv->display.get_initial_plane_config = 15257 i9xx_get_initial_plane_config; 15258 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 15259 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15260 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15261 } 15262 15263 /* Returns the core display clock speed */ 15264 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 15265 dev_priv->display.get_display_clock_speed = 15266 skylake_get_display_clock_speed; 15267 else if (IS_BROXTON(dev_priv)) 15268 dev_priv->display.get_display_clock_speed = 15269 broxton_get_display_clock_speed; 15270 else if (IS_BROADWELL(dev_priv)) 15271 dev_priv->display.get_display_clock_speed = 15272 broadwell_get_display_clock_speed; 15273 else if (IS_HASWELL(dev_priv)) 15274 dev_priv->display.get_display_clock_speed = 15275 haswell_get_display_clock_speed; 15276 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15277 dev_priv->display.get_display_clock_speed = 15278 valleyview_get_display_clock_speed; 15279 else if (IS_GEN5(dev_priv)) 15280 dev_priv->display.get_display_clock_speed = 15281 ilk_get_display_clock_speed; 15282 else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) || 15283 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 15284 dev_priv->display.get_display_clock_speed = 15285 i945_get_display_clock_speed; 15286 else if (IS_GM45(dev_priv)) 15287 dev_priv->display.get_display_clock_speed = 15288 gm45_get_display_clock_speed; 15289 else if (IS_CRESTLINE(dev_priv)) 15290 dev_priv->display.get_display_clock_speed = 15291 i965gm_get_display_clock_speed; 15292 else if (IS_PINEVIEW(dev_priv)) 15293 dev_priv->display.get_display_clock_speed = 15294 pnv_get_display_clock_speed; 15295 else if (IS_G33(dev_priv) || IS_G4X(dev_priv)) 15296 dev_priv->display.get_display_clock_speed = 15297 g33_get_display_clock_speed; 15298 else if (IS_I915G(dev_priv)) 15299 dev_priv->display.get_display_clock_speed = 15300 i915_get_display_clock_speed; 15301 else if (IS_I945GM(dev_priv) || IS_845G(dev_priv)) 15302 
dev_priv->display.get_display_clock_speed = 15303 i9xx_misc_get_display_clock_speed; 15304 else if (IS_I915GM(dev_priv)) 15305 dev_priv->display.get_display_clock_speed = 15306 i915gm_get_display_clock_speed; 15307 else if (IS_I865G(dev_priv)) 15308 dev_priv->display.get_display_clock_speed = 15309 i865_get_display_clock_speed; 15310 else if (IS_I85X(dev_priv)) 15311 dev_priv->display.get_display_clock_speed = 15312 i85x_get_display_clock_speed; 15313 else { /* 830 */ 15314 WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n"); 15315 dev_priv->display.get_display_clock_speed = 15316 i830_get_display_clock_speed; 15317 } 15318 15319 if (IS_GEN5(dev_priv)) { 15320 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 15321 } else if (IS_GEN6(dev_priv)) { 15322 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 15323 } else if (IS_IVYBRIDGE(dev_priv)) { 15324 /* FIXME: detect B0+ stepping and use auto training */ 15325 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15326 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15327 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15328 } 15329 15330 if (IS_BROADWELL(dev_priv)) { 15331 dev_priv->display.modeset_commit_cdclk = 15332 broadwell_modeset_commit_cdclk; 15333 dev_priv->display.modeset_calc_cdclk = 15334 broadwell_modeset_calc_cdclk; 15335 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15336 dev_priv->display.modeset_commit_cdclk = 15337 valleyview_modeset_commit_cdclk; 15338 dev_priv->display.modeset_calc_cdclk = 15339 valleyview_modeset_calc_cdclk; 15340 } else if (IS_BROXTON(dev_priv)) { 15341 dev_priv->display.modeset_commit_cdclk = 15342 bxt_modeset_commit_cdclk; 15343 dev_priv->display.modeset_calc_cdclk = 15344 bxt_modeset_calc_cdclk; 15345 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 15346 dev_priv->display.modeset_commit_cdclk = 15347 skl_modeset_commit_cdclk; 15348 dev_priv->display.modeset_calc_cdclk = 15349 skl_modeset_calc_cdclk; 15350 } 15351 15352 switch (INTEL_INFO(dev_priv)->gen) { 15353 case 2: 15354 dev_priv->display.queue_flip = intel_gen2_queue_flip; 15355 break; 15356 15357 case 3: 15358 dev_priv->display.queue_flip = intel_gen3_queue_flip; 15359 break; 15360 15361 case 4: 15362 case 5: 15363 dev_priv->display.queue_flip = intel_gen4_queue_flip; 15364 break; 15365 15366 case 6: 15367 dev_priv->display.queue_flip = intel_gen6_queue_flip; 15368 break; 15369 case 7: 15370 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 15371 dev_priv->display.queue_flip = intel_gen7_queue_flip; 15372 break; 15373 case 9: 15374 /* Drop through - unsupported since execlist only. */ 15375 default: 15376 /* Default just returns -ENODEV to indicate unsupported */ 15377 dev_priv->display.queue_flip = intel_default_queue_flip; 15378 } 15379 } 15380 15381 /* 15382 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 15383 * resume, or other times. This quirk makes sure that's the case for 15384 * affected systems. 
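 * (intel_sanitize_crtc() honours the quirk by force-enabling pipe A
 * through the load detect pipe, see intel_enable_pipe_a().)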
15385 */
15386 static void quirk_pipea_force(struct drm_device *dev)
15387 {
15388 struct drm_i915_private *dev_priv = to_i915(dev);
15389
15390 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15391 DRM_INFO("applying pipe a force quirk\n");
15392 }
15393
15394 static void quirk_pipeb_force(struct drm_device *dev)
15395 {
15396 struct drm_i915_private *dev_priv = to_i915(dev);
15397
15398 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15399 DRM_INFO("applying pipe b force quirk\n");
15400 }
15401
15402 /*
15403 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15404 */
15405 static void quirk_ssc_force_disable(struct drm_device *dev)
15406 {
15407 struct drm_i915_private *dev_priv = to_i915(dev);
15408 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15409 DRM_INFO("applying lvds SSC disable quirk\n");
15410 }
15411
15412 /*
15413 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15414 * brightness value
15415 */
15416 static void quirk_invert_brightness(struct drm_device *dev)
15417 {
15418 struct drm_i915_private *dev_priv = to_i915(dev);
15419 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15420 DRM_INFO("applying inverted panel brightness quirk\n");
15421 }
15422
15423 /* Some VBTs incorrectly indicate no backlight is present */
15424 static void quirk_backlight_present(struct drm_device *dev)
15425 {
15426 struct drm_i915_private *dev_priv = to_i915(dev);
15427 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15428 DRM_INFO("applying backlight present quirk\n");
15429 }
15430
15431 struct intel_quirk {
15432 int device;
15433 int subsystem_vendor;
15434 int subsystem_device;
15435 void (*hook)(struct drm_device *dev);
15436 };
15437
15438 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15439 struct intel_dmi_quirk {
15440 void (*hook)(struct drm_device *dev);
15441 const struct dmi_system_id (*dmi_id_list)[];
15442 };
15443
15444 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15445 {
15446 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15447 return 1;
15448 }
15449
15450 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15451 {
15452 .dmi_id_list = &(const struct dmi_system_id[]) {
15453 {
15454 .callback = intel_dmi_reverse_brightness,
15455 .ident = "NCR Corporation",
15456 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15457 DMI_MATCH(DMI_PRODUCT_NAME, ""),
15458 },
15459 },
15460 { } /* terminating entry */
15461 },
15462 .hook = quirk_invert_brightness,
15463 },
15464 };
15465
15466 static struct intel_quirk intel_quirks[] = {
15467 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15468 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15469
15470 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15471 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15472
15473 /* 830 needs to leave pipe A & dpll A up */
15474 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15475
15476 /* 830 needs to leave pipe B & dpll B up */
15477 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15478
15479 /* Lenovo U160 cannot use SSC on LVDS */
15480 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15481
15482 /* Sony Vaio Y cannot use SSC on LVDS */
15483 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15484
15485 /* Acer Aspire 5734Z must invert backlight brightness */
15486 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15487
15488 /* Acer/eMachines G725 */
15489 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15490
15491 /* Acer/eMachines e725 */
15492 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15493
15494 /* Acer/Packard Bell NCL20 */
15495 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15496
15497 /* Acer Aspire 4736Z */
15498 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15499
15500 /* Acer Aspire 5336 */
15501 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15502
15503 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15504 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15505
15506 /* Acer C720 Chromebook (Core i3 4005U) */
15507 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15508
15509 /* Apple Macbook 2,1 (Core 2 T7400) */
15510 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15511
15512 /* Apple Macbook 4,1 */
15513 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15514
15515 /* Toshiba CB35 Chromebook (Celeron 2955U) */
15516 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15517
15518 /* HP Chromebook 14 (Celeron 2955U) */
15519 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15520
15521 /* Dell Chromebook 11 */
15522 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15523
15524 /* Dell Chromebook 11 (2015 version) */
15525 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15526 };
15527
15528 static void intel_init_quirks(struct drm_device *dev)
15529 {
15530 struct pci_dev *d = dev->pdev;
15531 int i;
15532
15533 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15534 struct intel_quirk *q = &intel_quirks[i];
15535
15536 if (d->device == q->device &&
15537 (d->subsystem_vendor == q->subsystem_vendor ||
15538 q->subsystem_vendor == PCI_ANY_ID) &&
15539 (d->subsystem_device == q->subsystem_device ||
15540 q->subsystem_device == PCI_ANY_ID))
15541 q->hook(dev);
15542 }
15543 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15544 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15545 intel_dmi_quirks[i].hook(dev);
15546 }
15547 }
15548
15549 /* Disable the VGA plane that we never use */
15550 static void i915_disable_vga(struct drm_device *dev)
15551 {
15552 struct drm_i915_private *dev_priv = to_i915(dev);
15553 u8 sr1;
15554 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15555
15556 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15557 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15558 outb(SR01, VGA_SR_INDEX);
15559 sr1 = inb(VGA_SR_DATA);
15560 outb(sr1 | 1 << 5, VGA_SR_DATA);
15561 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15562 udelay(300);
15563
15564 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15565 POSTING_READ(vga_reg);
15566 }
15567
15568 void intel_modeset_init_hw(struct drm_device *dev)
15569 {
15570 struct drm_i915_private *dev_priv = to_i915(dev);
15571
15572 intel_update_cdclk(dev);
15573
15574 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15575
15576 intel_init_clock_gating(dev);
15577 }
15578
15579 /*
15580 * Calculate what we think the watermarks should be for the state we've read
15581 * out of the hardware and then immediately program those watermarks so that
15582 * we ensure the hardware settings match our internal state.
15583 *
15584 * We can calculate what we think WMs should be by creating a duplicate of the
15585 * current state (which was constructed during hardware readout) and running it
15586 * through the atomic check code to calculate new watermark values in the
15587 * state object.
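 * The values the check code computes are then written back through the
 * platform's optimize_watermarks() hook.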
15588 */ 15589 static void sanitize_watermarks(struct drm_device *dev) 15590 { 15591 struct drm_i915_private *dev_priv = to_i915(dev); 15592 struct drm_atomic_state *state; 15593 struct drm_crtc *crtc; 15594 struct drm_crtc_state *cstate; 15595 struct drm_modeset_acquire_ctx ctx; 15596 int ret; 15597 int i; 15598 15599 /* Only supported on platforms that use atomic watermark design */ 15600 if (!dev_priv->display.optimize_watermarks) 15601 return; 15602 15603 /* 15604 * We need to hold connection_mutex before calling duplicate_state so 15605 * that the connector loop is protected. 15606 */ 15607 drm_modeset_acquire_init(&ctx, 0); 15608 retry: 15609 ret = drm_modeset_lock_all_ctx(dev, &ctx); 15610 if (ret == -EDEADLK) { 15611 drm_modeset_backoff(&ctx); 15612 goto retry; 15613 } else if (WARN_ON(ret)) { 15614 goto fail; 15615 } 15616 15617 state = drm_atomic_helper_duplicate_state(dev, &ctx); 15618 if (WARN_ON(IS_ERR(state))) 15619 goto fail; 15620 15621 /* 15622 * Hardware readout is the only time we don't want to calculate 15623 * intermediate watermarks (since we don't trust the current 15624 * watermarks). 15625 */ 15626 to_intel_atomic_state(state)->skip_intermediate_wm = true; 15627 15628 ret = intel_atomic_check(dev, state); 15629 if (ret) { 15630 /* 15631 * If we fail here, it means that the hardware appears to be 15632 * programmed in a way that shouldn't be possible, given our 15633 * understanding of watermark requirements. This might mean a 15634 * mistake in the hardware readout code or a mistake in the 15635 * watermark calculations for a given platform. Raise a WARN 15636 * so that this is noticeable. 15637 * 15638 * If this actually happens, we'll have to just leave the 15639 * BIOS-programmed watermarks untouched and hope for the best. 15640 */ 15641 WARN(true, "Could not determine valid watermarks for inherited state\n"); 15642 goto fail; 15643 } 15644 15645 /* Write calculated watermark values back */ 15646 for_each_crtc_in_state(state, crtc, cstate, i) { 15647 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 15648 15649 cs->wm.need_postvbl_update = true; 15650 dev_priv->display.optimize_watermarks(cs); 15651 } 15652 15653 drm_atomic_state_free(state); 15654 fail: 15655 drm_modeset_drop_locks(&ctx); 15656 drm_modeset_acquire_fini(&ctx); 15657 } 15658 15659 void intel_modeset_init(struct drm_device *dev) 15660 { 15661 struct drm_i915_private *dev_priv = to_i915(dev); 15662 struct i915_ggtt *ggtt = &dev_priv->ggtt; 15663 int sprite, ret; 15664 enum i915_pipe pipe; 15665 struct intel_crtc *crtc; 15666 15667 drm_mode_config_init(dev); 15668 15669 dev->mode_config.min_width = 0; 15670 dev->mode_config.min_height = 0; 15671 15672 dev->mode_config.preferred_depth = 24; 15673 dev->mode_config.prefer_shadow = 1; 15674 15675 dev->mode_config.allow_fb_modifiers = true; 15676 15677 dev->mode_config.funcs = &intel_mode_funcs; 15678 15679 intel_init_quirks(dev); 15680 15681 intel_init_pm(dev); 15682 15683 if (INTEL_INFO(dev)->num_pipes == 0) 15684 return; 15685 15686 /* 15687 * There may be no VBT; and if the BIOS enabled SSC we can 15688 * just keep using it to avoid unnecessary flicker. Whereas if the 15689 * BIOS isn't using it, don't assume it will work even if the VBT 15690 * indicates as much. 
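 * (The BIOS setting is read back from DREF_SSC1_ENABLE in PCH_DREF_CONTROL
 * just below.)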
15691 */ 15692 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 15693 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 15694 DREF_SSC1_ENABLE); 15695 15696 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 15697 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 15698 bios_lvds_use_ssc ? "en" : "dis", 15699 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 15700 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 15701 } 15702 } 15703 15704 if (IS_GEN2(dev)) { 15705 dev->mode_config.max_width = 2048; 15706 dev->mode_config.max_height = 2048; 15707 } else if (IS_GEN3(dev)) { 15708 dev->mode_config.max_width = 4096; 15709 dev->mode_config.max_height = 4096; 15710 } else { 15711 dev->mode_config.max_width = 8192; 15712 dev->mode_config.max_height = 8192; 15713 } 15714 15715 if (IS_845G(dev) || IS_I865G(dev)) { 15716 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; 15717 dev->mode_config.cursor_height = 1023; 15718 } else if (IS_GEN2(dev)) { 15719 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15720 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15721 } else { 15722 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15723 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15724 } 15725 15726 dev->mode_config.fb_base = ggtt->mappable_base; 15727 15728 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15729 INTEL_INFO(dev)->num_pipes, 15730 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 15731 15732 for_each_pipe(dev_priv, pipe) { 15733 intel_crtc_init(dev, pipe); 15734 for_each_sprite(dev_priv, pipe, sprite) { 15735 ret = intel_plane_init(dev, pipe, sprite); 15736 if (ret) 15737 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 15738 pipe_name(pipe), sprite_name(pipe, sprite), ret); 15739 } 15740 } 15741 15742 intel_update_czclk(dev_priv); 15743 intel_update_cdclk(dev); 15744 15745 intel_shared_dpll_init(dev); 15746 15747 if (dev_priv->max_cdclk_freq == 0) 15748 intel_update_max_cdclk(dev); 15749 15750 /* Just disable it once at startup */ 15751 i915_disable_vga(dev); 15752 intel_setup_outputs(dev); 15753 15754 drm_modeset_lock_all(dev); 15755 intel_modeset_setup_hw_state(dev); 15756 drm_modeset_unlock_all(dev); 15757 15758 for_each_intel_crtc(dev, crtc) { 15759 struct intel_initial_plane_config plane_config = {}; 15760 15761 if (!crtc->active) 15762 continue; 15763 15764 /* 15765 * Note that reserving the BIOS fb up front prevents us 15766 * from stuffing other stolen allocations like the ring 15767 * on top. This prevents some ugliness at boot time, and 15768 * can even allow for smooth boot transitions if the BIOS 15769 * fb is large enough for the active pipe configuration. 15770 */ 15771 dev_priv->display.get_initial_plane_config(crtc, 15772 &plane_config); 15773 15774 /* 15775 * If the fb is shared between multiple heads, we'll 15776 * just get the first one. 15777 */ 15778 intel_find_initial_plane_obj(crtc, &plane_config); 15779 } 15780 15781 /* 15782 * Make sure hardware watermarks really match the state we read out. 15783 * Note that we need to do this after reconstructing the BIOS fb's 15784 * since the watermark calculation done here will use pstate->fb. 
15785 */
15786 sanitize_watermarks(dev);
15787 }
15788
15789 static void intel_enable_pipe_a(struct drm_device *dev)
15790 {
15791 struct intel_connector *connector;
15792 struct drm_connector *crt = NULL;
15793 struct intel_load_detect_pipe load_detect_temp;
15794 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15795
15796 /* We can't just switch on pipe A; we need to set things up with a
15797 * proper mode and output configuration. As a gross hack, enable pipe A
15798 * by enabling the load detect pipe once. */
15799 for_each_intel_connector(dev, connector) {
15800 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15801 crt = &connector->base;
15802 break;
15803 }
15804 }
15805
15806 if (!crt)
15807 return;
15808
15809 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15810 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15811 }
15812
15813 static bool
15814 intel_check_plane_mapping(struct intel_crtc *crtc)
15815 {
15816 struct drm_device *dev = crtc->base.dev;
15817 struct drm_i915_private *dev_priv = to_i915(dev);
15818 u32 val;
15819
15820 if (INTEL_INFO(dev)->num_pipes == 1)
15821 return true;
15822
15823 val = I915_READ(DSPCNTR(!crtc->plane));
15824
15825 if ((val & DISPLAY_PLANE_ENABLE) &&
15826 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15827 return false;
15828
15829 return true;
15830 }
15831
15832 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15833 {
15834 struct drm_device *dev = crtc->base.dev;
15835 struct intel_encoder *encoder;
15836
15837 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15838 return true;
15839
15840 return false;
15841 }
15842
15843 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15844 {
15845 struct drm_device *dev = encoder->base.dev;
15846 struct intel_connector *connector;
15847
15848 for_each_connector_on_encoder(dev, &encoder->base, connector)
15849 return true;
15850
15851 return false;
15852 }
15853
15854 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
15855 enum transcoder pch_transcoder)
15856 {
15857 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15858 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
15859 }
15860
15861 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15862 {
15863 struct drm_device *dev = crtc->base.dev;
15864 struct drm_i915_private *dev_priv = to_i915(dev);
15865 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15866
15867 /* Clear any frame start delays used for debugging left by the BIOS */
15868 if (!transcoder_is_dsi(cpu_transcoder)) {
15869 i915_reg_t reg = PIPECONF(cpu_transcoder);
15870
15871 I915_WRITE(reg,
15872 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15873 }
15874
15875 /* restore vblank interrupts to correct state */
15876 drm_crtc_vblank_reset(&crtc->base);
15877 if (crtc->active) {
15878 struct intel_plane *plane;
15879
15880 drm_crtc_vblank_on(&crtc->base);
15881
15882 /* Disable everything but the primary plane */
15883 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15884 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15885 continue;
15886
15887 plane->disable_plane(&plane->base, &crtc->base);
15888 }
15889 }
15890
15891 /* We need to sanitize the plane -> pipe mapping first because this will
15892 * disable the crtc (and hence change the state) if it is wrong. Note
15893 * that gen4+ has a fixed plane -> pipe mapping.
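 * (hence the fixup below only runs on gen < 4)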
*/
15894 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15895 bool plane;
15896
15897 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
15898 crtc->base.base.id, crtc->base.name);
15899
15900 /* Pipe has the wrong plane attached and the plane is active.
15901 * Temporarily change the plane mapping and disable everything
15902 * ... */
15903 plane = crtc->plane;
15904 to_intel_plane_state(crtc->base.primary->state)->visible = true;
15905 crtc->plane = !plane;
15906 intel_crtc_disable_noatomic(&crtc->base);
15907 crtc->plane = plane;
15908 }
15909
15910 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15911 crtc->pipe == PIPE_A && !crtc->active) {
15912 /* The BIOS forgot to enable pipe A, which mostly happens after
15913 * resume. Force-enable the pipe to fix this; in the update_dpms
15914 * call below we restore the pipe to the right state, but leave
15915 * the required bits on. */
15916 intel_enable_pipe_a(dev);
15917 }
15918
15919 /* Adjust the state of the output pipe according to whether we
15920 * have active connectors/encoders. */
15921 if (crtc->active && !intel_crtc_has_encoders(crtc))
15922 intel_crtc_disable_noatomic(&crtc->base);
15923
15924 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15925 /*
15926 * We start out with underrun reporting disabled to avoid races.
15927 * For correct bookkeeping mark this on active crtcs.
15928 *
15929 * Also on gmch platforms we don't have any hardware bits to
15930 * disable the underrun reporting. Which means we need to start
15931 * out with underrun reporting disabled also on inactive pipes,
15932 * since otherwise we'll complain about the garbage we read when
15933 * e.g. coming up after runtime pm.
15934 *
15935 * No protection against concurrent access is required - at
15936 * worst a fifo underrun happens which also sets this to false.
15937 */
15938 crtc->cpu_fifo_underrun_disabled = true;
15939 /*
15940 * We track the PCH transcoder underrun reporting state
15941 * within the crtc. With crtc for pipe A housing the underrun
15942 * reporting state for PCH transcoder A, crtc for pipe B housing
15943 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
15944 * and marking underrun reporting as disabled for the non-existing
15945 * PCH transcoders B and C would prevent enabling the south
15946 * error interrupt (see cpt_can_enable_serr_int()).
15947 */
15948 if (has_pch_transcoder(dev_priv, (enum transcoder)crtc->pipe))
15949 crtc->pch_fifo_underrun_disabled = true;
15950 }
15951 }
15952
15953 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15954 {
15955 struct intel_connector *connector;
15956 struct drm_device *dev = encoder->base.dev;
15957
15958 /* We need to check both for a crtc link (meaning that the
15959 * encoder is active and trying to read from a pipe) and the
15960 * pipe itself being active. */
15961 bool has_active_crtc = encoder->base.crtc &&
15962 to_intel_crtc(encoder->base.crtc)->active;
15963
15964 if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15965 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15966 encoder->base.base.id,
15967 encoder->base.name);
15968
15969 /* Connector is active, but has no active pipe. This is
15970 * fallout from our resume register restoring. Disable
15971 * the encoder manually again.
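 * This uses the encoder's ->disable() and optional ->post_disable()
 * hooks, mirroring a normal modeset disable sequence.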
*/
15972 if (encoder->base.crtc) {
15973 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15974 encoder->base.base.id,
15975 encoder->base.name);
15976 encoder->disable(encoder);
15977 if (encoder->post_disable)
15978 encoder->post_disable(encoder);
15979 }
15980 encoder->base.crtc = NULL;
15981
15982 /* Inconsistent output/port/pipe state happens presumably due to
15983 * a bug in one of the get_hw_state functions, or someplace else
15984 * in our code, like the register restore mess on resume. Clamp
15985 * things to off as a safer default. */
15986 for_each_intel_connector(dev, connector) {
15987 if (connector->encoder != encoder)
15988 continue;
15989 connector->base.dpms = DRM_MODE_DPMS_OFF;
15990 connector->base.encoder = NULL;
15991 }
15992 }
15993 /* Enabled encoders without active connectors will be fixed in
15994 * the crtc fixup. */
15995 }
15996
15997 void i915_redisable_vga_power_on(struct drm_device *dev)
15998 {
15999 struct drm_i915_private *dev_priv = to_i915(dev);
16000 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
16001
16002 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16003 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16004 i915_disable_vga(dev);
16005 }
16006 }
16007
16008 void i915_redisable_vga(struct drm_device *dev)
16009 {
16010 struct drm_i915_private *dev_priv = to_i915(dev);
16011
16012 /* This function can be called either from intel_modeset_setup_hw_state or
16013 * at a very early point in our resume sequence, where the power well
16014 * structures are not yet restored. Since this function is at a very
16015 * paranoid "someone might have enabled VGA while we were not looking"
16016 * level, just check if the power well is enabled instead of trying to
16017 * follow the "don't touch the power well if we don't need it" policy
16018 * the rest of the driver uses.
*/ 16019 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) 16020 return; 16021 16022 i915_redisable_vga_power_on(dev); 16023 16024 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 16025 } 16026 16027 static bool primary_get_hw_state(struct intel_plane *plane) 16028 { 16029 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 16030 16031 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 16032 } 16033 16034 /* FIXME read out full plane state for all planes */ 16035 static void readout_plane_state(struct intel_crtc *crtc) 16036 { 16037 struct drm_plane *primary = crtc->base.primary; 16038 struct intel_plane_state *plane_state = 16039 to_intel_plane_state(primary->state); 16040 16041 plane_state->visible = crtc->active && 16042 primary_get_hw_state(to_intel_plane(primary)); 16043 16044 if (plane_state->visible) 16045 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); 16046 } 16047 16048 static void intel_modeset_readout_hw_state(struct drm_device *dev) 16049 { 16050 struct drm_i915_private *dev_priv = to_i915(dev); 16051 enum i915_pipe pipe; 16052 struct intel_crtc *crtc; 16053 struct intel_encoder *encoder; 16054 struct intel_connector *connector; 16055 int i; 16056 16057 dev_priv->active_crtcs = 0; 16058 16059 for_each_intel_crtc(dev, crtc) { 16060 struct intel_crtc_state *crtc_state = crtc->config; 16061 int pixclk = 0; 16062 16063 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 16064 memset(crtc_state, 0, sizeof(*crtc_state)); 16065 crtc_state->base.crtc = &crtc->base; 16066 16067 crtc_state->base.active = crtc_state->base.enable = 16068 dev_priv->display.get_pipe_config(crtc, crtc_state); 16069 16070 crtc->base.enabled = crtc_state->base.enable; 16071 crtc->active = crtc_state->base.active; 16072 16073 if (crtc_state->base.active) { 16074 dev_priv->active_crtcs |= 1 << crtc->pipe; 16075 16076 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 16077 pixclk = ilk_pipe_pixel_rate(crtc_state); 16078 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16079 pixclk = crtc_state->base.adjusted_mode.crtc_clock; 16080 else 16081 WARN_ON(dev_priv->display.modeset_calc_cdclk); 16082 16083 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 16084 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 16085 pixclk = DIV_ROUND_UP(pixclk * 100, 95); 16086 } 16087 16088 dev_priv->min_pixclk[crtc->pipe] = pixclk; 16089 16090 readout_plane_state(crtc); 16091 16092 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 16093 crtc->base.base.id, crtc->base.name, 16094 crtc->active ? 
"enabled" : "disabled"); 16095 } 16096 16097 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 16098 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 16099 16100 pll->on = pll->funcs.get_hw_state(dev_priv, pll, 16101 &pll->config.hw_state); 16102 pll->config.crtc_mask = 0; 16103 for_each_intel_crtc(dev, crtc) { 16104 if (crtc->active && crtc->config->shared_dpll == pll) 16105 pll->config.crtc_mask |= 1 << crtc->pipe; 16106 } 16107 pll->active_mask = pll->config.crtc_mask; 16108 16109 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 16110 pll->name, pll->config.crtc_mask, pll->on); 16111 } 16112 16113 for_each_intel_encoder(dev, encoder) { 16114 pipe = 0; 16115 16116 if (encoder->get_hw_state(encoder, &pipe)) { 16117 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 16118 encoder->base.crtc = &crtc->base; 16119 crtc->config->output_types |= 1 << encoder->type; 16120 encoder->get_config(encoder, crtc->config); 16121 } else { 16122 encoder->base.crtc = NULL; 16123 } 16124 16125 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 16126 encoder->base.base.id, 16127 encoder->base.name, 16128 encoder->base.crtc ? "enabled" : "disabled", 16129 pipe_name(pipe)); 16130 } 16131 16132 for_each_intel_connector(dev, connector) { 16133 if (connector->get_hw_state(connector)) { 16134 connector->base.dpms = DRM_MODE_DPMS_ON; 16135 16136 encoder = connector->encoder; 16137 connector->base.encoder = &encoder->base; 16138 16139 if (encoder->base.crtc && 16140 encoder->base.crtc->state->active) { 16141 /* 16142 * This has to be done during hardware readout 16143 * because anything calling .crtc_disable may 16144 * rely on the connector_mask being accurate. 16145 */ 16146 encoder->base.crtc->state->connector_mask |= 16147 1 << drm_connector_index(&connector->base); 16148 encoder->base.crtc->state->encoder_mask |= 16149 1 << drm_encoder_index(&encoder->base); 16150 } 16151 16152 } else { 16153 connector->base.dpms = DRM_MODE_DPMS_OFF; 16154 connector->base.encoder = NULL; 16155 } 16156 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 16157 connector->base.base.id, 16158 connector->base.name, 16159 connector->base.encoder ? "enabled" : "disabled"); 16160 } 16161 16162 for_each_intel_crtc(dev, crtc) { 16163 crtc->base.hwmode = crtc->config->base.adjusted_mode; 16164 16165 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 16166 if (crtc->base.state->active) { 16167 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); 16168 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); 16169 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 16170 16171 /* 16172 * The initial mode needs to be set in order to keep 16173 * the atomic core happy. It wants a valid mode if the 16174 * crtc's enabled, so we do the above call. 16175 * 16176 * At this point some state updated by the connectors 16177 * in their ->detect() callback has not run yet, so 16178 * no recalculation can be done yet. 16179 * 16180 * Even if we could do a recalculation and modeset 16181 * right now it would cause a double modeset if 16182 * fbdev or userspace chooses a different initial mode. 16183 * 16184 * If that happens, someone indicated they wanted a 16185 * mode change, which means it's safe to do a full 16186 * recalculation. 
16187 */
16188 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
16189
16190 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
16191 update_scanline_offset(crtc);
16192 }
16193
16194 intel_pipe_config_sanity_check(dev_priv, crtc->config);
16195 }
16196 }
16197
16198 /* Scan out the current hw modeset state
16199 * and sanitize it to the current state
16200 */
16201 static void
16202 intel_modeset_setup_hw_state(struct drm_device *dev)
16203 {
16204 struct drm_i915_private *dev_priv = to_i915(dev);
16205 enum i915_pipe pipe;
16206 struct intel_crtc *crtc;
16207 struct intel_encoder *encoder;
16208 int i;
16209
16210 intel_modeset_readout_hw_state(dev);
16211
16212 /* HW state is read out, now we need to sanitize this mess. */
16213 for_each_intel_encoder(dev, encoder) {
16214 intel_sanitize_encoder(encoder);
16215 }
16216
16217 for_each_pipe(dev_priv, pipe) {
16218 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
16219 intel_sanitize_crtc(crtc);
16220 intel_dump_pipe_config(crtc, crtc->config,
16221 "[setup_hw_state]");
16222 }
16223
16224 intel_modeset_update_connector_atomic_state(dev);
16225
16226 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16227 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16228
16229 if (!pll->on || pll->active_mask)
16230 continue;
16231
16232 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
16233
16234 pll->funcs.disable(dev_priv, pll);
16235 pll->on = false;
16236 }
16237
16238 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
16239 vlv_wm_get_hw_state(dev);
16240 else if (IS_GEN9(dev))
16241 skl_wm_get_hw_state(dev);
16242 else if (HAS_PCH_SPLIT(dev))
16243 ilk_wm_get_hw_state(dev);
16244
16245 for_each_intel_crtc(dev, crtc) {
16246 unsigned long put_domains;
16247
16248 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
16249 if (WARN_ON(put_domains))
16250 modeset_put_power_domains(dev_priv, put_domains);
16251 }
16252 intel_display_set_init_power(dev_priv, false);
16253
16254 intel_fbc_init_pipe_state(dev_priv);
16255 }
16256
16257 void intel_display_resume(struct drm_device *dev)
16258 {
16259 struct drm_i915_private *dev_priv = to_i915(dev);
16260 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16261 struct drm_modeset_acquire_ctx ctx;
16262 int ret;
16263
16264 dev_priv->modeset_restore_state = NULL;
16265 if (state)
16266 state->acquire_ctx = &ctx;
16267
16268 /*
16269 * This is a kludge because with real atomic modeset mode_config.mutex
16270 * won't be taken. Unfortunately some probed state like
16271 * audio_codec_enable is still protected by mode_config.mutex, so lock
16272 * it here for now.
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}

void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Drop the boot fb and detach it from the plane. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}
}

int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		goto err;

	return 0;

err:
	return ret;
}

void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}
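/*
 * Illustrative sketch (assumed usage, not taken from this file): the
 * register/unregister helpers above are typically plugged into a
 * connector's function table through the DRM late_register /
 * early_unregister hooks, e.g.:
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		.late_register = intel_connector_register,
 *		.early_unregister = intel_connector_unregister,
 *		...
 *	};
 *
 * so that the backlight device is created only once the connector has been
 * exposed to userspace, and torn down before it disappears.
 */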
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling need to go first, to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev);
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * Set VGA decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Nothing to do if the hardware already matches the requested state. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
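/*
 * Note on the capture path below: the allocation uses GFP_ATOMIC because
 * error capture can run from contexts that must not sleep, and every block
 * of register reads is guarded by a power-domain check first, since reading
 * registers in a powered-down domain returns garbage (or can hang the
 * machine).
 */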
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
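/*
 * Illustrative usage (assumed caller, not taken from this file): the
 * capture/print pair is meant to be driven from the GPU error state code,
 * roughly along these lines:
 *
 *	struct intel_display_error_state *display;
 *
 *	display = intel_display_capture_error_state(dev_priv);
 *	...
 *	intel_display_print_error_state(m, dev, display);
 *	kfree(display);
 *
 * Capture happens at error time, printing later when userspace reads the
 * error state; the struct is a plain kzalloc() allocation, so kfree() is
 * the correct way to release it.
 */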
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}