1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_crtc_helper.h> 41 #include <drm/drm_plane_helper.h> 42 #include <drm/drm_rect.h> 43 44 /* Primary plane formats for gen <= 3 */ 45 static const uint32_t i8xx_primary_formats[] = { 46 DRM_FORMAT_C8, 47 DRM_FORMAT_RGB565, 48 DRM_FORMAT_XRGB1555, 49 DRM_FORMAT_XRGB8888, 50 }; 51 52 /* Primary plane formats for gen >= 4 */ 53 static const uint32_t i965_primary_formats[] = { 54 DRM_FORMAT_C8, 55 DRM_FORMAT_RGB565, 56 DRM_FORMAT_XRGB8888, 57 DRM_FORMAT_XBGR8888, 58 DRM_FORMAT_XRGB2101010, 59 DRM_FORMAT_XBGR2101010, 60 }; 61 62 static const uint32_t skl_primary_formats[] = { 63 DRM_FORMAT_C8, 64 DRM_FORMAT_RGB565, 65 DRM_FORMAT_XRGB8888, 66 DRM_FORMAT_XBGR8888, 67 DRM_FORMAT_ARGB8888, 68 DRM_FORMAT_ABGR8888, 69 DRM_FORMAT_XRGB2101010, 70 DRM_FORMAT_XBGR2101010, 71 }; 72 73 /* Cursor formats */ 74 static const uint32_t intel_cursor_formats[] = { 75 DRM_FORMAT_ARGB8888, 76 }; 77 78 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 79 80 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 81 struct intel_crtc_state *pipe_config); 82 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 83 struct intel_crtc_state *pipe_config); 84 85 static int intel_framebuffer_init(struct drm_device *dev, 86 struct intel_framebuffer *ifb, 87 struct drm_mode_fb_cmd2 *mode_cmd, 88 struct drm_i915_gem_object *obj); 89 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 90 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 91 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 92 struct intel_link_m_n *m_n, 93 struct intel_link_m_n *m2_n2); 94 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 95 static void haswell_set_pipeconf(struct drm_crtc *crtc); 96 static void intel_set_pipe_csc(struct drm_crtc *crtc); 97 static void vlv_prepare_pll(struct intel_crtc *crtc, 98 const struct intel_crtc_state *pipe_config); 99 static void chv_prepare_pll(struct intel_crtc *crtc, 100 const struct intel_crtc_state *pipe_config); 101 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 102 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 103 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 104 struct intel_crtc_state *crtc_state); 105 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 106 int num_connectors); 107 static void intel_modeset_setup_hw_state(struct drm_device *dev); 108 109 typedef struct { 110 int min, max; 111 } intel_range_t; 112 113 typedef struct { 114 int dot_limit; 115 int p2_slow, p2_fast; 116 } intel_p2_t; 117 118 typedef struct intel_limit intel_limit_t; 119 struct intel_limit { 120 intel_range_t dot, vco, n, m, m1, m2, p, p1; 121 intel_p2_t p2; 122 }; 123 124 int 125 intel_pch_rawclk(struct drm_device *dev) 126 { 127 struct drm_i915_private *dev_priv = dev->dev_private; 128 129 WARN_ON(!HAS_PCH_SPLIT(dev)); 130 131 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 132 } 133 134 static inline u32 /* units of 100MHz */ 135 intel_fdi_link_freq(struct drm_device *dev) 136 { 137 if (IS_GEN5(dev)) { 138 struct drm_i915_private *dev_priv = dev->dev_private; 139 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 140 } else 141 return 27; 142 } 143 144 static const intel_limit_t intel_limits_i8xx_dac = { 145 .dot = { .min = 25000, .max = 350000 }, 146 .vco = { .min = 908000, .max = 1512000 }, 147 .n = { .min = 2, .max = 16 }, 148 .m = { .min = 96, .max = 140 }, 149 .m1 = { .min = 18, .max = 26 }, 150 .m2 = { .min = 6, .max = 16 }, 151 .p = { .min = 4, .max = 128 }, 152 .p1 = { .min = 2, .max = 33 }, 153 .p2 = { .dot_limit = 165000, 154 .p2_slow = 4, .p2_fast = 2 }, 155 }; 156 157 static const intel_limit_t intel_limits_i8xx_dvo = { 158 .dot = { .min = 25000, .max = 350000 }, 159 .vco = { .min = 908000, .max = 1512000 }, 160 .n = { .min = 2, .max = 16 }, 161 .m = { .min = 96, .max = 140 }, 162 .m1 = { .min = 18, .max = 26 }, 163 .m2 = { .min = 6, .max = 16 }, 164 .p = { .min = 4, .max = 128 }, 165 .p1 = { .min = 2, .max = 33 }, 166 .p2 = { .dot_limit = 165000, 167 .p2_slow = 4, .p2_fast = 4 }, 168 }; 169 170 static const intel_limit_t intel_limits_i8xx_lvds = { 171 .dot = { .min = 25000, .max = 350000 }, 172 .vco = { .min = 908000, .max = 1512000 }, 173 .n = { .min = 2, .max = 16 }, 174 .m = { .min = 96, .max = 140 }, 175 .m1 = { .min = 18, .max = 26 }, 176 .m2 = { .min = 6, .max = 16 }, 177 .p = { .min = 4, .max = 128 }, 178 .p1 = { .min = 1, .max = 6 }, 179 .p2 = { .dot_limit = 165000, 180 .p2_slow = 14, .p2_fast = 7 }, 181 }; 182 183 static const intel_limit_t intel_limits_i9xx_sdvo = { 184 .dot = { .min = 20000, .max = 400000 }, 185 .vco = { .min = 1400000, .max = 2800000 }, 186 .n = { .min = 1, .max = 6 }, 187 .m = { .min = 70, .max = 120 }, 188 .m1 = { .min = 8, .max = 18 }, 189 .m2 = { .min = 3, .max = 7 }, 190 .p = { .min = 5, .max = 80 }, 191 .p1 = { .min = 1, .max = 8 }, 192 .p2 = { .dot_limit = 200000, 193 .p2_slow = 10, .p2_fast = 5 }, 194 }; 195 196 static const intel_limit_t intel_limits_i9xx_lvds = { 197 .dot = { .min = 20000, .max = 400000 }, 198 .vco = { .min = 1400000, .max = 2800000 }, 199 .n = { .min = 1, .max = 6 }, 200 .m = { .min = 70, .max = 120 }, 201 .m1 = { .min = 8, .max = 18 }, 202 .m2 = { .min = 3, .max = 7 }, 203 .p = { .min = 7, .max = 98 }, 204 .p1 = { .min = 1, .max = 8 }, 205 .p2 = { .dot_limit = 112000, 206 .p2_slow = 14, .p2_fast = 7 }, 207 }; 208 209 210 static const intel_limit_t intel_limits_g4x_sdvo = { 211 .dot = { .min = 25000, .max = 270000 }, 212 .vco = { .min = 1750000, .max = 3500000}, 213 .n = { .min = 1, .max = 4 }, 214 .m = { .min = 104, .max = 138 }, 215 .m1 = { .min = 17, .max = 23 }, 216 .m2 = { .min = 5, .max = 11 }, 217 .p = { .min = 10, .max = 30 }, 218 .p1 = { .min = 1, .max = 3}, 219 .p2 = { .dot_limit = 270000, 220 .p2_slow = 10, 221 .p2_fast = 10 222 }, 223 }; 224 225 static const intel_limit_t intel_limits_g4x_hdmi = { 226 .dot = { .min = 22000, .max = 400000 }, 227 .vco = { .min = 1750000, .max = 3500000}, 228 .n = { .min = 1, .max = 4 }, 229 .m = { .min = 104, .max = 138 }, 230 .m1 = { .min = 16, .max = 23 }, 231 .m2 = { .min = 5, .max = 11 }, 232 .p = { .min = 5, .max = 80 }, 233 .p1 = { .min = 1, .max = 8}, 234 .p2 = { .dot_limit = 165000, 235 .p2_slow = 10, .p2_fast = 5 }, 236 }; 237 238 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 239 .dot = { .min = 20000, .max = 115000 }, 240 .vco = { .min = 1750000, .max = 3500000 }, 241 .n = { .min = 1, .max = 3 }, 242 .m = { .min = 104, .max = 138 }, 243 .m1 = { .min = 17, .max = 23 }, 244 .m2 = { .min = 5, .max = 11 }, 245 .p = { .min = 28, .max = 112 }, 246 .p1 = { .min = 2, .max = 8 }, 247 .p2 = { .dot_limit = 0, 248 .p2_slow = 14, .p2_fast = 14 249 }, 250 }; 251 252 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 253 .dot = { .min = 80000, .max = 224000 }, 254 .vco = { .min = 1750000, .max = 3500000 }, 255 .n = { .min = 1, .max = 3 }, 256 .m = { .min = 104, .max = 138 }, 257 .m1 = { .min = 17, .max = 23 }, 258 .m2 = { .min = 5, .max = 11 }, 259 .p = { .min = 14, .max = 42 }, 260 .p1 = { .min = 2, .max = 6 }, 261 .p2 = { .dot_limit = 0, 262 .p2_slow = 7, .p2_fast = 7 263 }, 264 }; 265 266 static const intel_limit_t intel_limits_pineview_sdvo = { 267 .dot = { .min = 20000, .max = 400000}, 268 .vco = { .min = 1700000, .max = 3500000 }, 269 /* Pineview's Ncounter is a ring counter */ 270 .n = { .min = 3, .max = 6 }, 271 .m = { .min = 2, .max = 256 }, 272 /* Pineview only has one combined m divider, which we treat as m2. */ 273 .m1 = { .min = 0, .max = 0 }, 274 .m2 = { .min = 0, .max = 254 }, 275 .p = { .min = 5, .max = 80 }, 276 .p1 = { .min = 1, .max = 8 }, 277 .p2 = { .dot_limit = 200000, 278 .p2_slow = 10, .p2_fast = 5 }, 279 }; 280 281 static const intel_limit_t intel_limits_pineview_lvds = { 282 .dot = { .min = 20000, .max = 400000 }, 283 .vco = { .min = 1700000, .max = 3500000 }, 284 .n = { .min = 3, .max = 6 }, 285 .m = { .min = 2, .max = 256 }, 286 .m1 = { .min = 0, .max = 0 }, 287 .m2 = { .min = 0, .max = 254 }, 288 .p = { .min = 7, .max = 112 }, 289 .p1 = { .min = 1, .max = 8 }, 290 .p2 = { .dot_limit = 112000, 291 .p2_slow = 14, .p2_fast = 14 }, 292 }; 293 294 /* Ironlake / Sandybridge 295 * 296 * We calculate clock using (register_value + 2) for N/M1/M2, so here 297 * the range value for them is (actual_value - 2). 298 */ 299 static const intel_limit_t intel_limits_ironlake_dac = { 300 .dot = { .min = 25000, .max = 350000 }, 301 .vco = { .min = 1760000, .max = 3510000 }, 302 .n = { .min = 1, .max = 5 }, 303 .m = { .min = 79, .max = 127 }, 304 .m1 = { .min = 12, .max = 22 }, 305 .m2 = { .min = 5, .max = 9 }, 306 .p = { .min = 5, .max = 80 }, 307 .p1 = { .min = 1, .max = 8 }, 308 .p2 = { .dot_limit = 225000, 309 .p2_slow = 10, .p2_fast = 5 }, 310 }; 311 312 static const intel_limit_t intel_limits_ironlake_single_lvds = { 313 .dot = { .min = 25000, .max = 350000 }, 314 .vco = { .min = 1760000, .max = 3510000 }, 315 .n = { .min = 1, .max = 3 }, 316 .m = { .min = 79, .max = 118 }, 317 .m1 = { .min = 12, .max = 22 }, 318 .m2 = { .min = 5, .max = 9 }, 319 .p = { .min = 28, .max = 112 }, 320 .p1 = { .min = 2, .max = 8 }, 321 .p2 = { .dot_limit = 225000, 322 .p2_slow = 14, .p2_fast = 14 }, 323 }; 324 325 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 326 .dot = { .min = 25000, .max = 350000 }, 327 .vco = { .min = 1760000, .max = 3510000 }, 328 .n = { .min = 1, .max = 3 }, 329 .m = { .min = 79, .max = 127 }, 330 .m1 = { .min = 12, .max = 22 }, 331 .m2 = { .min = 5, .max = 9 }, 332 .p = { .min = 14, .max = 56 }, 333 .p1 = { .min = 2, .max = 8 }, 334 .p2 = { .dot_limit = 225000, 335 .p2_slow = 7, .p2_fast = 7 }, 336 }; 337 338 /* LVDS 100mhz refclk limits. */ 339 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 340 .dot = { .min = 25000, .max = 350000 }, 341 .vco = { .min = 1760000, .max = 3510000 }, 342 .n = { .min = 1, .max = 2 }, 343 .m = { .min = 79, .max = 126 }, 344 .m1 = { .min = 12, .max = 22 }, 345 .m2 = { .min = 5, .max = 9 }, 346 .p = { .min = 28, .max = 112 }, 347 .p1 = { .min = 2, .max = 8 }, 348 .p2 = { .dot_limit = 225000, 349 .p2_slow = 14, .p2_fast = 14 }, 350 }; 351 352 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 353 .dot = { .min = 25000, .max = 350000 }, 354 .vco = { .min = 1760000, .max = 3510000 }, 355 .n = { .min = 1, .max = 3 }, 356 .m = { .min = 79, .max = 126 }, 357 .m1 = { .min = 12, .max = 22 }, 358 .m2 = { .min = 5, .max = 9 }, 359 .p = { .min = 14, .max = 42 }, 360 .p1 = { .min = 2, .max = 6 }, 361 .p2 = { .dot_limit = 225000, 362 .p2_slow = 7, .p2_fast = 7 }, 363 }; 364 365 static const intel_limit_t intel_limits_vlv = { 366 /* 367 * These are the data rate limits (measured in fast clocks) 368 * since those are the strictest limits we have. The fast 369 * clock and actual rate limits are more relaxed, so checking 370 * them would make no difference. 371 */ 372 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 373 .vco = { .min = 4000000, .max = 6000000 }, 374 .n = { .min = 1, .max = 7 }, 375 .m1 = { .min = 2, .max = 3 }, 376 .m2 = { .min = 11, .max = 156 }, 377 .p1 = { .min = 2, .max = 3 }, 378 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 379 }; 380 381 static const intel_limit_t intel_limits_chv = { 382 /* 383 * These are the data rate limits (measured in fast clocks) 384 * since those are the strictest limits we have. The fast 385 * clock and actual rate limits are more relaxed, so checking 386 * them would make no difference. 387 */ 388 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 389 .vco = { .min = 4800000, .max = 6480000 }, 390 .n = { .min = 1, .max = 1 }, 391 .m1 = { .min = 2, .max = 2 }, 392 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 393 .p1 = { .min = 2, .max = 4 }, 394 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 395 }; 396 397 static const intel_limit_t intel_limits_bxt = { 398 /* FIXME: find real dot limits */ 399 .dot = { .min = 0, .max = INT_MAX }, 400 .vco = { .min = 4800000, .max = 6700000 }, 401 .n = { .min = 1, .max = 1 }, 402 .m1 = { .min = 2, .max = 2 }, 403 /* FIXME: find real m2 limits */ 404 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 405 .p1 = { .min = 2, .max = 4 }, 406 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 407 }; 408 409 static bool 410 needs_modeset(struct drm_crtc_state *state) 411 { 412 return drm_atomic_crtc_needs_modeset(state); 413 } 414 415 /** 416 * Returns whether any output on the specified pipe is of the specified type 417 */ 418 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) 419 { 420 struct drm_device *dev = crtc->base.dev; 421 struct intel_encoder *encoder; 422 423 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 424 if (encoder->type == type) 425 return true; 426 427 return false; 428 } 429 430 /** 431 * Returns whether any output on the specified pipe will have the specified 432 * type after a staged modeset is complete, i.e., the same as 433 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 434 * encoder->crtc. 435 */ 436 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, 437 int type) 438 { 439 struct drm_atomic_state *state = crtc_state->base.state; 440 struct drm_connector *connector; 441 struct drm_connector_state *connector_state; 442 struct intel_encoder *encoder; 443 int i, num_connectors = 0; 444 445 for_each_connector_in_state(state, connector, connector_state, i) { 446 if (connector_state->crtc != crtc_state->base.crtc) 447 continue; 448 449 num_connectors++; 450 451 encoder = to_intel_encoder(connector_state->best_encoder); 452 if (encoder->type == type) 453 return true; 454 } 455 456 WARN_ON(num_connectors == 0); 457 458 return false; 459 } 460 461 static const intel_limit_t * 462 intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) 463 { 464 struct drm_device *dev = crtc_state->base.crtc->dev; 465 const intel_limit_t *limit; 466 467 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 468 if (intel_is_dual_link_lvds(dev)) { 469 if (refclk == 100000) 470 limit = &intel_limits_ironlake_dual_lvds_100m; 471 else 472 limit = &intel_limits_ironlake_dual_lvds; 473 } else { 474 if (refclk == 100000) 475 limit = &intel_limits_ironlake_single_lvds_100m; 476 else 477 limit = &intel_limits_ironlake_single_lvds; 478 } 479 } else 480 limit = &intel_limits_ironlake_dac; 481 482 return limit; 483 } 484 485 static const intel_limit_t * 486 intel_g4x_limit(struct intel_crtc_state *crtc_state) 487 { 488 struct drm_device *dev = crtc_state->base.crtc->dev; 489 const intel_limit_t *limit; 490 491 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 492 if (intel_is_dual_link_lvds(dev)) 493 limit = &intel_limits_g4x_dual_channel_lvds; 494 else 495 limit = &intel_limits_g4x_single_channel_lvds; 496 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || 497 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 498 limit = &intel_limits_g4x_hdmi; 499 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { 500 limit = &intel_limits_g4x_sdvo; 501 } else /* The option is for other outputs */ 502 limit = &intel_limits_i9xx_sdvo; 503 504 return limit; 505 } 506 507 static const intel_limit_t * 508 intel_limit(struct intel_crtc_state *crtc_state, int refclk) 509 { 510 struct drm_device *dev = crtc_state->base.crtc->dev; 511 const intel_limit_t *limit; 512 513 if (IS_BROXTON(dev)) 514 limit = &intel_limits_bxt; 515 else if (HAS_PCH_SPLIT(dev)) 516 limit = intel_ironlake_limit(crtc_state, refclk); 517 else if (IS_G4X(dev)) { 518 limit = intel_g4x_limit(crtc_state); 519 } else if (IS_PINEVIEW(dev)) { 520 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 521 limit = &intel_limits_pineview_lvds; 522 else 523 limit = &intel_limits_pineview_sdvo; 524 } else if (IS_CHERRYVIEW(dev)) { 525 limit = &intel_limits_chv; 526 } else if (IS_VALLEYVIEW(dev)) { 527 limit = &intel_limits_vlv; 528 } else if (!IS_GEN2(dev)) { 529 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 530 limit = &intel_limits_i9xx_lvds; 531 else 532 limit = &intel_limits_i9xx_sdvo; 533 } else { 534 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 535 limit = &intel_limits_i8xx_lvds; 536 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 537 limit = &intel_limits_i8xx_dvo; 538 else 539 limit = &intel_limits_i8xx_dac; 540 } 541 return limit; 542 } 543 544 /* 545 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 546 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 547 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 548 * The helpers' return value is the rate of the clock that is fed to the 549 * display engine's pipe which can be the above fast dot clock rate or a 550 * divided-down version of it. 551 */ 552 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 553 static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 554 { 555 clock->m = clock->m2 + 2; 556 clock->p = clock->p1 * clock->p2; 557 if (WARN_ON(clock->n == 0 || clock->p == 0)) 558 return 0; 559 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 560 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 561 562 return clock->dot; 563 } 564 565 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 566 { 567 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 568 } 569 570 static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 571 { 572 clock->m = i9xx_dpll_compute_m(clock); 573 clock->p = clock->p1 * clock->p2; 574 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 575 return 0; 576 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 577 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 578 579 return clock->dot; 580 } 581 582 static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 583 { 584 clock->m = clock->m1 * clock->m2; 585 clock->p = clock->p1 * clock->p2; 586 if (WARN_ON(clock->n == 0 || clock->p == 0)) 587 return 0; 588 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 589 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 590 591 return clock->dot / 5; 592 } 593 594 int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 595 { 596 clock->m = clock->m1 * clock->m2; 597 clock->p = clock->p1 * clock->p2; 598 if (WARN_ON(clock->n == 0 || clock->p == 0)) 599 return 0; 600 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 601 clock->n << 22); 602 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 603 604 return clock->dot / 5; 605 } 606 607 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 608 /** 609 * Returns whether the given set of divisors are valid for a given refclk with 610 * the given connectors. 611 */ 612 613 static bool intel_PLL_is_valid(struct drm_device *dev, 614 const intel_limit_t *limit, 615 const intel_clock_t *clock) 616 { 617 if (clock->n < limit->n.min || limit->n.max < clock->n) 618 INTELPllInvalid("n out of range\n"); 619 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 620 INTELPllInvalid("p1 out of range\n"); 621 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 622 INTELPllInvalid("m2 out of range\n"); 623 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 624 INTELPllInvalid("m1 out of range\n"); 625 626 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) 627 if (clock->m1 <= clock->m2) 628 INTELPllInvalid("m1 <= m2\n"); 629 630 if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) { 631 if (clock->p < limit->p.min || limit->p.max < clock->p) 632 INTELPllInvalid("p out of range\n"); 633 if (clock->m < limit->m.min || limit->m.max < clock->m) 634 INTELPllInvalid("m out of range\n"); 635 } 636 637 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 638 INTELPllInvalid("vco out of range\n"); 639 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 640 * connector, etc., rather than just a single range. 641 */ 642 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 643 INTELPllInvalid("dot out of range\n"); 644 645 return true; 646 } 647 648 static int 649 i9xx_select_p2_div(const intel_limit_t *limit, 650 const struct intel_crtc_state *crtc_state, 651 int target) 652 { 653 struct drm_device *dev = crtc_state->base.crtc->dev; 654 655 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 656 /* 657 * For LVDS just rely on its current settings for dual-channel. 658 * We haven't figured out how to reliably set up different 659 * single/dual channel state, if we even can. 660 */ 661 if (intel_is_dual_link_lvds(dev)) 662 return limit->p2.p2_fast; 663 else 664 return limit->p2.p2_slow; 665 } else { 666 if (target < limit->p2.dot_limit) 667 return limit->p2.p2_slow; 668 else 669 return limit->p2.p2_fast; 670 } 671 } 672 673 static bool 674 i9xx_find_best_dpll(const intel_limit_t *limit, 675 struct intel_crtc_state *crtc_state, 676 int target, int refclk, intel_clock_t *match_clock, 677 intel_clock_t *best_clock) 678 { 679 struct drm_device *dev = crtc_state->base.crtc->dev; 680 intel_clock_t clock; 681 int err = target; 682 683 memset(best_clock, 0, sizeof(*best_clock)); 684 685 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 686 687 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 688 clock.m1++) { 689 for (clock.m2 = limit->m2.min; 690 clock.m2 <= limit->m2.max; clock.m2++) { 691 if (clock.m2 >= clock.m1) 692 break; 693 for (clock.n = limit->n.min; 694 clock.n <= limit->n.max; clock.n++) { 695 for (clock.p1 = limit->p1.min; 696 clock.p1 <= limit->p1.max; clock.p1++) { 697 int this_err; 698 699 i9xx_calc_dpll_params(refclk, &clock); 700 if (!intel_PLL_is_valid(dev, limit, 701 &clock)) 702 continue; 703 if (match_clock && 704 clock.p != match_clock->p) 705 continue; 706 707 this_err = abs(clock.dot - target); 708 if (this_err < err) { 709 *best_clock = clock; 710 err = this_err; 711 } 712 } 713 } 714 } 715 } 716 717 return (err != target); 718 } 719 720 static bool 721 pnv_find_best_dpll(const intel_limit_t *limit, 722 struct intel_crtc_state *crtc_state, 723 int target, int refclk, intel_clock_t *match_clock, 724 intel_clock_t *best_clock) 725 { 726 struct drm_device *dev = crtc_state->base.crtc->dev; 727 intel_clock_t clock; 728 int err = target; 729 730 memset(best_clock, 0, sizeof(*best_clock)); 731 732 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 733 734 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 735 clock.m1++) { 736 for (clock.m2 = limit->m2.min; 737 clock.m2 <= limit->m2.max; clock.m2++) { 738 for (clock.n = limit->n.min; 739 clock.n <= limit->n.max; clock.n++) { 740 for (clock.p1 = limit->p1.min; 741 clock.p1 <= limit->p1.max; clock.p1++) { 742 int this_err; 743 744 pnv_calc_dpll_params(refclk, &clock); 745 if (!intel_PLL_is_valid(dev, limit, 746 &clock)) 747 continue; 748 if (match_clock && 749 clock.p != match_clock->p) 750 continue; 751 752 this_err = abs(clock.dot - target); 753 if (this_err < err) { 754 *best_clock = clock; 755 err = this_err; 756 } 757 } 758 } 759 } 760 } 761 762 return (err != target); 763 } 764 765 static bool 766 g4x_find_best_dpll(const intel_limit_t *limit, 767 struct intel_crtc_state *crtc_state, 768 int target, int refclk, intel_clock_t *match_clock, 769 intel_clock_t *best_clock) 770 { 771 struct drm_device *dev = crtc_state->base.crtc->dev; 772 intel_clock_t clock; 773 int max_n; 774 bool found = false; 775 /* approximately equals target * 0.00585 */ 776 int err_most = (target >> 8) + (target >> 9); 777 778 memset(best_clock, 0, sizeof(*best_clock)); 779 780 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 781 782 max_n = limit->n.max; 783 /* based on hardware requirement, prefer smaller n to precision */ 784 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 785 /* based on hardware requirement, prefere larger m1,m2 */ 786 for (clock.m1 = limit->m1.max; 787 clock.m1 >= limit->m1.min; clock.m1--) { 788 for (clock.m2 = limit->m2.max; 789 clock.m2 >= limit->m2.min; clock.m2--) { 790 for (clock.p1 = limit->p1.max; 791 clock.p1 >= limit->p1.min; clock.p1--) { 792 int this_err; 793 794 i9xx_calc_dpll_params(refclk, &clock); 795 if (!intel_PLL_is_valid(dev, limit, 796 &clock)) 797 continue; 798 799 this_err = abs(clock.dot - target); 800 if (this_err < err_most) { 801 *best_clock = clock; 802 err_most = this_err; 803 max_n = clock.n; 804 found = true; 805 } 806 } 807 } 808 } 809 } 810 return found; 811 } 812 813 /* 814 * Check if the calculated PLL configuration is more optimal compared to the 815 * best configuration and error found so far. Return the calculated error. 816 */ 817 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 818 const intel_clock_t *calculated_clock, 819 const intel_clock_t *best_clock, 820 unsigned int best_error_ppm, 821 unsigned int *error_ppm) 822 { 823 /* 824 * For CHV ignore the error and consider only the P value. 825 * Prefer a bigger P value based on HW requirements. 826 */ 827 if (IS_CHERRYVIEW(dev)) { 828 *error_ppm = 0; 829 830 return calculated_clock->p > best_clock->p; 831 } 832 833 if (WARN_ON_ONCE(!target_freq)) 834 return false; 835 836 *error_ppm = div_u64(1000000ULL * 837 abs(target_freq - calculated_clock->dot), 838 target_freq); 839 /* 840 * Prefer a better P value over a better (smaller) error if the error 841 * is small. Ensure this preference for future configurations too by 842 * setting the error to 0. 843 */ 844 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 845 *error_ppm = 0; 846 847 return true; 848 } 849 850 return *error_ppm + 10 < best_error_ppm; 851 } 852 853 static bool 854 vlv_find_best_dpll(const intel_limit_t *limit, 855 struct intel_crtc_state *crtc_state, 856 int target, int refclk, intel_clock_t *match_clock, 857 intel_clock_t *best_clock) 858 { 859 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 860 struct drm_device *dev = crtc->base.dev; 861 intel_clock_t clock; 862 unsigned int bestppm = 1000000; 863 /* min update 19.2 MHz */ 864 int max_n = min(limit->n.max, refclk / 19200); 865 bool found = false; 866 867 target *= 5; /* fast clock */ 868 869 memset(best_clock, 0, sizeof(*best_clock)); 870 871 /* based on hardware requirement, prefer smaller n to precision */ 872 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 873 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 874 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 875 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 876 clock.p = clock.p1 * clock.p2; 877 /* based on hardware requirement, prefer bigger m1,m2 values */ 878 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 879 unsigned int ppm; 880 881 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 882 refclk * clock.m1); 883 884 vlv_calc_dpll_params(refclk, &clock); 885 886 if (!intel_PLL_is_valid(dev, limit, 887 &clock)) 888 continue; 889 890 if (!vlv_PLL_is_optimal(dev, target, 891 &clock, 892 best_clock, 893 bestppm, &ppm)) 894 continue; 895 896 *best_clock = clock; 897 bestppm = ppm; 898 found = true; 899 } 900 } 901 } 902 } 903 904 return found; 905 } 906 907 static bool 908 chv_find_best_dpll(const intel_limit_t *limit, 909 struct intel_crtc_state *crtc_state, 910 int target, int refclk, intel_clock_t *match_clock, 911 intel_clock_t *best_clock) 912 { 913 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 914 struct drm_device *dev = crtc->base.dev; 915 unsigned int best_error_ppm; 916 intel_clock_t clock; 917 uint64_t m2; 918 int found = false; 919 920 memset(best_clock, 0, sizeof(*best_clock)); 921 best_error_ppm = 1000000; 922 923 /* 924 * Based on hardware doc, the n always set to 1, and m1 always 925 * set to 2. If requires to support 200Mhz refclk, we need to 926 * revisit this because n may not 1 anymore. 927 */ 928 clock.n = 1, clock.m1 = 2; 929 target *= 5; /* fast clock */ 930 931 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 932 for (clock.p2 = limit->p2.p2_fast; 933 clock.p2 >= limit->p2.p2_slow; 934 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 935 unsigned int error_ppm; 936 937 clock.p = clock.p1 * clock.p2; 938 939 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 940 clock.n) << 22, refclk * clock.m1); 941 942 if (m2 > INT_MAX/clock.m1) 943 continue; 944 945 clock.m2 = m2; 946 947 chv_calc_dpll_params(refclk, &clock); 948 949 if (!intel_PLL_is_valid(dev, limit, &clock)) 950 continue; 951 952 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 953 best_error_ppm, &error_ppm)) 954 continue; 955 956 *best_clock = clock; 957 best_error_ppm = error_ppm; 958 found = true; 959 } 960 } 961 962 return found; 963 } 964 965 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 966 intel_clock_t *best_clock) 967 { 968 int refclk = i9xx_get_refclk(crtc_state, 0); 969 970 return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state, 971 target_clock, refclk, NULL, best_clock); 972 } 973 974 bool intel_crtc_active(struct drm_crtc *crtc) 975 { 976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 977 978 /* Be paranoid as we can arrive here with only partial 979 * state retrieved from the hardware during setup. 980 * 981 * We can ditch the adjusted_mode.crtc_clock check as soon 982 * as Haswell has gained clock readout/fastboot support. 983 * 984 * We can ditch the crtc->primary->fb check as soon as we can 985 * properly reconstruct framebuffers. 986 * 987 * FIXME: The intel_crtc->active here should be switched to 988 * crtc->state->active once we have proper CRTC states wired up 989 * for atomic. 990 */ 991 return intel_crtc->active && crtc->primary->state->fb && 992 intel_crtc->config->base.adjusted_mode.crtc_clock; 993 } 994 995 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 996 enum i915_pipe pipe) 997 { 998 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 999 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1000 1001 return intel_crtc->config->cpu_transcoder; 1002 } 1003 1004 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe) 1005 { 1006 struct drm_i915_private *dev_priv = dev->dev_private; 1007 u32 reg = PIPEDSL(pipe); 1008 u32 line1, line2; 1009 u32 line_mask; 1010 1011 if (IS_GEN2(dev)) 1012 line_mask = DSL_LINEMASK_GEN2; 1013 else 1014 line_mask = DSL_LINEMASK_GEN3; 1015 1016 line1 = I915_READ(reg) & line_mask; 1017 msleep(5); 1018 line2 = I915_READ(reg) & line_mask; 1019 1020 return line1 == line2; 1021 } 1022 1023 /* 1024 * intel_wait_for_pipe_off - wait for pipe to turn off 1025 * @crtc: crtc whose pipe to wait for 1026 * 1027 * After disabling a pipe, we can't wait for vblank in the usual way, 1028 * spinning on the vblank interrupt status bit, since we won't actually 1029 * see an interrupt when the pipe is disabled. 1030 * 1031 * On Gen4 and above: 1032 * wait for the pipe register state bit to turn off 1033 * 1034 * Otherwise: 1035 * wait for the display line value to settle (it usually 1036 * ends up stopping at the start of the next frame). 1037 * 1038 */ 1039 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1040 { 1041 struct drm_device *dev = crtc->base.dev; 1042 struct drm_i915_private *dev_priv = dev->dev_private; 1043 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1044 enum i915_pipe pipe = crtc->pipe; 1045 1046 if (INTEL_INFO(dev)->gen >= 4) { 1047 int reg = PIPECONF(cpu_transcoder); 1048 1049 /* Wait for the Pipe State to go off */ 1050 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1051 100)) 1052 WARN(1, "pipe_off wait timed out\n"); 1053 } else { 1054 /* Wait for the display line to settle */ 1055 if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) 1056 WARN(1, "pipe_off wait timed out\n"); 1057 } 1058 } 1059 1060 /* 1061 * ibx_digital_port_connected - is the specified port connected? 1062 * @dev_priv: i915 private structure 1063 * @port: the port to test 1064 * 1065 * Returns true if @port is connected, false otherwise. 1066 */ 1067 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 1068 struct intel_digital_port *port) 1069 { 1070 u32 bit; 1071 1072 if (HAS_PCH_IBX(dev_priv->dev)) { 1073 switch (port->port) { 1074 case PORT_B: 1075 bit = SDE_PORTB_HOTPLUG; 1076 break; 1077 case PORT_C: 1078 bit = SDE_PORTC_HOTPLUG; 1079 break; 1080 case PORT_D: 1081 bit = SDE_PORTD_HOTPLUG; 1082 break; 1083 default: 1084 return true; 1085 } 1086 } else { 1087 switch (port->port) { 1088 case PORT_B: 1089 bit = SDE_PORTB_HOTPLUG_CPT; 1090 break; 1091 case PORT_C: 1092 bit = SDE_PORTC_HOTPLUG_CPT; 1093 break; 1094 case PORT_D: 1095 bit = SDE_PORTD_HOTPLUG_CPT; 1096 break; 1097 case PORT_E: 1098 bit = SDE_PORTE_HOTPLUG_SPT; 1099 break; 1100 default: 1101 return true; 1102 } 1103 } 1104 1105 return I915_READ(SDEISR) & bit; 1106 } 1107 1108 static const char *state_string(bool enabled) 1109 { 1110 return enabled ? "on" : "off"; 1111 } 1112 1113 /* Only for pre-ILK configs */ 1114 void assert_pll(struct drm_i915_private *dev_priv, 1115 enum i915_pipe pipe, bool state) 1116 { 1117 int reg; 1118 u32 val; 1119 bool cur_state; 1120 1121 reg = DPLL(pipe); 1122 val = I915_READ(reg); 1123 cur_state = !!(val & DPLL_VCO_ENABLE); 1124 I915_STATE_WARN(cur_state != state, 1125 "PLL state assertion failure (expected %s, current %s)\n", 1126 state_string(state), state_string(cur_state)); 1127 } 1128 1129 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1130 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1131 { 1132 u32 val; 1133 bool cur_state; 1134 1135 mutex_lock(&dev_priv->sb_lock); 1136 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1137 mutex_unlock(&dev_priv->sb_lock); 1138 1139 cur_state = val & DSI_PLL_VCO_EN; 1140 I915_STATE_WARN(cur_state != state, 1141 "DSI PLL state assertion failure (expected %s, current %s)\n", 1142 state_string(state), state_string(cur_state)); 1143 } 1144 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) 1145 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) 1146 1147 struct intel_shared_dpll * 1148 intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 1149 { 1150 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1151 1152 if (crtc->config->shared_dpll < 0) 1153 return NULL; 1154 1155 return &dev_priv->shared_dplls[crtc->config->shared_dpll]; 1156 } 1157 1158 /* For ILK+ */ 1159 void assert_shared_dpll(struct drm_i915_private *dev_priv, 1160 struct intel_shared_dpll *pll, 1161 bool state) 1162 { 1163 bool cur_state; 1164 struct intel_dpll_hw_state hw_state; 1165 1166 if (WARN (!pll, 1167 "asserting DPLL %s with no DPLL\n", state_string(state))) 1168 return; 1169 1170 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); 1171 I915_STATE_WARN(cur_state != state, 1172 "%s assertion failure (expected %s, current %s)\n", 1173 pll->name, state_string(state), state_string(cur_state)); 1174 } 1175 1176 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1177 enum i915_pipe pipe, bool state) 1178 { 1179 int reg; 1180 u32 val; 1181 bool cur_state; 1182 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1183 pipe); 1184 1185 if (HAS_DDI(dev_priv->dev)) { 1186 /* DDI does not have a specific FDI_TX register */ 1187 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1188 val = I915_READ(reg); 1189 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1190 } else { 1191 reg = FDI_TX_CTL(pipe); 1192 val = I915_READ(reg); 1193 cur_state = !!(val & FDI_TX_ENABLE); 1194 } 1195 I915_STATE_WARN(cur_state != state, 1196 "FDI TX state assertion failure (expected %s, current %s)\n", 1197 state_string(state), state_string(cur_state)); 1198 } 1199 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1200 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1201 1202 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1203 enum i915_pipe pipe, bool state) 1204 { 1205 int reg; 1206 u32 val; 1207 bool cur_state; 1208 1209 reg = FDI_RX_CTL(pipe); 1210 val = I915_READ(reg); 1211 cur_state = !!(val & FDI_RX_ENABLE); 1212 I915_STATE_WARN(cur_state != state, 1213 "FDI RX state assertion failure (expected %s, current %s)\n", 1214 state_string(state), state_string(cur_state)); 1215 } 1216 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1217 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1218 1219 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1220 enum i915_pipe pipe) 1221 { 1222 int reg; 1223 u32 val; 1224 1225 /* ILK FDI PLL is always enabled */ 1226 if (INTEL_INFO(dev_priv->dev)->gen == 5) 1227 return; 1228 1229 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1230 if (HAS_DDI(dev_priv->dev)) 1231 return; 1232 1233 reg = FDI_TX_CTL(pipe); 1234 val = I915_READ(reg); 1235 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1236 } 1237 1238 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1239 enum i915_pipe pipe, bool state) 1240 { 1241 int reg; 1242 u32 val; 1243 bool cur_state; 1244 1245 reg = FDI_RX_CTL(pipe); 1246 val = I915_READ(reg); 1247 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1248 I915_STATE_WARN(cur_state != state, 1249 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1250 state_string(state), state_string(cur_state)); 1251 } 1252 1253 void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1254 enum i915_pipe pipe) 1255 { 1256 struct drm_device *dev = dev_priv->dev; 1257 int pp_reg; 1258 u32 val; 1259 enum i915_pipe panel_pipe = PIPE_A; 1260 bool locked = true; 1261 1262 if (WARN_ON(HAS_DDI(dev))) 1263 return; 1264 1265 if (HAS_PCH_SPLIT(dev)) { 1266 u32 port_sel; 1267 1268 pp_reg = PCH_PP_CONTROL; 1269 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; 1270 1271 if (port_sel == PANEL_PORT_SELECT_LVDS && 1272 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1273 panel_pipe = PIPE_B; 1274 /* XXX: else fix for eDP */ 1275 } else if (IS_VALLEYVIEW(dev)) { 1276 /* presumably write lock depends on pipe, not port select */ 1277 pp_reg = VLV_PIPE_PP_CONTROL(pipe); 1278 panel_pipe = pipe; 1279 } else { 1280 pp_reg = PP_CONTROL; 1281 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1282 panel_pipe = PIPE_B; 1283 } 1284 1285 val = I915_READ(pp_reg); 1286 if (!(val & PANEL_POWER_ON) || 1287 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1288 locked = false; 1289 1290 I915_STATE_WARN(panel_pipe == pipe && locked, 1291 "panel assertion failure, pipe %c regs locked\n", 1292 pipe_name(pipe)); 1293 } 1294 1295 static void assert_cursor(struct drm_i915_private *dev_priv, 1296 enum i915_pipe pipe, bool state) 1297 { 1298 struct drm_device *dev = dev_priv->dev; 1299 bool cur_state; 1300 1301 if (IS_845G(dev) || IS_I865G(dev)) 1302 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1303 else 1304 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1305 1306 I915_STATE_WARN(cur_state != state, 1307 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1308 pipe_name(pipe), state_string(state), state_string(cur_state)); 1309 } 1310 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1311 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1312 1313 void assert_pipe(struct drm_i915_private *dev_priv, 1314 enum i915_pipe pipe, bool state) 1315 { 1316 int reg; 1317 u32 val; 1318 bool cur_state; 1319 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1320 pipe); 1321 1322 /* if we need the pipe quirk it must be always on */ 1323 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1324 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1325 state = true; 1326 1327 if (!intel_display_power_is_enabled(dev_priv, 1328 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1329 cur_state = false; 1330 } else { 1331 reg = PIPECONF(cpu_transcoder); 1332 val = I915_READ(reg); 1333 cur_state = !!(val & PIPECONF_ENABLE); 1334 } 1335 1336 I915_STATE_WARN(cur_state != state, 1337 "pipe %c assertion failure (expected %s, current %s)\n", 1338 pipe_name(pipe), state_string(state), state_string(cur_state)); 1339 } 1340 1341 static void assert_plane(struct drm_i915_private *dev_priv, 1342 enum plane plane, bool state) 1343 { 1344 int reg; 1345 u32 val; 1346 bool cur_state; 1347 1348 reg = DSPCNTR(plane); 1349 val = I915_READ(reg); 1350 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1351 I915_STATE_WARN(cur_state != state, 1352 "plane %c assertion failure (expected %s, current %s)\n", 1353 plane_name(plane), state_string(state), state_string(cur_state)); 1354 } 1355 1356 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1357 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1358 1359 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1360 enum i915_pipe pipe) 1361 { 1362 struct drm_device *dev = dev_priv->dev; 1363 int reg, i; 1364 u32 val; 1365 int cur_pipe; 1366 1367 /* Primary planes are fixed to pipes on gen4+ */ 1368 if (INTEL_INFO(dev)->gen >= 4) { 1369 reg = DSPCNTR(pipe); 1370 val = I915_READ(reg); 1371 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1372 "plane %c assertion failure, should be disabled but not\n", 1373 plane_name(pipe)); 1374 return; 1375 } 1376 1377 /* Need to check both planes against the pipe */ 1378 for_each_pipe(dev_priv, i) { 1379 reg = DSPCNTR(i); 1380 val = I915_READ(reg); 1381 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1382 DISPPLANE_SEL_PIPE_SHIFT; 1383 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1384 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1385 plane_name(i), pipe_name(pipe)); 1386 } 1387 } 1388 1389 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1390 enum i915_pipe pipe) 1391 { 1392 struct drm_device *dev = dev_priv->dev; 1393 int reg, sprite; 1394 u32 val; 1395 1396 if (INTEL_INFO(dev)->gen >= 9) { 1397 for_each_sprite(dev_priv, pipe, sprite) { 1398 val = I915_READ(PLANE_CTL(pipe, sprite)); 1399 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1400 "plane %d assertion failure, should be off on pipe %c but is still active\n", 1401 sprite, pipe_name(pipe)); 1402 } 1403 } else if (IS_VALLEYVIEW(dev)) { 1404 for_each_sprite(dev_priv, pipe, sprite) { 1405 reg = SPCNTR(pipe, sprite); 1406 val = I915_READ(reg); 1407 I915_STATE_WARN(val & SP_ENABLE, 1408 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1409 sprite_name(pipe, sprite), pipe_name(pipe)); 1410 } 1411 } else if (INTEL_INFO(dev)->gen >= 7) { 1412 reg = SPRCTL(pipe); 1413 val = I915_READ(reg); 1414 I915_STATE_WARN(val & SPRITE_ENABLE, 1415 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1416 plane_name(pipe), pipe_name(pipe)); 1417 } else if (INTEL_INFO(dev)->gen >= 5) { 1418 reg = DVSCNTR(pipe); 1419 val = I915_READ(reg); 1420 I915_STATE_WARN(val & DVS_ENABLE, 1421 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1422 plane_name(pipe), pipe_name(pipe)); 1423 } 1424 } 1425 1426 static void assert_vblank_disabled(struct drm_crtc *crtc) 1427 { 1428 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1429 drm_crtc_vblank_put(crtc); 1430 } 1431 1432 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1433 { 1434 u32 val; 1435 bool enabled; 1436 1437 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); 1438 1439 val = I915_READ(PCH_DREF_CONTROL); 1440 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1441 DREF_SUPERSPREAD_SOURCE_MASK)); 1442 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1443 } 1444 1445 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1446 enum i915_pipe pipe) 1447 { 1448 int reg; 1449 u32 val; 1450 bool enabled; 1451 1452 reg = PCH_TRANSCONF(pipe); 1453 val = I915_READ(reg); 1454 enabled = !!(val & TRANS_ENABLE); 1455 I915_STATE_WARN(enabled, 1456 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1457 pipe_name(pipe)); 1458 } 1459 1460 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1461 enum i915_pipe pipe, u32 port_sel, u32 val) 1462 { 1463 if ((val & DP_PORT_EN) == 0) 1464 return false; 1465 1466 if (HAS_PCH_CPT(dev_priv->dev)) { 1467 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1468 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1469 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1470 return false; 1471 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1472 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1473 return false; 1474 } else { 1475 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1476 return false; 1477 } 1478 return true; 1479 } 1480 1481 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1482 enum i915_pipe pipe, u32 val) 1483 { 1484 if ((val & SDVO_ENABLE) == 0) 1485 return false; 1486 1487 if (HAS_PCH_CPT(dev_priv->dev)) { 1488 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1489 return false; 1490 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1491 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1492 return false; 1493 } else { 1494 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1495 return false; 1496 } 1497 return true; 1498 } 1499 1500 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1501 enum i915_pipe pipe, u32 val) 1502 { 1503 if ((val & LVDS_PORT_EN) == 0) 1504 return false; 1505 1506 if (HAS_PCH_CPT(dev_priv->dev)) { 1507 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1508 return false; 1509 } else { 1510 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1511 return false; 1512 } 1513 return true; 1514 } 1515 1516 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1517 enum i915_pipe pipe, u32 val) 1518 { 1519 if ((val & ADPA_DAC_ENABLE) == 0) 1520 return false; 1521 if (HAS_PCH_CPT(dev_priv->dev)) { 1522 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1523 return false; 1524 } else { 1525 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1526 return false; 1527 } 1528 return true; 1529 } 1530 1531 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1532 enum i915_pipe pipe, int reg, u32 port_sel) 1533 { 1534 u32 val = I915_READ(reg); 1535 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1536 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1537 reg, pipe_name(pipe)); 1538 1539 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1540 && (val & DP_PIPEB_SELECT), 1541 "IBX PCH dp port still using transcoder B\n"); 1542 } 1543 1544 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1545 enum i915_pipe pipe, int reg) 1546 { 1547 u32 val = I915_READ(reg); 1548 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1549 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1550 reg, pipe_name(pipe)); 1551 1552 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1553 && (val & SDVO_PIPE_B_SELECT), 1554 "IBX PCH hdmi port still using transcoder B\n"); 1555 } 1556 1557 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1558 enum i915_pipe pipe) 1559 { 1560 int reg; 1561 u32 val; 1562 1563 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1564 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1565 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1566 1567 reg = PCH_ADPA; 1568 val = I915_READ(reg); 1569 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1570 "PCH VGA enabled on transcoder %c, should be disabled\n", 1571 pipe_name(pipe)); 1572 1573 reg = PCH_LVDS; 1574 val = I915_READ(reg); 1575 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1576 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1577 pipe_name(pipe)); 1578 1579 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1580 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1581 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1582 } 1583 1584 static void intel_init_dpio(struct drm_device *dev) 1585 { 1586 struct drm_i915_private *dev_priv = dev->dev_private; 1587 1588 if (!IS_VALLEYVIEW(dev)) 1589 return; 1590 1591 /* 1592 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 1593 * CHV x1 PHY (DP/HDMI D) 1594 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 1595 */ 1596 if (IS_CHERRYVIEW(dev)) { 1597 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 1598 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 1599 } else { 1600 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1601 } 1602 } 1603 1604 static void vlv_enable_pll(struct intel_crtc *crtc, 1605 const struct intel_crtc_state *pipe_config) 1606 { 1607 struct drm_device *dev = crtc->base.dev; 1608 struct drm_i915_private *dev_priv = dev->dev_private; 1609 int reg = DPLL(crtc->pipe); 1610 u32 dpll = pipe_config->dpll_hw_state.dpll; 1611 1612 assert_pipe_disabled(dev_priv, crtc->pipe); 1613 1614 /* No really, not for ILK+ */ 1615 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1616 1617 /* PLL is protected by panel, make sure we can write it */ 1618 if (IS_MOBILE(dev_priv->dev)) 1619 assert_panel_unlocked(dev_priv, crtc->pipe); 1620 1621 I915_WRITE(reg, dpll); 1622 POSTING_READ(reg); 1623 udelay(150); 1624 1625 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1626 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1627 1628 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md); 1629 POSTING_READ(DPLL_MD(crtc->pipe)); 1630 1631 /* We do this three times for luck */ 1632 I915_WRITE(reg, dpll); 1633 POSTING_READ(reg); 1634 udelay(150); /* wait for warmup */ 1635 I915_WRITE(reg, dpll); 1636 POSTING_READ(reg); 1637 udelay(150); /* wait for warmup */ 1638 I915_WRITE(reg, dpll); 1639 POSTING_READ(reg); 1640 udelay(150); /* wait for warmup */ 1641 } 1642 1643 static void chv_enable_pll(struct intel_crtc *crtc, 1644 const struct intel_crtc_state *pipe_config) 1645 { 1646 struct drm_device *dev = crtc->base.dev; 1647 struct drm_i915_private *dev_priv = dev->dev_private; 1648 int pipe = crtc->pipe; 1649 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1650 u32 tmp; 1651 1652 assert_pipe_disabled(dev_priv, crtc->pipe); 1653 1654 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); 1655 1656 mutex_lock(&dev_priv->sb_lock); 1657 1658 /* Enable back the 10bit clock to display controller */ 1659 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1660 tmp |= DPIO_DCLKP_EN; 1661 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1662 1663 mutex_unlock(&dev_priv->sb_lock); 1664 1665 /* 1666 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 1667 */ 1668 udelay(1); 1669 1670 /* Enable PLL */ 1671 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1672 1673 /* Check PLL is locked */ 1674 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1675 DRM_ERROR("PLL %d failed to lock\n", pipe); 1676 1677 /* not sure when this should be written */ 1678 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1679 POSTING_READ(DPLL_MD(pipe)); 1680 } 1681 1682 static int intel_num_dvo_pipes(struct drm_device *dev) 1683 { 1684 struct intel_crtc *crtc; 1685 int count = 0; 1686 1687 for_each_intel_crtc(dev, crtc) 1688 count += crtc->base.state->active && 1689 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1690 1691 return count; 1692 } 1693 1694 static void i9xx_enable_pll(struct intel_crtc *crtc) 1695 { 1696 struct drm_device *dev = crtc->base.dev; 1697 struct drm_i915_private *dev_priv = dev->dev_private; 1698 int reg = DPLL(crtc->pipe); 1699 u32 dpll = crtc->config->dpll_hw_state.dpll; 1700 1701 assert_pipe_disabled(dev_priv, crtc->pipe); 1702 1703 /* No really, not for ILK+ */ 1704 BUG_ON(INTEL_INFO(dev)->gen >= 5); 1705 1706 /* PLL is protected by panel, make sure we can write it */ 1707 if (IS_MOBILE(dev) && !IS_I830(dev)) 1708 assert_panel_unlocked(dev_priv, crtc->pipe); 1709 1710 /* Enable DVO 2x clock on both PLLs if necessary */ 1711 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { 1712 /* 1713 * It appears to be important that we don't enable this 1714 * for the current pipe before otherwise configuring the 1715 * PLL. No idea how this should be handled if multiple 1716 * DVO outputs are enabled simultaneosly. 1717 */ 1718 dpll |= DPLL_DVO_2X_MODE; 1719 I915_WRITE(DPLL(!crtc->pipe), 1720 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1721 } 1722 1723 /* 1724 * Apparently we need to have VGA mode enabled prior to changing 1725 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 1726 * dividers, even though the register value does change. 1727 */ 1728 I915_WRITE(reg, 0); 1729 1730 I915_WRITE(reg, dpll); 1731 1732 /* Wait for the clocks to stabilize. */ 1733 POSTING_READ(reg); 1734 udelay(150); 1735 1736 if (INTEL_INFO(dev)->gen >= 4) { 1737 I915_WRITE(DPLL_MD(crtc->pipe), 1738 crtc->config->dpll_hw_state.dpll_md); 1739 } else { 1740 /* The pixel multiplier can only be updated once the 1741 * DPLL is enabled and the clocks are stable. 1742 * 1743 * So write it again. 1744 */ 1745 I915_WRITE(reg, dpll); 1746 } 1747 1748 /* We do this three times for luck */ 1749 I915_WRITE(reg, dpll); 1750 POSTING_READ(reg); 1751 udelay(150); /* wait for warmup */ 1752 I915_WRITE(reg, dpll); 1753 POSTING_READ(reg); 1754 udelay(150); /* wait for warmup */ 1755 I915_WRITE(reg, dpll); 1756 POSTING_READ(reg); 1757 udelay(150); /* wait for warmup */ 1758 } 1759 1760 /** 1761 * i9xx_disable_pll - disable a PLL 1762 * @dev_priv: i915 private structure 1763 * @pipe: pipe PLL to disable 1764 * 1765 * Disable the PLL for @pipe, making sure the pipe is off first. 1766 * 1767 * Note! This is for pre-ILK only. 1768 */ 1769 static void i9xx_disable_pll(struct intel_crtc *crtc) 1770 { 1771 struct drm_device *dev = crtc->base.dev; 1772 struct drm_i915_private *dev_priv = dev->dev_private; 1773 enum i915_pipe pipe = crtc->pipe; 1774 1775 /* Disable DVO 2x clock on both PLLs if necessary */ 1776 if (IS_I830(dev) && 1777 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1778 !intel_num_dvo_pipes(dev)) { 1779 I915_WRITE(DPLL(PIPE_B), 1780 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1781 I915_WRITE(DPLL(PIPE_A), 1782 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1783 } 1784 1785 /* Don't disable pipe or pipe PLLs if needed */ 1786 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1787 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1788 return; 1789 1790 /* Make sure the pipe isn't still relying on us */ 1791 assert_pipe_disabled(dev_priv, pipe); 1792 1793 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 1794 POSTING_READ(DPLL(pipe)); 1795 } 1796 1797 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1798 { 1799 u32 val; 1800 1801 /* Make sure the pipe isn't still relying on us */ 1802 assert_pipe_disabled(dev_priv, pipe); 1803 1804 /* 1805 * Leave integrated clock source and reference clock enabled for pipe B. 1806 * The latter is needed for VGA hotplug / manual detection. 1807 */ 1808 val = DPLL_VGA_MODE_DIS; 1809 if (pipe == PIPE_B) 1810 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV; 1811 I915_WRITE(DPLL(pipe), val); 1812 POSTING_READ(DPLL(pipe)); 1813 1814 } 1815 1816 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1817 { 1818 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1819 u32 val; 1820 1821 /* Make sure the pipe isn't still relying on us */ 1822 assert_pipe_disabled(dev_priv, pipe); 1823 1824 /* Set PLL en = 0 */ 1825 val = DPLL_SSC_REF_CLK_CHV | 1826 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1827 if (pipe != PIPE_A) 1828 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1829 I915_WRITE(DPLL(pipe), val); 1830 POSTING_READ(DPLL(pipe)); 1831 1832 mutex_lock(&dev_priv->sb_lock); 1833 1834 /* Disable 10bit clock to display controller */ 1835 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1836 val &= ~DPIO_DCLKP_EN; 1837 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1838 1839 /* disable left/right clock distribution */ 1840 if (pipe != PIPE_B) { 1841 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1842 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 1843 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 1844 } else { 1845 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 1846 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 1847 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 1848 } 1849 1850 mutex_unlock(&dev_priv->sb_lock); 1851 } 1852 1853 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1854 struct intel_digital_port *dport, 1855 unsigned int expected_mask) 1856 { 1857 u32 port_mask; 1858 int dpll_reg; 1859 1860 switch (dport->port) { 1861 case PORT_B: 1862 port_mask = DPLL_PORTB_READY_MASK; 1863 dpll_reg = DPLL(0); 1864 break; 1865 case PORT_C: 1866 port_mask = DPLL_PORTC_READY_MASK; 1867 dpll_reg = DPLL(0); 1868 expected_mask <<= 4; 1869 break; 1870 case PORT_D: 1871 port_mask = DPLL_PORTD_READY_MASK; 1872 dpll_reg = DPIO_PHY_STATUS; 1873 break; 1874 default: 1875 BUG(); 1876 } 1877 1878 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) 1879 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1880 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1881 } 1882 1883 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1884 { 1885 struct drm_device *dev = crtc->base.dev; 1886 struct drm_i915_private *dev_priv = dev->dev_private; 1887 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1888 1889 if (WARN_ON(pll == NULL)) 1890 return; 1891 1892 WARN_ON(!pll->config.crtc_mask); 1893 if (pll->active == 0) { 1894 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1895 WARN_ON(pll->on); 1896 assert_shared_dpll_disabled(dev_priv, pll); 1897 1898 pll->mode_set(dev_priv, pll); 1899 } 1900 } 1901 1902 /** 1903 * intel_enable_shared_dpll - enable PCH PLL 1904 * @dev_priv: i915 private structure 1905 * @pipe: pipe PLL to enable 1906 * 1907 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1908 * drives the transcoder clock. 1909 */ 1910 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1911 { 1912 struct drm_device *dev = crtc->base.dev; 1913 struct drm_i915_private *dev_priv = dev->dev_private; 1914 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1915 1916 if (WARN_ON(pll == NULL)) 1917 return; 1918 1919 if (WARN_ON(pll->config.crtc_mask == 0)) 1920 return; 1921 1922 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n", 1923 pll->name, pll->active, pll->on, 1924 crtc->base.base.id); 1925 1926 if (pll->active++) { 1927 WARN_ON(!pll->on); 1928 assert_shared_dpll_enabled(dev_priv, pll); 1929 return; 1930 } 1931 WARN_ON(pll->on); 1932 1933 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1934 1935 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1936 pll->enable(dev_priv, pll); 1937 pll->on = true; 1938 } 1939 1940 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1941 { 1942 struct drm_device *dev = crtc->base.dev; 1943 struct drm_i915_private *dev_priv = dev->dev_private; 1944 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1945 1946 /* PCH only available on ILK+ */ 1947 if (INTEL_INFO(dev)->gen < 5) 1948 return; 1949 1950 if (pll == NULL) 1951 return; 1952 1953 if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base))))) 1954 return; 1955 1956 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1957 pll->name, pll->active, pll->on, 1958 crtc->base.base.id); 1959 1960 if (WARN_ON(pll->active == 0)) { 1961 assert_shared_dpll_disabled(dev_priv, pll); 1962 return; 1963 } 1964 1965 assert_shared_dpll_enabled(dev_priv, pll); 1966 WARN_ON(!pll->on); 1967 if (--pll->active) 1968 return; 1969 1970 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1971 pll->disable(dev_priv, pll); 1972 pll->on = false; 1973 1974 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1975 } 1976 1977 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1978 enum i915_pipe pipe) 1979 { 1980 struct drm_device *dev = dev_priv->dev; 1981 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1983 uint32_t reg, val, pipeconf_val; 1984 1985 /* PCH only available on ILK+ */ 1986 BUG_ON(!HAS_PCH_SPLIT(dev)); 1987 1988 /* Make sure PCH DPLL is enabled */ 1989 assert_shared_dpll_enabled(dev_priv, 1990 intel_crtc_to_shared_dpll(intel_crtc)); 1991 1992 /* FDI must be feeding us bits for PCH ports */ 1993 assert_fdi_tx_enabled(dev_priv, pipe); 1994 assert_fdi_rx_enabled(dev_priv, pipe); 1995 1996 if (HAS_PCH_CPT(dev)) { 1997 /* Workaround: Set the timing override bit before enabling the 1998 * pch transcoder. */ 1999 reg = TRANS_CHICKEN2(pipe); 2000 val = I915_READ(reg); 2001 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2002 I915_WRITE(reg, val); 2003 } 2004 2005 reg = PCH_TRANSCONF(pipe); 2006 val = I915_READ(reg); 2007 pipeconf_val = I915_READ(PIPECONF(pipe)); 2008 2009 if (HAS_PCH_IBX(dev_priv->dev)) { 2010 /* 2011 * Make the BPC in transcoder be consistent with 2012 * that in pipeconf reg. For HDMI we must use 8bpc 2013 * here for both 8bpc and 12bpc. 2014 */ 2015 val &= ~PIPECONF_BPC_MASK; 2016 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) 2017 val |= PIPECONF_8BPC; 2018 else 2019 val |= pipeconf_val & PIPECONF_BPC_MASK; 2020 } 2021 2022 val &= ~TRANS_INTERLACE_MASK; 2023 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 2024 if (HAS_PCH_IBX(dev_priv->dev) && 2025 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 2026 val |= TRANS_LEGACY_INTERLACED_ILK; 2027 else 2028 val |= TRANS_INTERLACED; 2029 else 2030 val |= TRANS_PROGRESSIVE; 2031 2032 I915_WRITE(reg, val | TRANS_ENABLE); 2033 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2034 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 2035 } 2036 2037 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 2038 enum transcoder cpu_transcoder) 2039 { 2040 u32 val, pipeconf_val; 2041 2042 /* PCH only available on ILK+ */ 2043 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); 2044 2045 /* FDI must be feeding us bits for PCH ports */ 2046 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 2047 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2048 2049 /* Workaround: set timing override bit. */ 2050 val = I915_READ(_TRANSA_CHICKEN2); 2051 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2052 I915_WRITE(_TRANSA_CHICKEN2, val); 2053 2054 val = TRANS_ENABLE; 2055 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 2056 2057 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 2058 PIPECONF_INTERLACED_ILK) 2059 val |= TRANS_INTERLACED; 2060 else 2061 val |= TRANS_PROGRESSIVE; 2062 2063 I915_WRITE(LPT_TRANSCONF, val); 2064 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 2065 DRM_ERROR("Failed to enable PCH transcoder\n"); 2066 } 2067 2068 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 2069 enum i915_pipe pipe) 2070 { 2071 struct drm_device *dev = dev_priv->dev; 2072 uint32_t reg, val; 2073 2074 /* FDI relies on the transcoder */ 2075 assert_fdi_tx_disabled(dev_priv, pipe); 2076 assert_fdi_rx_disabled(dev_priv, pipe); 2077 2078 /* Ports must be off as well */ 2079 assert_pch_ports_disabled(dev_priv, pipe); 2080 2081 reg = PCH_TRANSCONF(pipe); 2082 val = I915_READ(reg); 2083 val &= ~TRANS_ENABLE; 2084 I915_WRITE(reg, val); 2085 /* wait for PCH transcoder off, transcoder state */ 2086 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 2087 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 2088 2089 if (!HAS_PCH_IBX(dev)) { 2090 /* Workaround: Clear the timing override chicken bit again. */ 2091 reg = TRANS_CHICKEN2(pipe); 2092 val = I915_READ(reg); 2093 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2094 I915_WRITE(reg, val); 2095 } 2096 } 2097 2098 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 2099 { 2100 u32 val; 2101 2102 val = I915_READ(LPT_TRANSCONF); 2103 val &= ~TRANS_ENABLE; 2104 I915_WRITE(LPT_TRANSCONF, val); 2105 /* wait for PCH transcoder off, transcoder state */ 2106 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 2107 DRM_ERROR("Failed to disable PCH transcoder\n"); 2108 2109 /* Workaround: clear timing override bit. */ 2110 val = I915_READ(_TRANSA_CHICKEN2); 2111 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2112 I915_WRITE(_TRANSA_CHICKEN2, val); 2113 } 2114 2115 /** 2116 * intel_enable_pipe - enable a pipe, asserting requirements 2117 * @crtc: crtc responsible for the pipe 2118 * 2119 * Enable @crtc's pipe, making sure that various hardware specific requirements 2120 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 2121 */ 2122 static void intel_enable_pipe(struct intel_crtc *crtc) 2123 { 2124 struct drm_device *dev = crtc->base.dev; 2125 struct drm_i915_private *dev_priv = dev->dev_private; 2126 enum i915_pipe pipe = crtc->pipe; 2127 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2128 pipe); 2129 enum i915_pipe pch_transcoder; 2130 int reg; 2131 u32 val; 2132 2133 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 2134 2135 assert_planes_disabled(dev_priv, pipe); 2136 assert_cursor_disabled(dev_priv, pipe); 2137 assert_sprites_disabled(dev_priv, pipe); 2138 2139 if (HAS_PCH_LPT(dev_priv->dev)) 2140 pch_transcoder = TRANSCODER_A; 2141 else 2142 pch_transcoder = pipe; 2143 2144 /* 2145 * A pipe without a PLL won't actually be able to drive bits from 2146 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 2147 * need the check. 2148 */ 2149 if (HAS_GMCH_DISPLAY(dev_priv->dev)) 2150 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 2151 assert_dsi_pll_enabled(dev_priv); 2152 else 2153 assert_pll_enabled(dev_priv, pipe); 2154 else { 2155 if (crtc->config->has_pch_encoder) { 2156 /* if driving the PCH, we need FDI enabled */ 2157 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 2158 assert_fdi_tx_pll_enabled(dev_priv, 2159 (enum i915_pipe) cpu_transcoder); 2160 } 2161 /* FIXME: assert CPU port conditions for SNB+ */ 2162 } 2163 2164 reg = PIPECONF(cpu_transcoder); 2165 val = I915_READ(reg); 2166 if (val & PIPECONF_ENABLE) { 2167 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 2168 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))); 2169 return; 2170 } 2171 2172 I915_WRITE(reg, val | PIPECONF_ENABLE); 2173 POSTING_READ(reg); 2174 } 2175 2176 /** 2177 * intel_disable_pipe - disable a pipe, asserting requirements 2178 * @crtc: crtc whose pipes is to be disabled 2179 * 2180 * Disable the pipe of @crtc, making sure that various hardware 2181 * specific requirements are met, if applicable, e.g. plane 2182 * disabled, panel fitter off, etc. 2183 * 2184 * Will wait until the pipe has shut down before returning. 2185 */ 2186 static void intel_disable_pipe(struct intel_crtc *crtc) 2187 { 2188 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2189 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2190 enum i915_pipe pipe = crtc->pipe; 2191 int reg; 2192 u32 val; 2193 2194 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2195 2196 /* 2197 * Make sure planes won't keep trying to pump pixels to us, 2198 * or we might hang the display. 2199 */ 2200 assert_planes_disabled(dev_priv, pipe); 2201 assert_cursor_disabled(dev_priv, pipe); 2202 assert_sprites_disabled(dev_priv, pipe); 2203 2204 reg = PIPECONF(cpu_transcoder); 2205 val = I915_READ(reg); 2206 if ((val & PIPECONF_ENABLE) == 0) 2207 return; 2208 2209 /* 2210 * Double wide has implications for planes 2211 * so best keep it disabled when not needed. 2212 */ 2213 if (crtc->config->double_wide) 2214 val &= ~PIPECONF_DOUBLE_WIDE; 2215 2216 /* Don't disable pipe or pipe PLLs if needed */ 2217 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2218 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2219 val &= ~PIPECONF_ENABLE; 2220 2221 I915_WRITE(reg, val); 2222 if ((val & PIPECONF_ENABLE) == 0) 2223 intel_wait_for_pipe_off(crtc); 2224 } 2225 2226 static bool need_vtd_wa(struct drm_device *dev) 2227 { 2228 #ifdef CONFIG_INTEL_IOMMU 2229 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) 2230 return true; 2231 #endif 2232 return false; 2233 } 2234 2235 unsigned int 2236 intel_tile_height(struct drm_device *dev, uint32_t pixel_format, 2237 uint64_t fb_format_modifier) 2238 { 2239 unsigned int tile_height; 2240 uint32_t pixel_bytes; 2241 2242 switch (fb_format_modifier) { 2243 case DRM_FORMAT_MOD_NONE: 2244 tile_height = 1; 2245 break; 2246 case I915_FORMAT_MOD_X_TILED: 2247 tile_height = IS_GEN2(dev) ? 16 : 8; 2248 break; 2249 case I915_FORMAT_MOD_Y_TILED: 2250 tile_height = 32; 2251 break; 2252 case I915_FORMAT_MOD_Yf_TILED: 2253 pixel_bytes = drm_format_plane_cpp(pixel_format, 0); 2254 switch (pixel_bytes) { 2255 default: 2256 case 1: 2257 tile_height = 64; 2258 break; 2259 case 2: 2260 case 4: 2261 tile_height = 32; 2262 break; 2263 case 8: 2264 tile_height = 16; 2265 break; 2266 case 16: 2267 WARN_ONCE(1, 2268 "128-bit pixels are not supported for display!"); 2269 tile_height = 16; 2270 break; 2271 } 2272 break; 2273 default: 2274 MISSING_CASE(fb_format_modifier); 2275 tile_height = 1; 2276 break; 2277 } 2278 2279 return tile_height; 2280 } 2281 2282 unsigned int 2283 intel_fb_align_height(struct drm_device *dev, unsigned int height, 2284 uint32_t pixel_format, uint64_t fb_format_modifier) 2285 { 2286 return ALIGN(height, intel_tile_height(dev, pixel_format, 2287 fb_format_modifier)); 2288 } 2289 2290 static int 2291 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, 2292 const struct drm_plane_state *plane_state) 2293 { 2294 struct intel_rotation_info *info = &view->rotation_info; 2295 unsigned int tile_height, tile_pitch; 2296 2297 *view = i915_ggtt_view_normal; 2298 2299 if (!plane_state) 2300 return 0; 2301 2302 if (!intel_rotation_90_or_270(plane_state->rotation)) 2303 return 0; 2304 2305 *view = i915_ggtt_view_rotated; 2306 2307 info->height = fb->height; 2308 info->pixel_format = fb->pixel_format; 2309 info->pitch = fb->pitches[0]; 2310 info->fb_modifier = fb->modifier[0]; 2311 2312 tile_height = intel_tile_height(fb->dev, fb->pixel_format, 2313 fb->modifier[0]); 2314 tile_pitch = PAGE_SIZE / tile_height; 2315 info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); 2316 info->height_pages = DIV_ROUND_UP(fb->height, tile_height); 2317 info->size = info->width_pages * info->height_pages * PAGE_SIZE; 2318 2319 return 0; 2320 } 2321 2322 static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv) 2323 { 2324 if (INTEL_INFO(dev_priv)->gen >= 9) 2325 return 256 * 1024; 2326 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || 2327 IS_VALLEYVIEW(dev_priv)) 2328 return 128 * 1024; 2329 else if (INTEL_INFO(dev_priv)->gen >= 4) 2330 return 4 * 1024; 2331 else 2332 return 0; 2333 } 2334 2335 int 2336 intel_pin_and_fence_fb_obj(struct drm_plane *plane, 2337 struct drm_framebuffer *fb, 2338 const struct drm_plane_state *plane_state, 2339 struct intel_engine_cs *pipelined, 2340 struct drm_i915_gem_request **pipelined_request) 2341 { 2342 struct drm_device *dev = fb->dev; 2343 struct drm_i915_private *dev_priv = dev->dev_private; 2344 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2345 struct i915_ggtt_view view; 2346 u32 alignment; 2347 int ret; 2348 2349 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2350 2351 switch (fb->modifier[0]) { 2352 case DRM_FORMAT_MOD_NONE: 2353 alignment = intel_linear_alignment(dev_priv); 2354 break; 2355 case I915_FORMAT_MOD_X_TILED: 2356 if (INTEL_INFO(dev)->gen >= 9) 2357 alignment = 256 * 1024; 2358 else { 2359 /* pin() will align the object as required by fence */ 2360 alignment = 0; 2361 } 2362 break; 2363 case I915_FORMAT_MOD_Y_TILED: 2364 case I915_FORMAT_MOD_Yf_TILED: 2365 if (WARN_ONCE(INTEL_INFO(dev)->gen < 9, 2366 "Y tiling bo slipped through, driver bug!\n")) 2367 return -EINVAL; 2368 alignment = 1 * 1024 * 1024; 2369 break; 2370 default: 2371 MISSING_CASE(fb->modifier[0]); 2372 return -EINVAL; 2373 } 2374 2375 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2376 if (ret) 2377 return ret; 2378 2379 /* Note that the w/a also requires 64 PTE of padding following the 2380 * bo. We currently fill all unused PTE with the shadow page and so 2381 * we should always have valid PTE following the scanout preventing 2382 * the VT-d warning. 2383 */ 2384 if (need_vtd_wa(dev) && alignment < 256 * 1024) 2385 alignment = 256 * 1024; 2386 2387 /* 2388 * Global gtt pte registers are special registers which actually forward 2389 * writes to a chunk of system memory. Which means that there is no risk 2390 * that the register values disappear as soon as we call 2391 * intel_runtime_pm_put(), so it is correct to wrap only the 2392 * pin/unpin/fence and not more. 2393 */ 2394 intel_runtime_pm_get(dev_priv); 2395 2396 dev_priv->mm.interruptible = false; 2397 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, 2398 pipelined_request, &view); 2399 if (ret) 2400 goto err_interruptible; 2401 2402 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2403 * fence, whereas 965+ only requires a fence if using 2404 * framebuffer compression. For simplicity, we always install 2405 * a fence as the cost is not that onerous. 2406 */ 2407 ret = i915_gem_object_get_fence(obj); 2408 if (ret == -EDEADLK) { 2409 /* 2410 * -EDEADLK means there are no free fences 2411 * no pending flips. 2412 * 2413 * This is propagated to atomic, but it uses 2414 * -EDEADLK to force a locking recovery, so 2415 * change the returned error to -EBUSY. 2416 */ 2417 ret = -EBUSY; 2418 goto err_unpin; 2419 } else if (ret) 2420 goto err_unpin; 2421 2422 i915_gem_object_pin_fence(obj); 2423 2424 dev_priv->mm.interruptible = true; 2425 intel_runtime_pm_put(dev_priv); 2426 return 0; 2427 2428 err_unpin: 2429 i915_gem_object_unpin_from_display_plane(obj, &view); 2430 err_interruptible: 2431 dev_priv->mm.interruptible = true; 2432 intel_runtime_pm_put(dev_priv); 2433 return ret; 2434 } 2435 2436 static void intel_unpin_fb_obj(struct drm_framebuffer *fb, 2437 const struct drm_plane_state *plane_state) 2438 { 2439 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2440 struct i915_ggtt_view view; 2441 int ret; 2442 2443 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); 2444 2445 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); 2446 WARN_ONCE(ret, "Couldn't get view from plane state!"); 2447 2448 i915_gem_object_unpin_fence(obj); 2449 i915_gem_object_unpin_from_display_plane(obj, &view); 2450 } 2451 2452 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 2453 * is assumed to be a power-of-two. */ 2454 unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv, 2455 int *x, int *y, 2456 unsigned int tiling_mode, 2457 unsigned int cpp, 2458 unsigned int pitch) 2459 { 2460 if (tiling_mode != I915_TILING_NONE) { 2461 unsigned int tile_rows, tiles; 2462 2463 tile_rows = *y / 8; 2464 *y %= 8; 2465 2466 tiles = *x / (512/cpp); 2467 *x %= 512/cpp; 2468 2469 return tile_rows * pitch * 8 + tiles * 4096; 2470 } else { 2471 unsigned int alignment = intel_linear_alignment(dev_priv) - 1; 2472 unsigned int offset; 2473 2474 offset = *y * pitch + *x * cpp; 2475 *y = (offset & alignment) / pitch; 2476 *x = ((offset & alignment) - *y * pitch) / cpp; 2477 return offset & ~alignment; 2478 } 2479 } 2480 2481 static int i9xx_format_to_fourcc(int format) 2482 { 2483 switch (format) { 2484 case DISPPLANE_8BPP: 2485 return DRM_FORMAT_C8; 2486 case DISPPLANE_BGRX555: 2487 return DRM_FORMAT_XRGB1555; 2488 case DISPPLANE_BGRX565: 2489 return DRM_FORMAT_RGB565; 2490 default: 2491 case DISPPLANE_BGRX888: 2492 return DRM_FORMAT_XRGB8888; 2493 case DISPPLANE_RGBX888: 2494 return DRM_FORMAT_XBGR8888; 2495 case DISPPLANE_BGRX101010: 2496 return DRM_FORMAT_XRGB2101010; 2497 case DISPPLANE_RGBX101010: 2498 return DRM_FORMAT_XBGR2101010; 2499 } 2500 } 2501 2502 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2503 { 2504 switch (format) { 2505 case PLANE_CTL_FORMAT_RGB_565: 2506 return DRM_FORMAT_RGB565; 2507 default: 2508 case PLANE_CTL_FORMAT_XRGB_8888: 2509 if (rgb_order) { 2510 if (alpha) 2511 return DRM_FORMAT_ABGR8888; 2512 else 2513 return DRM_FORMAT_XBGR8888; 2514 } else { 2515 if (alpha) 2516 return DRM_FORMAT_ARGB8888; 2517 else 2518 return DRM_FORMAT_XRGB8888; 2519 } 2520 case PLANE_CTL_FORMAT_XRGB_2101010: 2521 if (rgb_order) 2522 return DRM_FORMAT_XBGR2101010; 2523 else 2524 return DRM_FORMAT_XRGB2101010; 2525 } 2526 } 2527 2528 static bool 2529 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 2530 struct intel_initial_plane_config *plane_config) 2531 { 2532 struct drm_device *dev = crtc->base.dev; 2533 struct drm_i915_gem_object *obj = NULL; 2534 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2535 struct drm_framebuffer *fb = &plane_config->fb->base; 2536 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2537 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2538 PAGE_SIZE); 2539 2540 size_aligned -= base_aligned; 2541 2542 if (plane_config->size == 0) 2543 return false; 2544 2545 obj = i915_gem_object_create_stolen_for_preallocated(dev, 2546 base_aligned, 2547 base_aligned, 2548 size_aligned); 2549 if (!obj) 2550 return false; 2551 2552 obj->tiling_mode = plane_config->tiling; 2553 if (obj->tiling_mode == I915_TILING_X) 2554 obj->stride = fb->pitches[0]; 2555 2556 mode_cmd.pixel_format = fb->pixel_format; 2557 mode_cmd.width = fb->width; 2558 mode_cmd.height = fb->height; 2559 mode_cmd.pitches[0] = fb->pitches[0]; 2560 mode_cmd.modifier[0] = fb->modifier[0]; 2561 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2562 2563 mutex_lock(&dev->struct_mutex); 2564 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), 2565 &mode_cmd, obj)) { 2566 DRM_DEBUG_KMS("intel fb init failed\n"); 2567 goto out_unref_obj; 2568 } 2569 mutex_unlock(&dev->struct_mutex); 2570 2571 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2572 return true; 2573 2574 out_unref_obj: 2575 drm_gem_object_unreference(&obj->base); 2576 mutex_unlock(&dev->struct_mutex); 2577 return false; 2578 } 2579 2580 /* Update plane->state->fb to match plane->fb after driver-internal updates */ 2581 static void 2582 update_state_fb(struct drm_plane *plane) 2583 { 2584 if (plane->fb == plane->state->fb) 2585 return; 2586 2587 if (plane->state->fb) 2588 drm_framebuffer_unreference(plane->state->fb); 2589 plane->state->fb = plane->fb; 2590 if (plane->state->fb) 2591 drm_framebuffer_reference(plane->state->fb); 2592 } 2593 2594 static void 2595 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2596 struct intel_initial_plane_config *plane_config) 2597 { 2598 struct drm_device *dev = intel_crtc->base.dev; 2599 struct drm_i915_private *dev_priv = dev->dev_private; 2600 struct drm_crtc *c; 2601 struct intel_crtc *i; 2602 struct drm_i915_gem_object *obj; 2603 struct drm_plane *primary = intel_crtc->base.primary; 2604 struct drm_plane_state *plane_state = primary->state; 2605 struct drm_framebuffer *fb; 2606 2607 if (!plane_config->fb) 2608 return; 2609 2610 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2611 fb = &plane_config->fb->base; 2612 goto valid_fb; 2613 } 2614 2615 kfree(plane_config->fb); 2616 2617 /* 2618 * Failed to alloc the obj, check to see if we should share 2619 * an fb with another CRTC instead 2620 */ 2621 for_each_crtc(dev, c) { 2622 i = to_intel_crtc(c); 2623 2624 if (c == &intel_crtc->base) 2625 continue; 2626 2627 if (!i->active) 2628 continue; 2629 2630 fb = c->primary->fb; 2631 if (!fb) 2632 continue; 2633 2634 obj = intel_fb_obj(fb); 2635 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2636 drm_framebuffer_reference(fb); 2637 goto valid_fb; 2638 } 2639 } 2640 2641 return; 2642 2643 valid_fb: 2644 plane_state->src_x = plane_state->src_y = 0; 2645 plane_state->src_w = fb->width << 16; 2646 plane_state->src_h = fb->height << 16; 2647 2648 plane_state->crtc_x = plane_state->src_y = 0; 2649 plane_state->crtc_w = fb->width; 2650 plane_state->crtc_h = fb->height; 2651 2652 obj = intel_fb_obj(fb); 2653 if (obj->tiling_mode != I915_TILING_NONE) 2654 dev_priv->preserve_bios_swizzle = true; 2655 2656 drm_framebuffer_reference(fb); 2657 primary->fb = primary->state->fb = fb; 2658 primary->crtc = primary->state->crtc = &intel_crtc->base; 2659 intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary)); 2660 obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; 2661 } 2662 2663 static void i9xx_update_primary_plane(struct drm_crtc *crtc, 2664 struct drm_framebuffer *fb, 2665 int x, int y) 2666 { 2667 struct drm_device *dev = crtc->dev; 2668 struct drm_i915_private *dev_priv = dev->dev_private; 2669 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2670 struct drm_plane *primary = crtc->primary; 2671 bool visible = to_intel_plane_state(primary->state)->visible; 2672 struct drm_i915_gem_object *obj; 2673 int plane = intel_crtc->plane; 2674 unsigned long linear_offset; 2675 u32 dspcntr; 2676 u32 reg = DSPCNTR(plane); 2677 int pixel_size; 2678 2679 if (!visible || !fb) { 2680 I915_WRITE(reg, 0); 2681 if (INTEL_INFO(dev)->gen >= 4) 2682 I915_WRITE(DSPSURF(plane), 0); 2683 else 2684 I915_WRITE(DSPADDR(plane), 0); 2685 POSTING_READ(reg); 2686 return; 2687 } 2688 2689 obj = intel_fb_obj(fb); 2690 if (WARN_ON(obj == NULL)) 2691 return; 2692 2693 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 2694 2695 dspcntr = DISPPLANE_GAMMA_ENABLE; 2696 2697 dspcntr |= DISPLAY_PLANE_ENABLE; 2698 2699 if (INTEL_INFO(dev)->gen < 4) { 2700 if (intel_crtc->pipe == PIPE_B) 2701 dspcntr |= DISPPLANE_SEL_PIPE_B; 2702 2703 /* pipesrc and dspsize control the size that is scaled from, 2704 * which should always be the user's requested size. 2705 */ 2706 I915_WRITE(DSPSIZE(plane), 2707 ((intel_crtc->config->pipe_src_h - 1) << 16) | 2708 (intel_crtc->config->pipe_src_w - 1)); 2709 I915_WRITE(DSPPOS(plane), 0); 2710 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) { 2711 I915_WRITE(PRIMSIZE(plane), 2712 ((intel_crtc->config->pipe_src_h - 1) << 16) | 2713 (intel_crtc->config->pipe_src_w - 1)); 2714 I915_WRITE(PRIMPOS(plane), 0); 2715 I915_WRITE(PRIMCNSTALPHA(plane), 0); 2716 } 2717 2718 switch (fb->pixel_format) { 2719 case DRM_FORMAT_C8: 2720 dspcntr |= DISPPLANE_8BPP; 2721 break; 2722 case DRM_FORMAT_XRGB1555: 2723 dspcntr |= DISPPLANE_BGRX555; 2724 break; 2725 case DRM_FORMAT_RGB565: 2726 dspcntr |= DISPPLANE_BGRX565; 2727 break; 2728 case DRM_FORMAT_XRGB8888: 2729 dspcntr |= DISPPLANE_BGRX888; 2730 break; 2731 case DRM_FORMAT_XBGR8888: 2732 dspcntr |= DISPPLANE_RGBX888; 2733 break; 2734 case DRM_FORMAT_XRGB2101010: 2735 dspcntr |= DISPPLANE_BGRX101010; 2736 break; 2737 case DRM_FORMAT_XBGR2101010: 2738 dspcntr |= DISPPLANE_RGBX101010; 2739 break; 2740 default: 2741 BUG(); 2742 } 2743 2744 if (INTEL_INFO(dev)->gen >= 4 && 2745 obj->tiling_mode != I915_TILING_NONE) 2746 dspcntr |= DISPPLANE_TILED; 2747 2748 if (IS_G4X(dev)) 2749 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2750 2751 linear_offset = y * fb->pitches[0] + x * pixel_size; 2752 2753 if (INTEL_INFO(dev)->gen >= 4) { 2754 intel_crtc->dspaddr_offset = 2755 intel_gen4_compute_page_offset(dev_priv, 2756 &x, &y, obj->tiling_mode, 2757 pixel_size, 2758 fb->pitches[0]); 2759 linear_offset -= intel_crtc->dspaddr_offset; 2760 } else { 2761 intel_crtc->dspaddr_offset = linear_offset; 2762 } 2763 2764 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { 2765 dspcntr |= DISPPLANE_ROTATE_180; 2766 2767 x += (intel_crtc->config->pipe_src_w - 1); 2768 y += (intel_crtc->config->pipe_src_h - 1); 2769 2770 /* Finding the last pixel of the last line of the display 2771 data and adding to linear_offset*/ 2772 linear_offset += 2773 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + 2774 (intel_crtc->config->pipe_src_w - 1) * pixel_size; 2775 } 2776 2777 I915_WRITE(reg, dspcntr); 2778 2779 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2780 if (INTEL_INFO(dev)->gen >= 4) { 2781 I915_WRITE(DSPSURF(plane), 2782 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2783 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2784 I915_WRITE(DSPLINOFF(plane), linear_offset); 2785 } else 2786 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); 2787 POSTING_READ(reg); 2788 } 2789 2790 static void ironlake_update_primary_plane(struct drm_crtc *crtc, 2791 struct drm_framebuffer *fb, 2792 int x, int y) 2793 { 2794 struct drm_device *dev = crtc->dev; 2795 struct drm_i915_private *dev_priv = dev->dev_private; 2796 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2797 struct drm_plane *primary = crtc->primary; 2798 bool visible = to_intel_plane_state(primary->state)->visible; 2799 struct drm_i915_gem_object *obj; 2800 int plane = intel_crtc->plane; 2801 unsigned long linear_offset; 2802 u32 dspcntr; 2803 u32 reg = DSPCNTR(plane); 2804 int pixel_size; 2805 2806 if (!visible || !fb) { 2807 I915_WRITE(reg, 0); 2808 I915_WRITE(DSPSURF(plane), 0); 2809 POSTING_READ(reg); 2810 return; 2811 } 2812 2813 obj = intel_fb_obj(fb); 2814 if (WARN_ON(obj == NULL)) 2815 return; 2816 2817 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 2818 2819 dspcntr = DISPPLANE_GAMMA_ENABLE; 2820 2821 dspcntr |= DISPLAY_PLANE_ENABLE; 2822 2823 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2824 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 2825 2826 switch (fb->pixel_format) { 2827 case DRM_FORMAT_C8: 2828 dspcntr |= DISPPLANE_8BPP; 2829 break; 2830 case DRM_FORMAT_RGB565: 2831 dspcntr |= DISPPLANE_BGRX565; 2832 break; 2833 case DRM_FORMAT_XRGB8888: 2834 dspcntr |= DISPPLANE_BGRX888; 2835 break; 2836 case DRM_FORMAT_XBGR8888: 2837 dspcntr |= DISPPLANE_RGBX888; 2838 break; 2839 case DRM_FORMAT_XRGB2101010: 2840 dspcntr |= DISPPLANE_BGRX101010; 2841 break; 2842 case DRM_FORMAT_XBGR2101010: 2843 dspcntr |= DISPPLANE_RGBX101010; 2844 break; 2845 default: 2846 BUG(); 2847 } 2848 2849 if (obj->tiling_mode != I915_TILING_NONE) 2850 dspcntr |= DISPPLANE_TILED; 2851 2852 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) 2853 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2854 2855 linear_offset = y * fb->pitches[0] + x * pixel_size; 2856 intel_crtc->dspaddr_offset = 2857 intel_gen4_compute_page_offset(dev_priv, 2858 &x, &y, obj->tiling_mode, 2859 pixel_size, 2860 fb->pitches[0]); 2861 linear_offset -= intel_crtc->dspaddr_offset; 2862 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) { 2863 dspcntr |= DISPPLANE_ROTATE_180; 2864 2865 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 2866 x += (intel_crtc->config->pipe_src_w - 1); 2867 y += (intel_crtc->config->pipe_src_h - 1); 2868 2869 /* Finding the last pixel of the last line of the display 2870 data and adding to linear_offset*/ 2871 linear_offset += 2872 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] + 2873 (intel_crtc->config->pipe_src_w - 1) * pixel_size; 2874 } 2875 } 2876 2877 I915_WRITE(reg, dspcntr); 2878 2879 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2880 I915_WRITE(DSPSURF(plane), 2881 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); 2882 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2883 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 2884 } else { 2885 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2886 I915_WRITE(DSPLINOFF(plane), linear_offset); 2887 } 2888 POSTING_READ(reg); 2889 } 2890 2891 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, 2892 uint32_t pixel_format) 2893 { 2894 u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8; 2895 2896 /* 2897 * The stride is either expressed as a multiple of 64 bytes 2898 * chunks for linear buffers or in number of tiles for tiled 2899 * buffers. 2900 */ 2901 switch (fb_modifier) { 2902 case DRM_FORMAT_MOD_NONE: 2903 return 64; 2904 case I915_FORMAT_MOD_X_TILED: 2905 if (INTEL_INFO(dev)->gen == 2) 2906 return 128; 2907 return 512; 2908 case I915_FORMAT_MOD_Y_TILED: 2909 /* No need to check for old gens and Y tiling since this is 2910 * about the display engine and those will be blocked before 2911 * we get here. 2912 */ 2913 return 128; 2914 case I915_FORMAT_MOD_Yf_TILED: 2915 if (bits_per_pixel == 8) 2916 return 64; 2917 else 2918 return 128; 2919 default: 2920 MISSING_CASE(fb_modifier); 2921 return 64; 2922 } 2923 } 2924 2925 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, 2926 struct drm_i915_gem_object *obj) 2927 { 2928 const struct i915_ggtt_view *view = &i915_ggtt_view_normal; 2929 2930 if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) 2931 view = &i915_ggtt_view_rotated; 2932 2933 return i915_gem_obj_ggtt_offset_view(obj, view); 2934 } 2935 2936 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2937 { 2938 struct drm_device *dev = intel_crtc->base.dev; 2939 struct drm_i915_private *dev_priv = dev->dev_private; 2940 2941 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 2942 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 2943 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 2944 DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n", 2945 intel_crtc->base.base.id, intel_crtc->pipe, id); 2946 } 2947 2948 /* 2949 * This function detaches (aka. unbinds) unused scalers in hardware 2950 */ 2951 static void skl_detach_scalers(struct intel_crtc *intel_crtc) 2952 { 2953 struct intel_crtc_scaler_state *scaler_state; 2954 int i; 2955 2956 scaler_state = &intel_crtc->config->scaler_state; 2957 2958 /* loop through and disable scalers that aren't in use */ 2959 for (i = 0; i < intel_crtc->num_scalers; i++) { 2960 if (!scaler_state->scalers[i].in_use) 2961 skl_detach_scaler(intel_crtc, i); 2962 } 2963 } 2964 2965 u32 skl_plane_ctl_format(uint32_t pixel_format) 2966 { 2967 switch (pixel_format) { 2968 case DRM_FORMAT_C8: 2969 return PLANE_CTL_FORMAT_INDEXED; 2970 case DRM_FORMAT_RGB565: 2971 return PLANE_CTL_FORMAT_RGB_565; 2972 case DRM_FORMAT_XBGR8888: 2973 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 2974 case DRM_FORMAT_XRGB8888: 2975 return PLANE_CTL_FORMAT_XRGB_8888; 2976 /* 2977 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers 2978 * to be already pre-multiplied. We need to add a knob (or a different 2979 * DRM_FORMAT) for user-space to configure that. 2980 */ 2981 case DRM_FORMAT_ABGR8888: 2982 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX | 2983 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 2984 case DRM_FORMAT_ARGB8888: 2985 return PLANE_CTL_FORMAT_XRGB_8888 | 2986 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 2987 case DRM_FORMAT_XRGB2101010: 2988 return PLANE_CTL_FORMAT_XRGB_2101010; 2989 case DRM_FORMAT_XBGR2101010: 2990 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 2991 case DRM_FORMAT_YUYV: 2992 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 2993 case DRM_FORMAT_YVYU: 2994 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 2995 case DRM_FORMAT_UYVY: 2996 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 2997 case DRM_FORMAT_VYUY: 2998 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 2999 default: 3000 MISSING_CASE(pixel_format); 3001 } 3002 3003 return 0; 3004 } 3005 3006 u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3007 { 3008 switch (fb_modifier) { 3009 case DRM_FORMAT_MOD_NONE: 3010 break; 3011 case I915_FORMAT_MOD_X_TILED: 3012 return PLANE_CTL_TILED_X; 3013 case I915_FORMAT_MOD_Y_TILED: 3014 return PLANE_CTL_TILED_Y; 3015 case I915_FORMAT_MOD_Yf_TILED: 3016 return PLANE_CTL_TILED_YF; 3017 default: 3018 MISSING_CASE(fb_modifier); 3019 } 3020 3021 return 0; 3022 } 3023 3024 u32 skl_plane_ctl_rotation(unsigned int rotation) 3025 { 3026 switch (rotation) { 3027 case BIT(DRM_ROTATE_0): 3028 break; 3029 /* 3030 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr 3031 * while i915 HW rotation is clockwise, thats why this swapping. 3032 */ 3033 case BIT(DRM_ROTATE_90): 3034 return PLANE_CTL_ROTATE_270; 3035 case BIT(DRM_ROTATE_180): 3036 return PLANE_CTL_ROTATE_180; 3037 case BIT(DRM_ROTATE_270): 3038 return PLANE_CTL_ROTATE_90; 3039 default: 3040 MISSING_CASE(rotation); 3041 } 3042 3043 return 0; 3044 } 3045 3046 static void skylake_update_primary_plane(struct drm_crtc *crtc, 3047 struct drm_framebuffer *fb, 3048 int x, int y) 3049 { 3050 struct drm_device *dev = crtc->dev; 3051 struct drm_i915_private *dev_priv = dev->dev_private; 3052 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3053 struct drm_plane *plane = crtc->primary; 3054 bool visible = to_intel_plane_state(plane->state)->visible; 3055 struct drm_i915_gem_object *obj; 3056 int pipe = intel_crtc->pipe; 3057 u32 plane_ctl, stride_div, stride; 3058 u32 tile_height, plane_offset, plane_size; 3059 unsigned int rotation; 3060 int x_offset, y_offset; 3061 unsigned long surf_addr; 3062 struct intel_crtc_state *crtc_state = intel_crtc->config; 3063 struct intel_plane_state *plane_state; 3064 int src_x = 0, src_y = 0, src_w = 0, src_h = 0; 3065 int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0; 3066 int scaler_id = -1; 3067 3068 plane_state = to_intel_plane_state(plane->state); 3069 3070 if (!visible || !fb) { 3071 I915_WRITE(PLANE_CTL(pipe, 0), 0); 3072 I915_WRITE(PLANE_SURF(pipe, 0), 0); 3073 POSTING_READ(PLANE_CTL(pipe, 0)); 3074 return; 3075 } 3076 3077 plane_ctl = PLANE_CTL_ENABLE | 3078 PLANE_CTL_PIPE_GAMMA_ENABLE | 3079 PLANE_CTL_PIPE_CSC_ENABLE; 3080 3081 plane_ctl |= skl_plane_ctl_format(fb->pixel_format); 3082 plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); 3083 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 3084 3085 rotation = plane->state->rotation; 3086 plane_ctl |= skl_plane_ctl_rotation(rotation); 3087 3088 obj = intel_fb_obj(fb); 3089 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], 3090 fb->pixel_format); 3091 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj); 3092 3093 /* 3094 * FIXME: intel_plane_state->src, dst aren't set when transitional 3095 * update_plane helpers are called from legacy paths. 3096 * Once full atomic crtc is available, below check can be avoided. 3097 */ 3098 if (drm_rect_width(&plane_state->src)) { 3099 scaler_id = plane_state->scaler_id; 3100 src_x = plane_state->src.x1 >> 16; 3101 src_y = plane_state->src.y1 >> 16; 3102 src_w = drm_rect_width(&plane_state->src) >> 16; 3103 src_h = drm_rect_height(&plane_state->src) >> 16; 3104 dst_x = plane_state->dst.x1; 3105 dst_y = plane_state->dst.y1; 3106 dst_w = drm_rect_width(&plane_state->dst); 3107 dst_h = drm_rect_height(&plane_state->dst); 3108 3109 WARN_ON(x != src_x || y != src_y); 3110 } else { 3111 src_w = intel_crtc->config->pipe_src_w; 3112 src_h = intel_crtc->config->pipe_src_h; 3113 } 3114 3115 if (intel_rotation_90_or_270(rotation)) { 3116 /* stride = Surface height in tiles */ 3117 tile_height = intel_tile_height(dev, fb->pixel_format, 3118 fb->modifier[0]); 3119 stride = DIV_ROUND_UP(fb->height, tile_height); 3120 x_offset = stride * tile_height - y - src_h; 3121 y_offset = x; 3122 plane_size = (src_w - 1) << 16 | (src_h - 1); 3123 } else { 3124 stride = fb->pitches[0] / stride_div; 3125 x_offset = x; 3126 y_offset = y; 3127 plane_size = (src_h - 1) << 16 | (src_w - 1); 3128 } 3129 plane_offset = y_offset << 16 | x_offset; 3130 3131 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 3132 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); 3133 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); 3134 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 3135 3136 if (scaler_id >= 0) { 3137 uint32_t ps_ctrl = 0; 3138 3139 WARN_ON(!dst_w || !dst_h); 3140 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) | 3141 crtc_state->scaler_state.scalers[scaler_id].mode; 3142 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3143 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3144 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); 3145 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3146 I915_WRITE(PLANE_POS(pipe, 0), 0); 3147 } else { 3148 I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); 3149 } 3150 3151 I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); 3152 3153 POSTING_READ(PLANE_SURF(pipe, 0)); 3154 } 3155 3156 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 3157 static int 3158 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 3159 int x, int y, enum mode_set_atomic state) 3160 { 3161 struct drm_device *dev = crtc->dev; 3162 struct drm_i915_private *dev_priv = dev->dev_private; 3163 3164 if (dev_priv->fbc.disable_fbc) 3165 dev_priv->fbc.disable_fbc(dev_priv); 3166 3167 dev_priv->display.update_primary_plane(crtc, fb, x, y); 3168 3169 return 0; 3170 } 3171 3172 static void intel_complete_page_flips(struct drm_device *dev) 3173 { 3174 struct drm_crtc *crtc; 3175 3176 for_each_crtc(dev, crtc) { 3177 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3178 enum plane plane = intel_crtc->plane; 3179 3180 intel_prepare_page_flip(dev, plane); 3181 intel_finish_page_flip_plane(dev, plane); 3182 } 3183 } 3184 3185 static void intel_update_primary_planes(struct drm_device *dev) 3186 { 3187 struct drm_i915_private *dev_priv = dev->dev_private; 3188 struct drm_crtc *crtc; 3189 3190 for_each_crtc(dev, crtc) { 3191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3192 3193 drm_modeset_lock(&crtc->mutex, NULL); 3194 /* 3195 * FIXME: Once we have proper support for primary planes (and 3196 * disabling them without disabling the entire crtc) allow again 3197 * a NULL crtc->primary->fb. 3198 */ 3199 if (intel_crtc->active && crtc->primary->fb) 3200 dev_priv->display.update_primary_plane(crtc, 3201 crtc->primary->fb, 3202 crtc->x, 3203 crtc->y); 3204 drm_modeset_unlock(&crtc->mutex); 3205 } 3206 } 3207 3208 void intel_prepare_reset(struct drm_device *dev) 3209 { 3210 /* no reset support for gen2 */ 3211 if (IS_GEN2(dev)) 3212 return; 3213 3214 /* reset doesn't touch the display */ 3215 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 3216 return; 3217 3218 drm_modeset_lock_all(dev); 3219 /* 3220 * Disabling the crtcs gracefully seems nicer. Also the 3221 * g33 docs say we should at least disable all the planes. 3222 */ 3223 intel_display_suspend(dev); 3224 } 3225 3226 void intel_finish_reset(struct drm_device *dev) 3227 { 3228 struct drm_i915_private *dev_priv = to_i915(dev); 3229 3230 /* 3231 * Flips in the rings will be nuked by the reset, 3232 * so complete all pending flips so that user space 3233 * will get its events and not get stuck. 3234 */ 3235 intel_complete_page_flips(dev); 3236 3237 /* no reset support for gen2 */ 3238 if (IS_GEN2(dev)) 3239 return; 3240 3241 /* reset doesn't touch the display */ 3242 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { 3243 /* 3244 * Flips in the rings have been nuked by the reset, 3245 * so update the base address of all primary 3246 * planes to the the last fb to make sure we're 3247 * showing the correct fb after a reset. 3248 */ 3249 intel_update_primary_planes(dev); 3250 return; 3251 } 3252 3253 /* 3254 * The display has been reset as well, 3255 * so need a full re-initialization. 3256 */ 3257 intel_runtime_pm_disable_interrupts(dev_priv); 3258 intel_runtime_pm_enable_interrupts(dev_priv); 3259 3260 intel_modeset_init_hw(dev); 3261 3262 spin_lock_irq(&dev_priv->irq_lock); 3263 if (dev_priv->display.hpd_irq_setup) 3264 dev_priv->display.hpd_irq_setup(dev); 3265 spin_unlock_irq(&dev_priv->irq_lock); 3266 3267 intel_display_resume(dev); 3268 3269 intel_hpd_init(dev_priv); 3270 3271 drm_modeset_unlock_all(dev); 3272 } 3273 3274 static void 3275 intel_finish_fb(struct drm_framebuffer *old_fb) 3276 { 3277 struct drm_i915_gem_object *obj = intel_fb_obj(old_fb); 3278 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3279 bool was_interruptible = dev_priv->mm.interruptible; 3280 int ret; 3281 3282 /* Big Hammer, we also need to ensure that any pending 3283 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 3284 * current scanout is retired before unpinning the old 3285 * framebuffer. Note that we rely on userspace rendering 3286 * into the buffer attached to the pipe they are waiting 3287 * on. If not, userspace generates a GPU hang with IPEHR 3288 * point to the MI_WAIT_FOR_EVENT. 3289 * 3290 * This should only fail upon a hung GPU, in which case we 3291 * can safely continue. 3292 */ 3293 dev_priv->mm.interruptible = false; 3294 ret = i915_gem_object_wait_rendering(obj, true); 3295 dev_priv->mm.interruptible = was_interruptible; 3296 3297 WARN_ON(ret); 3298 } 3299 3300 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3301 { 3302 struct drm_device *dev = crtc->dev; 3303 struct drm_i915_private *dev_priv = dev->dev_private; 3304 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3305 bool pending; 3306 3307 if (i915_reset_in_progress(&dev_priv->gpu_error) || 3308 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 3309 return false; 3310 3311 spin_lock_irq(&dev->event_lock); 3312 pending = to_intel_crtc(crtc)->unpin_work != NULL; 3313 spin_unlock_irq(&dev->event_lock); 3314 3315 return pending; 3316 } 3317 3318 static void intel_update_pipe_size(struct intel_crtc *crtc) 3319 { 3320 struct drm_device *dev = crtc->base.dev; 3321 struct drm_i915_private *dev_priv = dev->dev_private; 3322 const struct drm_display_mode *adjusted_mode; 3323 3324 if (!i915.fastboot) 3325 return; 3326 3327 /* 3328 * Update pipe size and adjust fitter if needed: the reason for this is 3329 * that in compute_mode_changes we check the native mode (not the pfit 3330 * mode) to see if we can flip rather than do a full mode set. In the 3331 * fastboot case, we'll flip, but if we don't update the pipesrc and 3332 * pfit state, we'll end up with a big fb scanned out into the wrong 3333 * sized surface. 3334 * 3335 * To fix this properly, we need to hoist the checks up into 3336 * compute_mode_changes (or above), check the actual pfit state and 3337 * whether the platform allows pfit disable with pipe active, and only 3338 * then update the pipesrc and pfit state, even on the flip path. 3339 */ 3340 3341 adjusted_mode = &crtc->config->base.adjusted_mode; 3342 3343 I915_WRITE(PIPESRC(crtc->pipe), 3344 ((adjusted_mode->crtc_hdisplay - 1) << 16) | 3345 (adjusted_mode->crtc_vdisplay - 1)); 3346 if (!crtc->config->pch_pfit.enabled && 3347 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 3348 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3349 I915_WRITE(PF_CTL(crtc->pipe), 0); 3350 I915_WRITE(PF_WIN_POS(crtc->pipe), 0); 3351 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); 3352 } 3353 crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay; 3354 crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay; 3355 } 3356 3357 static void intel_fdi_normal_train(struct drm_crtc *crtc) 3358 { 3359 struct drm_device *dev = crtc->dev; 3360 struct drm_i915_private *dev_priv = dev->dev_private; 3361 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3362 int pipe = intel_crtc->pipe; 3363 u32 reg, temp; 3364 3365 /* enable normal train */ 3366 reg = FDI_TX_CTL(pipe); 3367 temp = I915_READ(reg); 3368 if (IS_IVYBRIDGE(dev)) { 3369 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3370 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3371 } else { 3372 temp &= ~FDI_LINK_TRAIN_NONE; 3373 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3374 } 3375 I915_WRITE(reg, temp); 3376 3377 reg = FDI_RX_CTL(pipe); 3378 temp = I915_READ(reg); 3379 if (HAS_PCH_CPT(dev)) { 3380 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3381 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3382 } else { 3383 temp &= ~FDI_LINK_TRAIN_NONE; 3384 temp |= FDI_LINK_TRAIN_NONE; 3385 } 3386 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3387 3388 /* wait one idle pattern time */ 3389 POSTING_READ(reg); 3390 udelay(1000); 3391 3392 /* IVB wants error correction enabled */ 3393 if (IS_IVYBRIDGE(dev)) 3394 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3395 FDI_FE_ERRC_ENABLE); 3396 } 3397 3398 /* The FDI link training functions for ILK/Ibexpeak. */ 3399 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 3400 { 3401 struct drm_device *dev = crtc->dev; 3402 struct drm_i915_private *dev_priv = dev->dev_private; 3403 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3404 int pipe = intel_crtc->pipe; 3405 u32 reg, temp, tries; 3406 3407 /* FDI needs bits from pipe first */ 3408 assert_pipe_enabled(dev_priv, pipe); 3409 3410 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3411 for train result */ 3412 reg = FDI_RX_IMR(pipe); 3413 temp = I915_READ(reg); 3414 temp &= ~FDI_RX_SYMBOL_LOCK; 3415 temp &= ~FDI_RX_BIT_LOCK; 3416 I915_WRITE(reg, temp); 3417 I915_READ(reg); 3418 udelay(150); 3419 3420 /* enable CPU FDI TX and PCH FDI RX */ 3421 reg = FDI_TX_CTL(pipe); 3422 temp = I915_READ(reg); 3423 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3424 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3425 temp &= ~FDI_LINK_TRAIN_NONE; 3426 temp |= FDI_LINK_TRAIN_PATTERN_1; 3427 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3428 3429 reg = FDI_RX_CTL(pipe); 3430 temp = I915_READ(reg); 3431 temp &= ~FDI_LINK_TRAIN_NONE; 3432 temp |= FDI_LINK_TRAIN_PATTERN_1; 3433 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3434 3435 POSTING_READ(reg); 3436 udelay(150); 3437 3438 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3439 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3440 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 3441 FDI_RX_PHASE_SYNC_POINTER_EN); 3442 3443 reg = FDI_RX_IIR(pipe); 3444 for (tries = 0; tries < 5; tries++) { 3445 temp = I915_READ(reg); 3446 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3447 3448 if ((temp & FDI_RX_BIT_LOCK)) { 3449 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3450 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3451 break; 3452 } 3453 } 3454 if (tries == 5) 3455 DRM_ERROR("FDI train 1 fail!\n"); 3456 3457 /* Train 2 */ 3458 reg = FDI_TX_CTL(pipe); 3459 temp = I915_READ(reg); 3460 temp &= ~FDI_LINK_TRAIN_NONE; 3461 temp |= FDI_LINK_TRAIN_PATTERN_2; 3462 I915_WRITE(reg, temp); 3463 3464 reg = FDI_RX_CTL(pipe); 3465 temp = I915_READ(reg); 3466 temp &= ~FDI_LINK_TRAIN_NONE; 3467 temp |= FDI_LINK_TRAIN_PATTERN_2; 3468 I915_WRITE(reg, temp); 3469 3470 POSTING_READ(reg); 3471 udelay(150); 3472 3473 reg = FDI_RX_IIR(pipe); 3474 for (tries = 0; tries < 5; tries++) { 3475 temp = I915_READ(reg); 3476 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3477 3478 if (temp & FDI_RX_SYMBOL_LOCK) { 3479 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3480 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3481 break; 3482 } 3483 } 3484 if (tries == 5) 3485 DRM_ERROR("FDI train 2 fail!\n"); 3486 3487 DRM_DEBUG_KMS("FDI train done\n"); 3488 3489 } 3490 3491 static const int snb_b_fdi_train_param[] = { 3492 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3493 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3494 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3495 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3496 }; 3497 3498 /* The FDI link training functions for SNB/Cougarpoint. */ 3499 static void gen6_fdi_link_train(struct drm_crtc *crtc) 3500 { 3501 struct drm_device *dev = crtc->dev; 3502 struct drm_i915_private *dev_priv = dev->dev_private; 3503 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3504 int pipe = intel_crtc->pipe; 3505 u32 reg, temp, i, retry; 3506 3507 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3508 for train result */ 3509 reg = FDI_RX_IMR(pipe); 3510 temp = I915_READ(reg); 3511 temp &= ~FDI_RX_SYMBOL_LOCK; 3512 temp &= ~FDI_RX_BIT_LOCK; 3513 I915_WRITE(reg, temp); 3514 3515 POSTING_READ(reg); 3516 udelay(150); 3517 3518 /* enable CPU FDI TX and PCH FDI RX */ 3519 reg = FDI_TX_CTL(pipe); 3520 temp = I915_READ(reg); 3521 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3522 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3523 temp &= ~FDI_LINK_TRAIN_NONE; 3524 temp |= FDI_LINK_TRAIN_PATTERN_1; 3525 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3526 /* SNB-B */ 3527 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3528 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3529 3530 I915_WRITE(FDI_RX_MISC(pipe), 3531 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3532 3533 reg = FDI_RX_CTL(pipe); 3534 temp = I915_READ(reg); 3535 if (HAS_PCH_CPT(dev)) { 3536 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3537 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3538 } else { 3539 temp &= ~FDI_LINK_TRAIN_NONE; 3540 temp |= FDI_LINK_TRAIN_PATTERN_1; 3541 } 3542 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3543 3544 POSTING_READ(reg); 3545 udelay(150); 3546 3547 for (i = 0; i < 4; i++) { 3548 reg = FDI_TX_CTL(pipe); 3549 temp = I915_READ(reg); 3550 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3551 temp |= snb_b_fdi_train_param[i]; 3552 I915_WRITE(reg, temp); 3553 3554 POSTING_READ(reg); 3555 udelay(500); 3556 3557 for (retry = 0; retry < 5; retry++) { 3558 reg = FDI_RX_IIR(pipe); 3559 temp = I915_READ(reg); 3560 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3561 if (temp & FDI_RX_BIT_LOCK) { 3562 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3563 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3564 break; 3565 } 3566 udelay(50); 3567 } 3568 if (retry < 5) 3569 break; 3570 } 3571 if (i == 4) 3572 DRM_ERROR("FDI train 1 fail!\n"); 3573 3574 /* Train 2 */ 3575 reg = FDI_TX_CTL(pipe); 3576 temp = I915_READ(reg); 3577 temp &= ~FDI_LINK_TRAIN_NONE; 3578 temp |= FDI_LINK_TRAIN_PATTERN_2; 3579 if (IS_GEN6(dev)) { 3580 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3581 /* SNB-B */ 3582 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3583 } 3584 I915_WRITE(reg, temp); 3585 3586 reg = FDI_RX_CTL(pipe); 3587 temp = I915_READ(reg); 3588 if (HAS_PCH_CPT(dev)) { 3589 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3590 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3591 } else { 3592 temp &= ~FDI_LINK_TRAIN_NONE; 3593 temp |= FDI_LINK_TRAIN_PATTERN_2; 3594 } 3595 I915_WRITE(reg, temp); 3596 3597 POSTING_READ(reg); 3598 udelay(150); 3599 3600 for (i = 0; i < 4; i++) { 3601 reg = FDI_TX_CTL(pipe); 3602 temp = I915_READ(reg); 3603 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3604 temp |= snb_b_fdi_train_param[i]; 3605 I915_WRITE(reg, temp); 3606 3607 POSTING_READ(reg); 3608 udelay(500); 3609 3610 for (retry = 0; retry < 5; retry++) { 3611 reg = FDI_RX_IIR(pipe); 3612 temp = I915_READ(reg); 3613 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3614 if (temp & FDI_RX_SYMBOL_LOCK) { 3615 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3616 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3617 break; 3618 } 3619 udelay(50); 3620 } 3621 if (retry < 5) 3622 break; 3623 } 3624 if (i == 4) 3625 DRM_ERROR("FDI train 2 fail!\n"); 3626 3627 DRM_DEBUG_KMS("FDI train done.\n"); 3628 } 3629 3630 /* Manual link training for Ivy Bridge A0 parts */ 3631 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 3632 { 3633 struct drm_device *dev = crtc->dev; 3634 struct drm_i915_private *dev_priv = dev->dev_private; 3635 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3636 int pipe = intel_crtc->pipe; 3637 u32 reg, temp, i, j; 3638 3639 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3640 for train result */ 3641 reg = FDI_RX_IMR(pipe); 3642 temp = I915_READ(reg); 3643 temp &= ~FDI_RX_SYMBOL_LOCK; 3644 temp &= ~FDI_RX_BIT_LOCK; 3645 I915_WRITE(reg, temp); 3646 3647 POSTING_READ(reg); 3648 udelay(150); 3649 3650 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 3651 I915_READ(FDI_RX_IIR(pipe))); 3652 3653 /* Try each vswing and preemphasis setting twice before moving on */ 3654 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 3655 /* disable first in case we need to retry */ 3656 reg = FDI_TX_CTL(pipe); 3657 temp = I915_READ(reg); 3658 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 3659 temp &= ~FDI_TX_ENABLE; 3660 I915_WRITE(reg, temp); 3661 3662 reg = FDI_RX_CTL(pipe); 3663 temp = I915_READ(reg); 3664 temp &= ~FDI_LINK_TRAIN_AUTO; 3665 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3666 temp &= ~FDI_RX_ENABLE; 3667 I915_WRITE(reg, temp); 3668 3669 /* enable CPU FDI TX and PCH FDI RX */ 3670 reg = FDI_TX_CTL(pipe); 3671 temp = I915_READ(reg); 3672 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3673 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3674 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 3675 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3676 temp |= snb_b_fdi_train_param[j/2]; 3677 temp |= FDI_COMPOSITE_SYNC; 3678 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3679 3680 I915_WRITE(FDI_RX_MISC(pipe), 3681 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3682 3683 reg = FDI_RX_CTL(pipe); 3684 temp = I915_READ(reg); 3685 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3686 temp |= FDI_COMPOSITE_SYNC; 3687 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3688 3689 POSTING_READ(reg); 3690 udelay(1); /* should be 0.5us */ 3691 3692 for (i = 0; i < 4; i++) { 3693 reg = FDI_RX_IIR(pipe); 3694 temp = I915_READ(reg); 3695 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3696 3697 if (temp & FDI_RX_BIT_LOCK || 3698 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 3699 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3700 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 3701 i); 3702 break; 3703 } 3704 udelay(1); /* should be 0.5us */ 3705 } 3706 if (i == 4) { 3707 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 3708 continue; 3709 } 3710 3711 /* Train 2 */ 3712 reg = FDI_TX_CTL(pipe); 3713 temp = I915_READ(reg); 3714 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3715 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 3716 I915_WRITE(reg, temp); 3717 3718 reg = FDI_RX_CTL(pipe); 3719 temp = I915_READ(reg); 3720 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3721 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3722 I915_WRITE(reg, temp); 3723 3724 POSTING_READ(reg); 3725 udelay(2); /* should be 1.5us */ 3726 3727 for (i = 0; i < 4; i++) { 3728 reg = FDI_RX_IIR(pipe); 3729 temp = I915_READ(reg); 3730 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3731 3732 if (temp & FDI_RX_SYMBOL_LOCK || 3733 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 3734 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3735 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 3736 i); 3737 goto train_done; 3738 } 3739 udelay(2); /* should be 1.5us */ 3740 } 3741 if (i == 4) 3742 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 3743 } 3744 3745 train_done: 3746 DRM_DEBUG_KMS("FDI train done.\n"); 3747 } 3748 3749 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 3750 { 3751 struct drm_device *dev = intel_crtc->base.dev; 3752 struct drm_i915_private *dev_priv = dev->dev_private; 3753 int pipe = intel_crtc->pipe; 3754 u32 reg, temp; 3755 3756 3757 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 3758 reg = FDI_RX_CTL(pipe); 3759 temp = I915_READ(reg); 3760 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 3761 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3762 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 3763 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 3764 3765 POSTING_READ(reg); 3766 udelay(200); 3767 3768 /* Switch from Rawclk to PCDclk */ 3769 temp = I915_READ(reg); 3770 I915_WRITE(reg, temp | FDI_PCDCLK); 3771 3772 POSTING_READ(reg); 3773 udelay(200); 3774 3775 /* Enable CPU FDI TX PLL, always on for Ironlake */ 3776 reg = FDI_TX_CTL(pipe); 3777 temp = I915_READ(reg); 3778 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 3779 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 3780 3781 POSTING_READ(reg); 3782 udelay(100); 3783 } 3784 } 3785 3786 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 3787 { 3788 struct drm_device *dev = intel_crtc->base.dev; 3789 struct drm_i915_private *dev_priv = dev->dev_private; 3790 int pipe = intel_crtc->pipe; 3791 u32 reg, temp; 3792 3793 /* Switch from PCDclk to Rawclk */ 3794 reg = FDI_RX_CTL(pipe); 3795 temp = I915_READ(reg); 3796 I915_WRITE(reg, temp & ~FDI_PCDCLK); 3797 3798 /* Disable CPU FDI TX PLL */ 3799 reg = FDI_TX_CTL(pipe); 3800 temp = I915_READ(reg); 3801 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 3802 3803 POSTING_READ(reg); 3804 udelay(100); 3805 3806 reg = FDI_RX_CTL(pipe); 3807 temp = I915_READ(reg); 3808 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 3809 3810 /* Wait for the clocks to turn off. */ 3811 POSTING_READ(reg); 3812 udelay(100); 3813 } 3814 3815 static void ironlake_fdi_disable(struct drm_crtc *crtc) 3816 { 3817 struct drm_device *dev = crtc->dev; 3818 struct drm_i915_private *dev_priv = dev->dev_private; 3819 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3820 int pipe = intel_crtc->pipe; 3821 u32 reg, temp; 3822 3823 /* disable CPU FDI tx and PCH FDI rx */ 3824 reg = FDI_TX_CTL(pipe); 3825 temp = I915_READ(reg); 3826 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 3827 POSTING_READ(reg); 3828 3829 reg = FDI_RX_CTL(pipe); 3830 temp = I915_READ(reg); 3831 temp &= ~(0x7 << 16); 3832 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 3833 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 3834 3835 POSTING_READ(reg); 3836 udelay(100); 3837 3838 /* Ironlake workaround, disable clock pointer after downing FDI */ 3839 if (HAS_PCH_IBX(dev)) 3840 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3841 3842 /* still set train pattern 1 */ 3843 reg = FDI_TX_CTL(pipe); 3844 temp = I915_READ(reg); 3845 temp &= ~FDI_LINK_TRAIN_NONE; 3846 temp |= FDI_LINK_TRAIN_PATTERN_1; 3847 I915_WRITE(reg, temp); 3848 3849 reg = FDI_RX_CTL(pipe); 3850 temp = I915_READ(reg); 3851 if (HAS_PCH_CPT(dev)) { 3852 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3853 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3854 } else { 3855 temp &= ~FDI_LINK_TRAIN_NONE; 3856 temp |= FDI_LINK_TRAIN_PATTERN_1; 3857 } 3858 /* BPC in FDI rx is consistent with that in PIPECONF */ 3859 temp &= ~(0x07 << 16); 3860 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 3861 I915_WRITE(reg, temp); 3862 3863 POSTING_READ(reg); 3864 udelay(100); 3865 } 3866 3867 bool intel_has_pending_fb_unpin(struct drm_device *dev) 3868 { 3869 struct intel_crtc *crtc; 3870 3871 /* Note that we don't need to be called with mode_config.lock here 3872 * as our list of CRTC objects is static for the lifetime of the 3873 * device and so cannot disappear as we iterate. Similarly, we can 3874 * happily treat the predicates as racy, atomic checks as userspace 3875 * cannot claim and pin a new fb without at least acquring the 3876 * struct_mutex and so serialising with us. 3877 */ 3878 for_each_intel_crtc(dev, crtc) { 3879 if (atomic_read(&crtc->unpin_work_count) == 0) 3880 continue; 3881 3882 if (crtc->unpin_work) 3883 intel_wait_for_vblank(dev, crtc->pipe); 3884 3885 return true; 3886 } 3887 3888 return false; 3889 } 3890 3891 static void page_flip_completed(struct intel_crtc *intel_crtc) 3892 { 3893 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 3894 struct intel_unpin_work *work = intel_crtc->unpin_work; 3895 3896 /* ensure that the unpin work is consistent wrt ->pending. */ 3897 smp_rmb(); 3898 intel_crtc->unpin_work = NULL; 3899 3900 if (work->event) 3901 drm_send_vblank_event(intel_crtc->base.dev, 3902 intel_crtc->pipe, 3903 work->event); 3904 3905 drm_crtc_vblank_put(&intel_crtc->base); 3906 3907 wake_up_all(&dev_priv->pending_flip_queue); 3908 queue_work(dev_priv->wq, &work->work); 3909 3910 trace_i915_flip_complete(intel_crtc->plane, 3911 work->pending_flip_obj); 3912 } 3913 3914 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3915 { 3916 struct drm_device *dev = crtc->dev; 3917 struct drm_i915_private *dev_priv = dev->dev_private; 3918 3919 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3920 if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue, 3921 !intel_crtc_has_pending_flip(crtc), 3922 60*HZ) == 0)) { 3923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3924 3925 spin_lock_irq(&dev->event_lock); 3926 if (intel_crtc->unpin_work) { 3927 WARN_ONCE(1, "Removing stuck page flip\n"); 3928 page_flip_completed(intel_crtc); 3929 } 3930 spin_unlock_irq(&dev->event_lock); 3931 } 3932 3933 if (crtc->primary->fb) { 3934 mutex_lock(&dev->struct_mutex); 3935 intel_finish_fb(crtc->primary->fb); 3936 mutex_unlock(&dev->struct_mutex); 3937 } 3938 } 3939 3940 /* Program iCLKIP clock to the desired frequency */ 3941 static void lpt_program_iclkip(struct drm_crtc *crtc) 3942 { 3943 struct drm_device *dev = crtc->dev; 3944 struct drm_i915_private *dev_priv = dev->dev_private; 3945 int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock; 3946 u32 divsel, phaseinc, auxdiv, phasedir = 0; 3947 u32 temp; 3948 3949 mutex_lock(&dev_priv->sb_lock); 3950 3951 /* It is necessary to ungate the pixclk gate prior to programming 3952 * the divisors, and gate it back when it is done. 3953 */ 3954 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 3955 3956 /* Disable SSCCTL */ 3957 intel_sbi_write(dev_priv, SBI_SSCCTL6, 3958 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) | 3959 SBI_SSCCTL_DISABLE, 3960 SBI_ICLK); 3961 3962 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 3963 if (clock == 20000) { 3964 auxdiv = 1; 3965 divsel = 0x41; 3966 phaseinc = 0x20; 3967 } else { 3968 /* The iCLK virtual clock root frequency is in MHz, 3969 * but the adjusted_mode->crtc_clock in in KHz. To get the 3970 * divisors, it is necessary to divide one by another, so we 3971 * convert the virtual clock precision to KHz here for higher 3972 * precision. 3973 */ 3974 u32 iclk_virtual_root_freq = 172800 * 1000; 3975 u32 iclk_pi_range = 64; 3976 u32 desired_divisor, msb_divisor_value, pi_value; 3977 3978 desired_divisor = (iclk_virtual_root_freq / clock); 3979 msb_divisor_value = desired_divisor / iclk_pi_range; 3980 pi_value = desired_divisor % iclk_pi_range; 3981 3982 auxdiv = 0; 3983 divsel = msb_divisor_value - 2; 3984 phaseinc = pi_value; 3985 } 3986 3987 /* This should not happen with any sane values */ 3988 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 3989 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 3990 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 3991 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 3992 3993 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 3994 clock, 3995 auxdiv, 3996 divsel, 3997 phasedir, 3998 phaseinc); 3999 4000 /* Program SSCDIVINTPHASE6 */ 4001 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 4002 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 4003 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 4004 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 4005 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 4006 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 4007 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 4008 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 4009 4010 /* Program SSCAUXDIV */ 4011 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 4012 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 4013 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 4014 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 4015 4016 /* Enable modulator and associated divider */ 4017 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4018 temp &= ~SBI_SSCCTL_DISABLE; 4019 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4020 4021 /* Wait for initialization time */ 4022 udelay(24); 4023 4024 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 4025 4026 mutex_unlock(&dev_priv->sb_lock); 4027 } 4028 4029 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4030 enum i915_pipe pch_transcoder) 4031 { 4032 struct drm_device *dev = crtc->base.dev; 4033 struct drm_i915_private *dev_priv = dev->dev_private; 4034 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4035 4036 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4037 I915_READ(HTOTAL(cpu_transcoder))); 4038 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 4039 I915_READ(HBLANK(cpu_transcoder))); 4040 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 4041 I915_READ(HSYNC(cpu_transcoder))); 4042 4043 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 4044 I915_READ(VTOTAL(cpu_transcoder))); 4045 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 4046 I915_READ(VBLANK(cpu_transcoder))); 4047 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 4048 I915_READ(VSYNC(cpu_transcoder))); 4049 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 4050 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4051 } 4052 4053 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4054 { 4055 struct drm_i915_private *dev_priv = dev->dev_private; 4056 uint32_t temp; 4057 4058 temp = I915_READ(SOUTH_CHICKEN1); 4059 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 4060 return; 4061 4062 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 4063 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 4064 4065 temp &= ~FDI_BC_BIFURCATION_SELECT; 4066 if (enable) 4067 temp |= FDI_BC_BIFURCATION_SELECT; 4068 4069 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 4070 I915_WRITE(SOUTH_CHICKEN1, temp); 4071 POSTING_READ(SOUTH_CHICKEN1); 4072 } 4073 4074 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4075 { 4076 struct drm_device *dev = intel_crtc->base.dev; 4077 4078 switch (intel_crtc->pipe) { 4079 case PIPE_A: 4080 break; 4081 case PIPE_B: 4082 if (intel_crtc->config->fdi_lanes > 2) 4083 cpt_set_fdi_bc_bifurcation(dev, false); 4084 else 4085 cpt_set_fdi_bc_bifurcation(dev, true); 4086 4087 break; 4088 case PIPE_C: 4089 cpt_set_fdi_bc_bifurcation(dev, true); 4090 4091 break; 4092 default: 4093 BUG(); 4094 } 4095 } 4096 4097 /* 4098 * Enable PCH resources required for PCH ports: 4099 * - PCH PLLs 4100 * - FDI training & RX/TX 4101 * - update transcoder timings 4102 * - DP transcoding bits 4103 * - transcoder 4104 */ 4105 static void ironlake_pch_enable(struct drm_crtc *crtc) 4106 { 4107 struct drm_device *dev = crtc->dev; 4108 struct drm_i915_private *dev_priv = dev->dev_private; 4109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4110 int pipe = intel_crtc->pipe; 4111 u32 reg, temp; 4112 4113 assert_pch_transcoder_disabled(dev_priv, pipe); 4114 4115 if (IS_IVYBRIDGE(dev)) 4116 ivybridge_update_fdi_bc_bifurcation(intel_crtc); 4117 4118 /* Write the TU size bits before fdi link training, so that error 4119 * detection works. */ 4120 I915_WRITE(FDI_RX_TUSIZE1(pipe), 4121 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4122 4123 /* For PCH output, training FDI link */ 4124 dev_priv->display.fdi_link_train(crtc); 4125 4126 /* We need to program the right clock selection before writing the pixel 4127 * mutliplier into the DPLL. */ 4128 if (HAS_PCH_CPT(dev)) { 4129 u32 sel; 4130 4131 temp = I915_READ(PCH_DPLL_SEL); 4132 temp |= TRANS_DPLL_ENABLE(pipe); 4133 sel = TRANS_DPLLB_SEL(pipe); 4134 if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B) 4135 temp |= sel; 4136 else 4137 temp &= ~sel; 4138 I915_WRITE(PCH_DPLL_SEL, temp); 4139 } 4140 4141 /* XXX: pch pll's can be enabled any time before we enable the PCH 4142 * transcoder, and we actually should do this to not upset any PCH 4143 * transcoder that already use the clock when we share it. 4144 * 4145 * Note that enable_shared_dpll tries to do the right thing, but 4146 * get_shared_dpll unconditionally resets the pll - we need that to have 4147 * the right LVDS enable sequence. */ 4148 intel_enable_shared_dpll(intel_crtc); 4149 4150 /* set transcoder timing, panel must allow it */ 4151 assert_panel_unlocked(dev_priv, pipe); 4152 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 4153 4154 intel_fdi_normal_train(crtc); 4155 4156 /* For PCH DP, enable TRANS_DP_CTL */ 4157 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { 4158 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4159 reg = TRANS_DP_CTL(pipe); 4160 temp = I915_READ(reg); 4161 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4162 TRANS_DP_SYNC_MASK | 4163 TRANS_DP_BPC_MASK); 4164 temp |= TRANS_DP_OUTPUT_ENABLE; 4165 temp |= bpc << 9; /* same format but at 11:9 */ 4166 4167 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 4168 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4169 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 4170 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4171 4172 switch (intel_trans_dp_port_sel(crtc)) { 4173 case PCH_DP_B: 4174 temp |= TRANS_DP_PORT_SEL_B; 4175 break; 4176 case PCH_DP_C: 4177 temp |= TRANS_DP_PORT_SEL_C; 4178 break; 4179 case PCH_DP_D: 4180 temp |= TRANS_DP_PORT_SEL_D; 4181 break; 4182 default: 4183 BUG(); 4184 } 4185 4186 I915_WRITE(reg, temp); 4187 } 4188 4189 ironlake_enable_pch_transcoder(dev_priv, pipe); 4190 } 4191 4192 static void lpt_pch_enable(struct drm_crtc *crtc) 4193 { 4194 struct drm_device *dev = crtc->dev; 4195 struct drm_i915_private *dev_priv = dev->dev_private; 4196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4197 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4198 4199 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A); 4200 4201 lpt_program_iclkip(crtc); 4202 4203 /* Set transcoder timing. */ 4204 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 4205 4206 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4207 } 4208 4209 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, 4210 struct intel_crtc_state *crtc_state) 4211 { 4212 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 4213 struct intel_shared_dpll *pll; 4214 struct intel_shared_dpll_config *shared_dpll; 4215 enum intel_dpll_id i; 4216 4217 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 4218 4219 if (HAS_PCH_IBX(dev_priv->dev)) { 4220 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 4221 i = (enum intel_dpll_id) crtc->pipe; 4222 pll = &dev_priv->shared_dplls[i]; 4223 4224 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4225 crtc->base.base.id, pll->name); 4226 4227 WARN_ON(shared_dpll[i].crtc_mask); 4228 4229 goto found; 4230 } 4231 4232 if (IS_BROXTON(dev_priv->dev)) { 4233 /* PLL is attached to port in bxt */ 4234 struct intel_encoder *encoder; 4235 struct intel_digital_port *intel_dig_port; 4236 4237 encoder = intel_ddi_get_crtc_new_encoder(crtc_state); 4238 if (WARN_ON(!encoder)) 4239 return NULL; 4240 4241 intel_dig_port = enc_to_dig_port(&encoder->base); 4242 /* 1:1 mapping between ports and PLLs */ 4243 i = (enum intel_dpll_id)intel_dig_port->port; 4244 pll = &dev_priv->shared_dplls[i]; 4245 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4246 crtc->base.base.id, pll->name); 4247 WARN_ON(shared_dpll[i].crtc_mask); 4248 4249 goto found; 4250 } 4251 4252 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4253 pll = &dev_priv->shared_dplls[i]; 4254 4255 /* Only want to check enabled timings first */ 4256 if (shared_dpll[i].crtc_mask == 0) 4257 continue; 4258 4259 if (memcmp(&crtc_state->dpll_hw_state, 4260 &shared_dpll[i].hw_state, 4261 sizeof(crtc_state->dpll_hw_state)) == 0) { 4262 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", 4263 crtc->base.base.id, pll->name, 4264 shared_dpll[i].crtc_mask, 4265 pll->active); 4266 goto found; 4267 } 4268 } 4269 4270 /* Ok no matching timings, maybe there's a free one? */ 4271 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4272 pll = &dev_priv->shared_dplls[i]; 4273 if (shared_dpll[i].crtc_mask == 0) { 4274 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 4275 crtc->base.base.id, pll->name); 4276 goto found; 4277 } 4278 } 4279 4280 return NULL; 4281 4282 found: 4283 if (shared_dpll[i].crtc_mask == 0) 4284 shared_dpll[i].hw_state = 4285 crtc_state->dpll_hw_state; 4286 4287 crtc_state->shared_dpll = i; 4288 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 4289 pipe_name(crtc->pipe)); 4290 4291 shared_dpll[i].crtc_mask |= 1 << crtc->pipe; 4292 4293 return pll; 4294 } 4295 4296 static void intel_shared_dpll_commit(struct drm_atomic_state *state) 4297 { 4298 struct drm_i915_private *dev_priv = to_i915(state->dev); 4299 struct intel_shared_dpll_config *shared_dpll; 4300 struct intel_shared_dpll *pll; 4301 enum intel_dpll_id i; 4302 4303 if (!to_intel_atomic_state(state)->dpll_set) 4304 return; 4305 4306 shared_dpll = to_intel_atomic_state(state)->shared_dpll; 4307 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4308 pll = &dev_priv->shared_dplls[i]; 4309 pll->config = shared_dpll[i]; 4310 } 4311 } 4312 4313 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4314 { 4315 struct drm_i915_private *dev_priv = dev->dev_private; 4316 int dslreg = PIPEDSL(pipe); 4317 u32 temp; 4318 4319 temp = I915_READ(dslreg); 4320 udelay(500); 4321 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4322 if (wait_for(I915_READ(dslreg) != temp, 5)) 4323 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4324 } 4325 } 4326 4327 static int 4328 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4329 unsigned scaler_user, int *scaler_id, unsigned int rotation, 4330 int src_w, int src_h, int dst_w, int dst_h) 4331 { 4332 struct intel_crtc_scaler_state *scaler_state = 4333 &crtc_state->scaler_state; 4334 struct intel_crtc *intel_crtc = 4335 to_intel_crtc(crtc_state->base.crtc); 4336 int need_scaling; 4337 4338 need_scaling = intel_rotation_90_or_270(rotation) ? 4339 (src_h != dst_w || src_w != dst_h): 4340 (src_w != dst_w || src_h != dst_h); 4341 4342 /* 4343 * if plane is being disabled or scaler is no more required or force detach 4344 * - free scaler binded to this plane/crtc 4345 * - in order to do this, update crtc->scaler_usage 4346 * 4347 * Here scaler state in crtc_state is set free so that 4348 * scaler can be assigned to other user. Actual register 4349 * update to free the scaler is done in plane/panel-fit programming. 4350 * For this purpose crtc/plane_state->scaler_id isn't reset here. 4351 */ 4352 if (force_detach || !need_scaling) { 4353 if (*scaler_id >= 0) { 4354 scaler_state->scaler_users &= ~(1 << scaler_user); 4355 scaler_state->scalers[*scaler_id].in_use = 0; 4356 4357 DRM_DEBUG_KMS("scaler_user index %u.%u: " 4358 "Staged freeing scaler id %d scaler_users = 0x%x\n", 4359 intel_crtc->pipe, scaler_user, *scaler_id, 4360 scaler_state->scaler_users); 4361 *scaler_id = -1; 4362 } 4363 return 0; 4364 } 4365 4366 /* range checks */ 4367 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 4368 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 4369 4370 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 4371 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { 4372 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 4373 "size is out of scaler range\n", 4374 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 4375 return -EINVAL; 4376 } 4377 4378 /* mark this plane as a scaler user in crtc_state */ 4379 scaler_state->scaler_users |= (1 << scaler_user); 4380 DRM_DEBUG_KMS("scaler_user index %u.%u: " 4381 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 4382 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 4383 scaler_state->scaler_users); 4384 4385 return 0; 4386 } 4387 4388 /** 4389 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 4390 * 4391 * @state: crtc's scaler state 4392 * 4393 * Return 4394 * 0 - scaler_usage updated successfully 4395 * error - requested scaling cannot be supported or other error condition 4396 */ 4397 int skl_update_scaler_crtc(struct intel_crtc_state *state) 4398 { 4399 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4400 struct drm_display_mode *adjusted_mode = 4401 &state->base.adjusted_mode; 4402 4403 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", 4404 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); 4405 4406 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4407 &state->scaler_state.scaler_id, DRM_ROTATE_0, 4408 state->pipe_src_w, state->pipe_src_h, 4409 adjusted_mode->hdisplay, adjusted_mode->vdisplay); 4410 } 4411 4412 /** 4413 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4414 * 4415 * @state: crtc's scaler state 4416 * @plane_state: atomic plane state to update 4417 * 4418 * Return 4419 * 0 - scaler_usage updated successfully 4420 * error - requested scaling cannot be supported or other error condition 4421 */ 4422 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4423 struct intel_plane_state *plane_state) 4424 { 4425 4426 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4427 struct intel_plane *intel_plane = 4428 to_intel_plane(plane_state->base.plane); 4429 struct drm_framebuffer *fb = plane_state->base.fb; 4430 int ret; 4431 4432 bool force_detach = !fb || !plane_state->visible; 4433 4434 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", 4435 intel_plane->base.base.id, intel_crtc->pipe, 4436 drm_plane_index(&intel_plane->base)); 4437 4438 ret = skl_update_scaler(crtc_state, force_detach, 4439 drm_plane_index(&intel_plane->base), 4440 &plane_state->scaler_id, 4441 plane_state->base.rotation, 4442 drm_rect_width(&plane_state->src) >> 16, 4443 drm_rect_height(&plane_state->src) >> 16, 4444 drm_rect_width(&plane_state->dst), 4445 drm_rect_height(&plane_state->dst)); 4446 4447 if (ret || plane_state->scaler_id < 0) 4448 return ret; 4449 4450 /* check colorkey */ 4451 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4452 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", 4453 intel_plane->base.base.id); 4454 return -EINVAL; 4455 } 4456 4457 /* Check src format */ 4458 switch (fb->pixel_format) { 4459 case DRM_FORMAT_RGB565: 4460 case DRM_FORMAT_XBGR8888: 4461 case DRM_FORMAT_XRGB8888: 4462 case DRM_FORMAT_ABGR8888: 4463 case DRM_FORMAT_ARGB8888: 4464 case DRM_FORMAT_XRGB2101010: 4465 case DRM_FORMAT_XBGR2101010: 4466 case DRM_FORMAT_YUYV: 4467 case DRM_FORMAT_YVYU: 4468 case DRM_FORMAT_UYVY: 4469 case DRM_FORMAT_VYUY: 4470 break; 4471 default: 4472 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", 4473 intel_plane->base.base.id, fb->base.id, fb->pixel_format); 4474 return -EINVAL; 4475 } 4476 4477 return 0; 4478 } 4479 4480 static void skylake_scaler_disable(struct intel_crtc *crtc) 4481 { 4482 int i; 4483 4484 for (i = 0; i < crtc->num_scalers; i++) 4485 skl_detach_scaler(crtc, i); 4486 } 4487 4488 static void skylake_pfit_enable(struct intel_crtc *crtc) 4489 { 4490 struct drm_device *dev = crtc->base.dev; 4491 struct drm_i915_private *dev_priv = dev->dev_private; 4492 int pipe = crtc->pipe; 4493 struct intel_crtc_scaler_state *scaler_state = 4494 &crtc->config->scaler_state; 4495 4496 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); 4497 4498 if (crtc->config->pch_pfit.enabled) { 4499 int id; 4500 4501 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { 4502 DRM_ERROR("Requesting pfit without getting a scaler first\n"); 4503 return; 4504 } 4505 4506 id = scaler_state->scaler_id; 4507 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4508 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4509 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4510 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4511 4512 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); 4513 } 4514 } 4515 4516 static void ironlake_pfit_enable(struct intel_crtc *crtc) 4517 { 4518 struct drm_device *dev = crtc->base.dev; 4519 struct drm_i915_private *dev_priv = dev->dev_private; 4520 int pipe = crtc->pipe; 4521 4522 if (crtc->config->pch_pfit.enabled) { 4523 /* Force use of hard-coded filter coefficients 4524 * as some pre-programmed values are broken, 4525 * e.g. x201. 4526 */ 4527 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 4528 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4529 PF_PIPE_SEL_IVB(pipe)); 4530 else 4531 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4532 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4533 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4534 } 4535 } 4536 4537 void hsw_enable_ips(struct intel_crtc *crtc) 4538 { 4539 struct drm_device *dev = crtc->base.dev; 4540 struct drm_i915_private *dev_priv = dev->dev_private; 4541 4542 if (!crtc->config->ips_enabled) 4543 return; 4544 4545 /* We can only enable IPS after we enable a plane and wait for a vblank */ 4546 intel_wait_for_vblank(dev, crtc->pipe); 4547 4548 assert_plane_enabled(dev_priv, crtc->plane); 4549 if (IS_BROADWELL(dev)) { 4550 mutex_lock(&dev_priv->rps.hw_lock); 4551 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4552 mutex_unlock(&dev_priv->rps.hw_lock); 4553 /* Quoting Art Runyan: "its not safe to expect any particular 4554 * value in IPS_CTL bit 31 after enabling IPS through the 4555 * mailbox." Moreover, the mailbox may return a bogus state, 4556 * so we need to just enable it and continue on. 4557 */ 4558 } else { 4559 I915_WRITE(IPS_CTL, IPS_ENABLE); 4560 /* The bit only becomes 1 in the next vblank, so this wait here 4561 * is essentially intel_wait_for_vblank. If we don't have this 4562 * and don't wait for vblanks until the end of crtc_enable, then 4563 * the HW state readout code will complain that the expected 4564 * IPS_CTL value is not the one we read. */ 4565 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) 4566 DRM_ERROR("Timed out waiting for IPS enable\n"); 4567 } 4568 } 4569 4570 void hsw_disable_ips(struct intel_crtc *crtc) 4571 { 4572 struct drm_device *dev = crtc->base.dev; 4573 struct drm_i915_private *dev_priv = dev->dev_private; 4574 4575 if (!crtc->config->ips_enabled) 4576 return; 4577 4578 assert_plane_enabled(dev_priv, crtc->plane); 4579 if (IS_BROADWELL(dev)) { 4580 mutex_lock(&dev_priv->rps.hw_lock); 4581 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4582 mutex_unlock(&dev_priv->rps.hw_lock); 4583 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4584 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) 4585 DRM_ERROR("Timed out waiting for IPS disable\n"); 4586 } else { 4587 I915_WRITE(IPS_CTL, 0); 4588 POSTING_READ(IPS_CTL); 4589 } 4590 4591 /* We need to wait for a vblank before we can disable the plane. */ 4592 intel_wait_for_vblank(dev, crtc->pipe); 4593 } 4594 4595 /** Loads the palette/gamma unit for the CRTC with the prepared values */ 4596 static void intel_crtc_load_lut(struct drm_crtc *crtc) 4597 { 4598 struct drm_device *dev = crtc->dev; 4599 struct drm_i915_private *dev_priv = dev->dev_private; 4600 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4601 enum i915_pipe pipe = intel_crtc->pipe; 4602 int palreg = PALETTE(pipe); 4603 int i; 4604 bool reenable_ips = false; 4605 4606 /* The clocks have to be on to load the palette. */ 4607 if (!crtc->state->active) 4608 return; 4609 4610 if (HAS_GMCH_DISPLAY(dev_priv->dev)) { 4611 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4612 assert_dsi_pll_enabled(dev_priv); 4613 else 4614 assert_pll_enabled(dev_priv, pipe); 4615 } 4616 4617 /* use legacy palette for Ironlake */ 4618 if (!HAS_GMCH_DISPLAY(dev)) 4619 palreg = LGC_PALETTE(pipe); 4620 4621 /* Workaround : Do not read or write the pipe palette/gamma data while 4622 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4623 */ 4624 if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && 4625 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 4626 GAMMA_MODE_MODE_SPLIT)) { 4627 hsw_disable_ips(intel_crtc); 4628 reenable_ips = true; 4629 } 4630 4631 for (i = 0; i < 256; i++) { 4632 I915_WRITE(palreg + 4 * i, 4633 (intel_crtc->lut_r[i] << 16) | 4634 (intel_crtc->lut_g[i] << 8) | 4635 intel_crtc->lut_b[i]); 4636 } 4637 4638 if (reenable_ips) 4639 hsw_enable_ips(intel_crtc); 4640 } 4641 4642 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4643 { 4644 if (intel_crtc->overlay) { 4645 struct drm_device *dev = intel_crtc->base.dev; 4646 struct drm_i915_private *dev_priv = dev->dev_private; 4647 4648 mutex_lock(&dev->struct_mutex); 4649 dev_priv->mm.interruptible = false; 4650 (void) intel_overlay_switch_off(intel_crtc->overlay); 4651 dev_priv->mm.interruptible = true; 4652 mutex_unlock(&dev->struct_mutex); 4653 } 4654 4655 /* Let userspace switch the overlay on again. In most cases userspace 4656 * has to recompute where to put it anyway. 4657 */ 4658 } 4659 4660 /** 4661 * intel_post_enable_primary - Perform operations after enabling primary plane 4662 * @crtc: the CRTC whose primary plane was just enabled 4663 * 4664 * Performs potentially sleeping operations that must be done after the primary 4665 * plane is enabled, such as updating FBC and IPS. Note that this may be 4666 * called due to an explicit primary plane update, or due to an implicit 4667 * re-enable that is caused when a sprite plane is updated to no longer 4668 * completely hide the primary plane. 4669 */ 4670 static void 4671 intel_post_enable_primary(struct drm_crtc *crtc) 4672 { 4673 struct drm_device *dev = crtc->dev; 4674 struct drm_i915_private *dev_priv = dev->dev_private; 4675 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4676 int pipe = intel_crtc->pipe; 4677 4678 /* 4679 * BDW signals flip done immediately if the plane 4680 * is disabled, even if the plane enable is already 4681 * armed to occur at the next vblank :( 4682 */ 4683 if (IS_BROADWELL(dev)) 4684 intel_wait_for_vblank(dev, pipe); 4685 4686 /* 4687 * FIXME IPS should be fine as long as one plane is 4688 * enabled, but in practice it seems to have problems 4689 * when going from primary only to sprite only and vice 4690 * versa. 4691 */ 4692 hsw_enable_ips(intel_crtc); 4693 4694 /* 4695 * Gen2 reports pipe underruns whenever all planes are disabled. 4696 * So don't enable underrun reporting before at least some planes 4697 * are enabled. 4698 * FIXME: Need to fix the logic to work when we turn off all planes 4699 * but leave the pipe running. 4700 */ 4701 if (IS_GEN2(dev)) 4702 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4703 4704 /* Underruns don't raise interrupts, so check manually. */ 4705 if (HAS_GMCH_DISPLAY(dev)) 4706 i9xx_check_fifo_underruns(dev_priv); 4707 } 4708 4709 /** 4710 * intel_pre_disable_primary - Perform operations before disabling primary plane 4711 * @crtc: the CRTC whose primary plane is to be disabled 4712 * 4713 * Performs potentially sleeping operations that must be done before the 4714 * primary plane is disabled, such as updating FBC and IPS. Note that this may 4715 * be called due to an explicit primary plane update, or due to an implicit 4716 * disable that is caused when a sprite plane completely hides the primary 4717 * plane. 4718 */ 4719 static void 4720 intel_pre_disable_primary(struct drm_crtc *crtc) 4721 { 4722 struct drm_device *dev = crtc->dev; 4723 struct drm_i915_private *dev_priv = dev->dev_private; 4724 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4725 int pipe = intel_crtc->pipe; 4726 4727 /* 4728 * Gen2 reports pipe underruns whenever all planes are disabled. 4729 * So diasble underrun reporting before all the planes get disabled. 4730 * FIXME: Need to fix the logic to work when we turn off all planes 4731 * but leave the pipe running. 4732 */ 4733 if (IS_GEN2(dev)) 4734 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 4735 4736 /* 4737 * Vblank time updates from the shadow to live plane control register 4738 * are blocked if the memory self-refresh mode is active at that 4739 * moment. So to make sure the plane gets truly disabled, disable 4740 * first the self-refresh mode. The self-refresh enable bit in turn 4741 * will be checked/applied by the HW only at the next frame start 4742 * event which is after the vblank start event, so we need to have a 4743 * wait-for-vblank between disabling the plane and the pipe. 4744 */ 4745 if (HAS_GMCH_DISPLAY(dev)) { 4746 intel_set_memory_cxsr(dev_priv, false); 4747 dev_priv->wm.vlv.cxsr = false; 4748 intel_wait_for_vblank(dev, pipe); 4749 } 4750 4751 /* 4752 * FIXME IPS should be fine as long as one plane is 4753 * enabled, but in practice it seems to have problems 4754 * when going from primary only to sprite only and vice 4755 * versa. 4756 */ 4757 hsw_disable_ips(intel_crtc); 4758 } 4759 4760 static void intel_post_plane_update(struct intel_crtc *crtc) 4761 { 4762 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4763 struct drm_device *dev = crtc->base.dev; 4764 struct drm_i915_private *dev_priv = dev->dev_private; 4765 struct drm_plane *plane; 4766 4767 if (atomic->wait_vblank) 4768 intel_wait_for_vblank(dev, crtc->pipe); 4769 4770 intel_frontbuffer_flip(dev, atomic->fb_bits); 4771 4772 if (atomic->disable_cxsr) 4773 crtc->wm.cxsr_allowed = true; 4774 4775 if (crtc->atomic.update_wm_post) 4776 intel_update_watermarks(&crtc->base); 4777 4778 if (atomic->update_fbc) 4779 intel_fbc_update(dev_priv); 4780 4781 if (atomic->post_enable_primary) 4782 intel_post_enable_primary(&crtc->base); 4783 4784 drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) 4785 intel_update_sprite_watermarks(plane, &crtc->base, 4786 0, 0, 0, false, false); 4787 4788 memset(atomic, 0, sizeof(*atomic)); 4789 } 4790 4791 static void intel_pre_plane_update(struct intel_crtc *crtc) 4792 { 4793 struct drm_device *dev = crtc->base.dev; 4794 struct drm_i915_private *dev_priv = dev->dev_private; 4795 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4796 struct drm_plane *p; 4797 4798 /* Track fb's for any planes being disabled */ 4799 drm_for_each_plane_mask(p, dev, atomic->disabled_planes) { 4800 struct intel_plane *plane = to_intel_plane(p); 4801 4802 mutex_lock(&dev->struct_mutex); 4803 i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL, 4804 plane->frontbuffer_bit); 4805 mutex_unlock(&dev->struct_mutex); 4806 } 4807 4808 if (atomic->wait_for_flips) 4809 intel_crtc_wait_for_pending_flips(&crtc->base); 4810 4811 if (atomic->disable_fbc) 4812 intel_fbc_disable_crtc(crtc); 4813 4814 if (crtc->atomic.disable_ips) 4815 hsw_disable_ips(crtc); 4816 4817 if (atomic->pre_disable_primary) 4818 intel_pre_disable_primary(&crtc->base); 4819 4820 if (atomic->disable_cxsr) { 4821 crtc->wm.cxsr_allowed = false; 4822 intel_set_memory_cxsr(dev_priv, false); 4823 } 4824 } 4825 4826 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 4827 { 4828 struct drm_device *dev = crtc->dev; 4829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4830 struct drm_plane *p; 4831 int pipe = intel_crtc->pipe; 4832 4833 intel_crtc_dpms_overlay_disable(intel_crtc); 4834 4835 drm_for_each_plane_mask(p, dev, plane_mask) 4836 to_intel_plane(p)->disable_plane(p, crtc); 4837 4838 /* 4839 * FIXME: Once we grow proper nuclear flip support out of this we need 4840 * to compute the mask of flip planes precisely. For the time being 4841 * consider this a flip to a NULL plane. 4842 */ 4843 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4844 } 4845 4846 static void ironlake_crtc_enable(struct drm_crtc *crtc) 4847 { 4848 struct drm_device *dev = crtc->dev; 4849 struct drm_i915_private *dev_priv = dev->dev_private; 4850 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4851 struct intel_encoder *encoder; 4852 int pipe = intel_crtc->pipe; 4853 4854 if (WARN_ON(intel_crtc->active)) 4855 return; 4856 4857 if (intel_crtc->config->has_pch_encoder) 4858 intel_prepare_shared_dpll(intel_crtc); 4859 4860 if (intel_crtc->config->has_dp_encoder) 4861 intel_dp_set_m_n(intel_crtc, M1_N1); 4862 4863 intel_set_pipe_timings(intel_crtc); 4864 4865 if (intel_crtc->config->has_pch_encoder) { 4866 intel_cpu_transcoder_set_m_n(intel_crtc, 4867 &intel_crtc->config->fdi_m_n, NULL); 4868 } 4869 4870 ironlake_set_pipeconf(crtc); 4871 4872 intel_crtc->active = true; 4873 4874 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4875 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4876 4877 for_each_encoder_on_crtc(dev, crtc, encoder) 4878 if (encoder->pre_enable) 4879 encoder->pre_enable(encoder); 4880 4881 if (intel_crtc->config->has_pch_encoder) { 4882 /* Note: FDI PLL enabling _must_ be done before we enable the 4883 * cpu pipes, hence this is separate from all the other fdi/pch 4884 * enabling. */ 4885 ironlake_fdi_pll_enable(intel_crtc); 4886 } else { 4887 assert_fdi_tx_disabled(dev_priv, pipe); 4888 assert_fdi_rx_disabled(dev_priv, pipe); 4889 } 4890 4891 ironlake_pfit_enable(intel_crtc); 4892 4893 /* 4894 * On ILK+ LUT must be loaded before the pipe is running but with 4895 * clocks enabled 4896 */ 4897 intel_crtc_load_lut(crtc); 4898 4899 intel_update_watermarks(crtc); 4900 intel_enable_pipe(intel_crtc); 4901 4902 if (intel_crtc->config->has_pch_encoder) 4903 ironlake_pch_enable(crtc); 4904 4905 assert_vblank_disabled(crtc); 4906 drm_crtc_vblank_on(crtc); 4907 4908 for_each_encoder_on_crtc(dev, crtc, encoder) 4909 encoder->enable(encoder); 4910 4911 if (HAS_PCH_CPT(dev)) 4912 cpt_verify_modeset(dev, intel_crtc->pipe); 4913 } 4914 4915 /* IPS only exists on ULT machines and is tied to pipe A. */ 4916 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 4917 { 4918 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 4919 } 4920 4921 static void haswell_crtc_enable(struct drm_crtc *crtc) 4922 { 4923 struct drm_device *dev = crtc->dev; 4924 struct drm_i915_private *dev_priv = dev->dev_private; 4925 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4926 struct intel_encoder *encoder; 4927 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 4928 struct intel_crtc_state *pipe_config = 4929 to_intel_crtc_state(crtc->state); 4930 4931 if (WARN_ON(intel_crtc->active)) 4932 return; 4933 4934 if (intel_crtc_to_shared_dpll(intel_crtc)) 4935 intel_enable_shared_dpll(intel_crtc); 4936 4937 if (intel_crtc->config->has_dp_encoder) 4938 intel_dp_set_m_n(intel_crtc, M1_N1); 4939 4940 intel_set_pipe_timings(intel_crtc); 4941 4942 if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) { 4943 I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder), 4944 intel_crtc->config->pixel_multiplier - 1); 4945 } 4946 4947 if (intel_crtc->config->has_pch_encoder) { 4948 intel_cpu_transcoder_set_m_n(intel_crtc, 4949 &intel_crtc->config->fdi_m_n, NULL); 4950 } 4951 4952 haswell_set_pipeconf(crtc); 4953 4954 intel_set_pipe_csc(crtc); 4955 4956 intel_crtc->active = true; 4957 4958 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4959 for_each_encoder_on_crtc(dev, crtc, encoder) 4960 if (encoder->pre_enable) 4961 encoder->pre_enable(encoder); 4962 4963 if (intel_crtc->config->has_pch_encoder) { 4964 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4965 true); 4966 dev_priv->display.fdi_link_train(crtc); 4967 } 4968 4969 intel_ddi_enable_pipe_clock(intel_crtc); 4970 4971 if (INTEL_INFO(dev)->gen == 9) 4972 skylake_pfit_enable(intel_crtc); 4973 else if (INTEL_INFO(dev)->gen < 9) 4974 ironlake_pfit_enable(intel_crtc); 4975 else 4976 MISSING_CASE(INTEL_INFO(dev)->gen); 4977 4978 /* 4979 * On ILK+ LUT must be loaded before the pipe is running but with 4980 * clocks enabled 4981 */ 4982 intel_crtc_load_lut(crtc); 4983 4984 intel_ddi_set_pipe_settings(crtc); 4985 intel_ddi_enable_transcoder_func(crtc); 4986 4987 intel_update_watermarks(crtc); 4988 intel_enable_pipe(intel_crtc); 4989 4990 if (intel_crtc->config->has_pch_encoder) 4991 lpt_pch_enable(crtc); 4992 4993 if (intel_crtc->config->dp_encoder_is_mst) 4994 intel_ddi_set_vc_payload_alloc(crtc, true); 4995 4996 assert_vblank_disabled(crtc); 4997 drm_crtc_vblank_on(crtc); 4998 4999 for_each_encoder_on_crtc(dev, crtc, encoder) { 5000 encoder->enable(encoder); 5001 intel_opregion_notify_encoder(encoder, true); 5002 } 5003 5004 /* If we change the relative order between pipe/planes enabling, we need 5005 * to change the workaround. */ 5006 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 5007 if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) { 5008 intel_wait_for_vblank(dev, hsw_workaround_pipe); 5009 intel_wait_for_vblank(dev, hsw_workaround_pipe); 5010 } 5011 } 5012 5013 static void ironlake_pfit_disable(struct intel_crtc *crtc) 5014 { 5015 struct drm_device *dev = crtc->base.dev; 5016 struct drm_i915_private *dev_priv = dev->dev_private; 5017 int pipe = crtc->pipe; 5018 5019 /* To avoid upsetting the power well on haswell only disable the pfit if 5020 * it's in use. The hw state code will make sure we get this right. */ 5021 if (crtc->config->pch_pfit.enabled) { 5022 I915_WRITE(PF_CTL(pipe), 0); 5023 I915_WRITE(PF_WIN_POS(pipe), 0); 5024 I915_WRITE(PF_WIN_SZ(pipe), 0); 5025 } 5026 } 5027 5028 static void ironlake_crtc_disable(struct drm_crtc *crtc) 5029 { 5030 struct drm_device *dev = crtc->dev; 5031 struct drm_i915_private *dev_priv = dev->dev_private; 5032 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5033 struct intel_encoder *encoder; 5034 int pipe = intel_crtc->pipe; 5035 u32 reg, temp; 5036 5037 for_each_encoder_on_crtc(dev, crtc, encoder) 5038 encoder->disable(encoder); 5039 5040 drm_crtc_vblank_off(crtc); 5041 assert_vblank_disabled(crtc); 5042 5043 if (intel_crtc->config->has_pch_encoder) 5044 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5045 5046 intel_disable_pipe(intel_crtc); 5047 5048 ironlake_pfit_disable(intel_crtc); 5049 5050 if (intel_crtc->config->has_pch_encoder) 5051 ironlake_fdi_disable(crtc); 5052 5053 for_each_encoder_on_crtc(dev, crtc, encoder) 5054 if (encoder->post_disable) 5055 encoder->post_disable(encoder); 5056 5057 if (intel_crtc->config->has_pch_encoder) { 5058 ironlake_disable_pch_transcoder(dev_priv, pipe); 5059 5060 if (HAS_PCH_CPT(dev)) { 5061 /* disable TRANS_DP_CTL */ 5062 reg = TRANS_DP_CTL(pipe); 5063 temp = I915_READ(reg); 5064 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 5065 TRANS_DP_PORT_SEL_MASK); 5066 temp |= TRANS_DP_PORT_SEL_NONE; 5067 I915_WRITE(reg, temp); 5068 5069 /* disable DPLL_SEL */ 5070 temp = I915_READ(PCH_DPLL_SEL); 5071 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 5072 I915_WRITE(PCH_DPLL_SEL, temp); 5073 } 5074 5075 ironlake_fdi_pll_disable(intel_crtc); 5076 } 5077 5078 intel_crtc->active = false; 5079 intel_update_watermarks(crtc); 5080 } 5081 5082 static void haswell_crtc_disable(struct drm_crtc *crtc) 5083 { 5084 struct drm_device *dev = crtc->dev; 5085 struct drm_i915_private *dev_priv = dev->dev_private; 5086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5087 struct intel_encoder *encoder; 5088 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5089 5090 for_each_encoder_on_crtc(dev, crtc, encoder) { 5091 intel_opregion_notify_encoder(encoder, false); 5092 encoder->disable(encoder); 5093 } 5094 5095 drm_crtc_vblank_off(crtc); 5096 assert_vblank_disabled(crtc); 5097 5098 if (intel_crtc->config->has_pch_encoder) 5099 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5100 false); 5101 intel_disable_pipe(intel_crtc); 5102 5103 if (intel_crtc->config->dp_encoder_is_mst) 5104 intel_ddi_set_vc_payload_alloc(crtc, false); 5105 5106 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5107 5108 if (INTEL_INFO(dev)->gen == 9) 5109 skylake_scaler_disable(intel_crtc); 5110 else if (INTEL_INFO(dev)->gen < 9) 5111 ironlake_pfit_disable(intel_crtc); 5112 else 5113 MISSING_CASE(INTEL_INFO(dev)->gen); 5114 5115 intel_ddi_disable_pipe_clock(intel_crtc); 5116 5117 if (intel_crtc->config->has_pch_encoder) { 5118 lpt_disable_pch_transcoder(dev_priv); 5119 intel_ddi_fdi_disable(crtc); 5120 } 5121 5122 for_each_encoder_on_crtc(dev, crtc, encoder) 5123 if (encoder->post_disable) 5124 encoder->post_disable(encoder); 5125 5126 intel_crtc->active = false; 5127 intel_update_watermarks(crtc); 5128 } 5129 5130 static void i9xx_pfit_enable(struct intel_crtc *crtc) 5131 { 5132 struct drm_device *dev = crtc->base.dev; 5133 struct drm_i915_private *dev_priv = dev->dev_private; 5134 struct intel_crtc_state *pipe_config = crtc->config; 5135 5136 if (!pipe_config->gmch_pfit.control) 5137 return; 5138 5139 /* 5140 * The panel fitter should only be adjusted whilst the pipe is disabled, 5141 * according to register description and PRM. 5142 */ 5143 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5144 assert_pipe_disabled(dev_priv, crtc->pipe); 5145 5146 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5147 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5148 5149 /* Border color in case we don't scale up to the full screen. Black by 5150 * default, change to something else for debugging. */ 5151 I915_WRITE(BCLRPAT(crtc->pipe), 0); 5152 } 5153 5154 static enum intel_display_power_domain port_to_power_domain(enum port port) 5155 { 5156 switch (port) { 5157 case PORT_A: 5158 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 5159 case PORT_B: 5160 return POWER_DOMAIN_PORT_DDI_B_4_LANES; 5161 case PORT_C: 5162 return POWER_DOMAIN_PORT_DDI_C_4_LANES; 5163 case PORT_D: 5164 return POWER_DOMAIN_PORT_DDI_D_4_LANES; 5165 case PORT_E: 5166 return POWER_DOMAIN_PORT_DDI_E_2_LANES; 5167 default: 5168 WARN_ON_ONCE(1); 5169 return POWER_DOMAIN_PORT_OTHER; 5170 } 5171 } 5172 5173 #define for_each_power_domain(domain, mask) \ 5174 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 5175 if ((1 << (domain)) & (mask)) 5176 5177 enum intel_display_power_domain 5178 intel_display_port_power_domain(struct intel_encoder *intel_encoder) 5179 { 5180 struct drm_device *dev = intel_encoder->base.dev; 5181 struct intel_digital_port *intel_dig_port; 5182 5183 switch (intel_encoder->type) { 5184 case INTEL_OUTPUT_UNKNOWN: 5185 /* Only DDI platforms should ever use this output type */ 5186 WARN_ON_ONCE(!HAS_DDI(dev)); 5187 case INTEL_OUTPUT_DISPLAYPORT: 5188 case INTEL_OUTPUT_HDMI: 5189 case INTEL_OUTPUT_EDP: 5190 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 5191 return port_to_power_domain(intel_dig_port->port); 5192 case INTEL_OUTPUT_DP_MST: 5193 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; 5194 return port_to_power_domain(intel_dig_port->port); 5195 case INTEL_OUTPUT_ANALOG: 5196 return POWER_DOMAIN_PORT_CRT; 5197 case INTEL_OUTPUT_DSI: 5198 return POWER_DOMAIN_PORT_DSI; 5199 default: 5200 return POWER_DOMAIN_PORT_OTHER; 5201 } 5202 } 5203 5204 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) 5205 { 5206 struct drm_device *dev = crtc->dev; 5207 struct intel_encoder *intel_encoder; 5208 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5209 enum i915_pipe pipe = intel_crtc->pipe; 5210 unsigned long mask; 5211 enum transcoder transcoder; 5212 5213 if (!crtc->state->active) 5214 return 0; 5215 5216 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); 5217 5218 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5219 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5220 if (intel_crtc->config->pch_pfit.enabled || 5221 intel_crtc->config->pch_pfit.force_thru) 5222 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5223 5224 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 5225 mask |= BIT(intel_display_port_power_domain(intel_encoder)); 5226 5227 return mask; 5228 } 5229 5230 static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc) 5231 { 5232 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5233 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5234 enum intel_display_power_domain domain; 5235 unsigned long domains, new_domains, old_domains; 5236 5237 old_domains = intel_crtc->enabled_power_domains; 5238 intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc); 5239 5240 domains = new_domains & ~old_domains; 5241 5242 for_each_power_domain(domain, domains) 5243 intel_display_power_get(dev_priv, domain); 5244 5245 return old_domains & ~new_domains; 5246 } 5247 5248 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 5249 unsigned long domains) 5250 { 5251 enum intel_display_power_domain domain; 5252 5253 for_each_power_domain(domain, domains) 5254 intel_display_power_put(dev_priv, domain); 5255 } 5256 5257 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) 5258 { 5259 struct drm_device *dev = state->dev; 5260 struct drm_i915_private *dev_priv = dev->dev_private; 5261 unsigned long put_domains[I915_MAX_PIPES] = {}; 5262 struct drm_crtc_state *crtc_state; 5263 struct drm_crtc *crtc; 5264 int i; 5265 5266 for_each_crtc_in_state(state, crtc, crtc_state, i) { 5267 if (needs_modeset(crtc->state)) 5268 put_domains[to_intel_crtc(crtc)->pipe] = 5269 modeset_get_crtc_power_domains(crtc); 5270 } 5271 5272 if (dev_priv->display.modeset_commit_cdclk) { 5273 unsigned int cdclk = to_intel_atomic_state(state)->cdclk; 5274 5275 if (cdclk != dev_priv->cdclk_freq && 5276 !WARN_ON(!state->allow_modeset)) 5277 dev_priv->display.modeset_commit_cdclk(state); 5278 } 5279 5280 for (i = 0; i < I915_MAX_PIPES; i++) 5281 if (put_domains[i]) 5282 modeset_put_power_domains(dev_priv, put_domains[i]); 5283 } 5284 5285 static void intel_update_max_cdclk(struct drm_device *dev) 5286 { 5287 struct drm_i915_private *dev_priv = dev->dev_private; 5288 5289 if (IS_SKYLAKE(dev)) { 5290 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5291 5292 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 5293 dev_priv->max_cdclk_freq = 675000; 5294 else if (limit == SKL_DFSM_CDCLK_LIMIT_540) 5295 dev_priv->max_cdclk_freq = 540000; 5296 else if (limit == SKL_DFSM_CDCLK_LIMIT_450) 5297 dev_priv->max_cdclk_freq = 450000; 5298 else 5299 dev_priv->max_cdclk_freq = 337500; 5300 } else if (IS_BROADWELL(dev)) { 5301 /* 5302 * FIXME with extra cooling we can allow 5303 * 540 MHz for ULX and 675 Mhz for ULT. 5304 * How can we know if extra cooling is 5305 * available? PCI ID, VTB, something else? 5306 */ 5307 if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 5308 dev_priv->max_cdclk_freq = 450000; 5309 else if (IS_BDW_ULX(dev)) 5310 dev_priv->max_cdclk_freq = 450000; 5311 else if (IS_BDW_ULT(dev)) 5312 dev_priv->max_cdclk_freq = 540000; 5313 else 5314 dev_priv->max_cdclk_freq = 675000; 5315 } else if (IS_CHERRYVIEW(dev)) { 5316 dev_priv->max_cdclk_freq = 320000; 5317 } else if (IS_VALLEYVIEW(dev)) { 5318 dev_priv->max_cdclk_freq = 400000; 5319 } else { 5320 /* otherwise assume cdclk is fixed */ 5321 dev_priv->max_cdclk_freq = dev_priv->cdclk_freq; 5322 } 5323 5324 DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", 5325 dev_priv->max_cdclk_freq); 5326 } 5327 5328 static void intel_update_cdclk(struct drm_device *dev) 5329 { 5330 struct drm_i915_private *dev_priv = dev->dev_private; 5331 5332 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 5333 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", 5334 dev_priv->cdclk_freq); 5335 5336 /* 5337 * Program the gmbus_freq based on the cdclk frequency. 5338 * BSpec erroneously claims we should aim for 4MHz, but 5339 * in fact 1MHz is the correct frequency. 5340 */ 5341 if (IS_VALLEYVIEW(dev)) { 5342 /* 5343 * Program the gmbus_freq based on the cdclk frequency. 5344 * BSpec erroneously claims we should aim for 4MHz, but 5345 * in fact 1MHz is the correct frequency. 5346 */ 5347 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5348 } 5349 5350 if (dev_priv->max_cdclk_freq == 0) 5351 intel_update_max_cdclk(dev); 5352 } 5353 5354 static void broxton_set_cdclk(struct drm_device *dev, int frequency) 5355 { 5356 struct drm_i915_private *dev_priv = dev->dev_private; 5357 uint32_t divider; 5358 uint32_t ratio; 5359 uint32_t current_freq; 5360 int ret; 5361 5362 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5363 switch (frequency) { 5364 case 144000: 5365 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5366 ratio = BXT_DE_PLL_RATIO(60); 5367 break; 5368 case 288000: 5369 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5370 ratio = BXT_DE_PLL_RATIO(60); 5371 break; 5372 case 384000: 5373 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5374 ratio = BXT_DE_PLL_RATIO(60); 5375 break; 5376 case 576000: 5377 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5378 ratio = BXT_DE_PLL_RATIO(60); 5379 break; 5380 case 624000: 5381 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5382 ratio = BXT_DE_PLL_RATIO(65); 5383 break; 5384 case 19200: 5385 /* 5386 * Bypass frequency with DE PLL disabled. Init ratio, divider 5387 * to suppress GCC warning. 5388 */ 5389 ratio = 0; 5390 divider = 0; 5391 break; 5392 default: 5393 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5394 5395 return; 5396 } 5397 5398 mutex_lock(&dev_priv->rps.hw_lock); 5399 /* Inform power controller of upcoming frequency change */ 5400 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5401 0x80000000); 5402 mutex_unlock(&dev_priv->rps.hw_lock); 5403 5404 if (ret) { 5405 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5406 ret, frequency); 5407 return; 5408 } 5409 5410 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5411 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5412 current_freq = current_freq * 500 + 1000; 5413 5414 /* 5415 * DE PLL has to be disabled when 5416 * - setting to 19.2MHz (bypass, PLL isn't used) 5417 * - before setting to 624MHz (PLL needs toggling) 5418 * - before setting to any frequency from 624MHz (PLL needs toggling) 5419 */ 5420 if (frequency == 19200 || frequency == 624000 || 5421 current_freq == 624000) { 5422 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5423 /* Timeout 200us */ 5424 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5425 1)) 5426 DRM_ERROR("timout waiting for DE PLL unlock\n"); 5427 } 5428 5429 if (frequency != 19200) { 5430 uint32_t val; 5431 5432 val = I915_READ(BXT_DE_PLL_CTL); 5433 val &= ~BXT_DE_PLL_RATIO_MASK; 5434 val |= ratio; 5435 I915_WRITE(BXT_DE_PLL_CTL, val); 5436 5437 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); 5438 /* Timeout 200us */ 5439 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) 5440 DRM_ERROR("timeout waiting for DE PLL lock\n"); 5441 5442 val = I915_READ(CDCLK_CTL); 5443 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; 5444 val |= divider; 5445 /* 5446 * Disable SSA Precharge when CD clock frequency < 500 MHz, 5447 * enable otherwise. 5448 */ 5449 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5450 if (frequency >= 500000) 5451 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5452 5453 val &= ~CDCLK_FREQ_DECIMAL_MASK; 5454 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5455 val |= (frequency - 1000) / 500; 5456 I915_WRITE(CDCLK_CTL, val); 5457 } 5458 5459 mutex_lock(&dev_priv->rps.hw_lock); 5460 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5461 DIV_ROUND_UP(frequency, 25000)); 5462 mutex_unlock(&dev_priv->rps.hw_lock); 5463 5464 if (ret) { 5465 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5466 ret, frequency); 5467 return; 5468 } 5469 5470 intel_update_cdclk(dev); 5471 } 5472 5473 void broxton_init_cdclk(struct drm_device *dev) 5474 { 5475 struct drm_i915_private *dev_priv = dev->dev_private; 5476 uint32_t val; 5477 5478 /* 5479 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5480 * or else the reset will hang because there is no PCH to respond. 5481 * Move the handshake programming to initialization sequence. 5482 * Previously was left up to BIOS. 5483 */ 5484 val = I915_READ(HSW_NDE_RSTWRN_OPT); 5485 val &= ~RESET_PCH_HANDSHAKE_ENABLE; 5486 I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 5487 5488 /* Enable PG1 for cdclk */ 5489 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5490 5491 /* check if cd clock is enabled */ 5492 if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) { 5493 DRM_DEBUG_KMS("Display already initialized\n"); 5494 return; 5495 } 5496 5497 /* 5498 * FIXME: 5499 * - The initial CDCLK needs to be read from VBT. 5500 * Need to make this change after VBT has changes for BXT. 5501 * - check if setting the max (or any) cdclk freq is really necessary 5502 * here, it belongs to modeset time 5503 */ 5504 broxton_set_cdclk(dev, 624000); 5505 5506 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5507 POSTING_READ(DBUF_CTL); 5508 5509 udelay(10); 5510 5511 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5512 DRM_ERROR("DBuf power enable timeout!\n"); 5513 } 5514 5515 void broxton_uninit_cdclk(struct drm_device *dev) 5516 { 5517 struct drm_i915_private *dev_priv = dev->dev_private; 5518 5519 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5520 POSTING_READ(DBUF_CTL); 5521 5522 udelay(10); 5523 5524 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5525 DRM_ERROR("DBuf power disable timeout!\n"); 5526 5527 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5528 broxton_set_cdclk(dev, 19200); 5529 5530 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5531 } 5532 5533 static const struct skl_cdclk_entry { 5534 unsigned int freq; 5535 unsigned int vco; 5536 } skl_cdclk_frequencies[] = { 5537 { .freq = 308570, .vco = 8640 }, 5538 { .freq = 337500, .vco = 8100 }, 5539 { .freq = 432000, .vco = 8640 }, 5540 { .freq = 450000, .vco = 8100 }, 5541 { .freq = 540000, .vco = 8100 }, 5542 { .freq = 617140, .vco = 8640 }, 5543 { .freq = 675000, .vco = 8100 }, 5544 }; 5545 5546 static unsigned int skl_cdclk_decimal(unsigned int freq) 5547 { 5548 return (freq - 1000) / 500; 5549 } 5550 5551 static unsigned int skl_cdclk_get_vco(unsigned int freq) 5552 { 5553 unsigned int i; 5554 5555 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { 5556 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; 5557 5558 if (e->freq == freq) 5559 return e->vco; 5560 } 5561 5562 return 8100; 5563 } 5564 5565 static void 5566 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5567 { 5568 unsigned int min_freq; 5569 u32 val; 5570 5571 /* select the minimum CDCLK before enabling DPLL 0 */ 5572 val = I915_READ(CDCLK_CTL); 5573 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; 5574 val |= CDCLK_FREQ_337_308; 5575 5576 if (required_vco == 8640) 5577 min_freq = 308570; 5578 else 5579 min_freq = 337500; 5580 5581 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); 5582 5583 I915_WRITE(CDCLK_CTL, val); 5584 POSTING_READ(CDCLK_CTL); 5585 5586 /* 5587 * We always enable DPLL0 with the lowest link rate possible, but still 5588 * taking into account the VCO required to operate the eDP panel at the 5589 * desired frequency. The usual DP link rates operate with a VCO of 5590 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5591 * The modeset code is responsible for the selection of the exact link 5592 * rate later on, with the constraint of choosing a frequency that 5593 * works with required_vco. 5594 */ 5595 val = I915_READ(DPLL_CTRL1); 5596 5597 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5598 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5599 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5600 if (required_vco == 8640) 5601 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5602 SKL_DPLL0); 5603 else 5604 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 5605 SKL_DPLL0); 5606 5607 I915_WRITE(DPLL_CTRL1, val); 5608 POSTING_READ(DPLL_CTRL1); 5609 5610 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5611 5612 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5613 DRM_ERROR("DPLL0 not locked\n"); 5614 } 5615 5616 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5617 { 5618 int ret; 5619 u32 val; 5620 5621 /* inform PCU we want to change CDCLK */ 5622 val = SKL_CDCLK_PREPARE_FOR_CHANGE; 5623 mutex_lock(&dev_priv->rps.hw_lock); 5624 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); 5625 mutex_unlock(&dev_priv->rps.hw_lock); 5626 5627 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); 5628 } 5629 5630 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) 5631 { 5632 unsigned int i; 5633 5634 for (i = 0; i < 15; i++) { 5635 if (skl_cdclk_pcu_ready(dev_priv)) 5636 return true; 5637 udelay(10); 5638 } 5639 5640 return false; 5641 } 5642 5643 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) 5644 { 5645 struct drm_device *dev = dev_priv->dev; 5646 u32 freq_select, pcu_ack; 5647 5648 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); 5649 5650 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5651 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5652 return; 5653 } 5654 5655 /* set CDCLK_CTL */ 5656 switch(freq) { 5657 case 450000: 5658 case 432000: 5659 freq_select = CDCLK_FREQ_450_432; 5660 pcu_ack = 1; 5661 break; 5662 case 540000: 5663 freq_select = CDCLK_FREQ_540; 5664 pcu_ack = 2; 5665 break; 5666 case 308570: 5667 case 337500: 5668 default: 5669 freq_select = CDCLK_FREQ_337_308; 5670 pcu_ack = 0; 5671 break; 5672 case 617140: 5673 case 675000: 5674 freq_select = CDCLK_FREQ_675_617; 5675 pcu_ack = 3; 5676 break; 5677 } 5678 5679 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); 5680 POSTING_READ(CDCLK_CTL); 5681 5682 /* inform PCU of the change */ 5683 mutex_lock(&dev_priv->rps.hw_lock); 5684 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); 5685 mutex_unlock(&dev_priv->rps.hw_lock); 5686 5687 intel_update_cdclk(dev); 5688 } 5689 5690 void skl_uninit_cdclk(struct drm_i915_private *dev_priv) 5691 { 5692 /* disable DBUF power */ 5693 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5694 POSTING_READ(DBUF_CTL); 5695 5696 udelay(10); 5697 5698 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5699 DRM_ERROR("DBuf power disable timeout\n"); 5700 5701 /* disable DPLL0 */ 5702 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); 5703 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) 5704 DRM_ERROR("Couldn't disable DPLL0\n"); 5705 5706 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5707 } 5708 5709 void skl_init_cdclk(struct drm_i915_private *dev_priv) 5710 { 5711 u32 val; 5712 unsigned int required_vco; 5713 5714 /* enable PCH reset handshake */ 5715 val = I915_READ(HSW_NDE_RSTWRN_OPT); 5716 I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE); 5717 5718 /* enable PG1 and Misc I/O */ 5719 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5720 5721 /* DPLL0 not enabled (happens on early BIOS versions) */ 5722 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { 5723 /* enable DPLL0 */ 5724 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); 5725 skl_dpll0_enable(dev_priv, required_vco); 5726 } 5727 5728 /* set CDCLK to the frequency the BIOS chose */ 5729 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); 5730 5731 /* enable DBUF power */ 5732 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5733 POSTING_READ(DBUF_CTL); 5734 5735 udelay(10); 5736 5737 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5738 DRM_ERROR("DBuf power enable timeout\n"); 5739 } 5740 5741 /* returns HPLL frequency in kHz */ 5742 static int valleyview_get_vco(struct drm_i915_private *dev_priv) 5743 { 5744 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 5745 5746 /* Obtain SKU information */ 5747 mutex_lock(&dev_priv->sb_lock); 5748 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 5749 CCK_FUSE_HPLL_FREQ_MASK; 5750 mutex_unlock(&dev_priv->sb_lock); 5751 5752 return vco_freq[hpll_freq] * 1000; 5753 } 5754 5755 /* Adjust CDclk dividers to allow high res or save power if possible */ 5756 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5757 { 5758 struct drm_i915_private *dev_priv = dev->dev_private; 5759 u32 val, cmd; 5760 5761 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5762 != dev_priv->cdclk_freq); 5763 5764 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ 5765 cmd = 2; 5766 else if (cdclk == 266667) 5767 cmd = 1; 5768 else 5769 cmd = 0; 5770 5771 mutex_lock(&dev_priv->rps.hw_lock); 5772 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5773 val &= ~DSPFREQGUAR_MASK; 5774 val |= (cmd << DSPFREQGUAR_SHIFT); 5775 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5776 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5777 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 5778 50)) { 5779 DRM_ERROR("timed out waiting for CDclk change\n"); 5780 } 5781 mutex_unlock(&dev_priv->rps.hw_lock); 5782 5783 mutex_lock(&dev_priv->sb_lock); 5784 5785 if (cdclk == 400000) { 5786 u32 divider; 5787 5788 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5789 5790 /* adjust cdclk divider */ 5791 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5792 val &= ~DISPLAY_FREQUENCY_VALUES; 5793 val |= divider; 5794 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 5795 5796 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & 5797 DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 5798 50)) 5799 DRM_ERROR("timed out waiting for CDclk change\n"); 5800 } 5801 5802 /* adjust self-refresh exit latency value */ 5803 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC); 5804 val &= ~0x7f; 5805 5806 /* 5807 * For high bandwidth configs, we set a higher latency in the bunit 5808 * so that the core display fetch happens in time to avoid underruns. 5809 */ 5810 if (cdclk == 400000) 5811 val |= 4500 / 250; /* 4.5 usec */ 5812 else 5813 val |= 3000 / 250; /* 3.0 usec */ 5814 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); 5815 5816 mutex_unlock(&dev_priv->sb_lock); 5817 5818 intel_update_cdclk(dev); 5819 } 5820 5821 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) 5822 { 5823 struct drm_i915_private *dev_priv = dev->dev_private; 5824 u32 val, cmd; 5825 5826 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5827 != dev_priv->cdclk_freq); 5828 5829 switch (cdclk) { 5830 case 333333: 5831 case 320000: 5832 case 266667: 5833 case 200000: 5834 break; 5835 default: 5836 MISSING_CASE(cdclk); 5837 return; 5838 } 5839 5840 /* 5841 * Specs are full of misinformation, but testing on actual 5842 * hardware has shown that we just need to write the desired 5843 * CCK divider into the Punit register. 5844 */ 5845 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5846 5847 mutex_lock(&dev_priv->rps.hw_lock); 5848 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5849 val &= ~DSPFREQGUAR_MASK_CHV; 5850 val |= (cmd << DSPFREQGUAR_SHIFT_CHV); 5851 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5852 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5853 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 5854 50)) { 5855 DRM_ERROR("timed out waiting for CDclk change\n"); 5856 } 5857 mutex_unlock(&dev_priv->rps.hw_lock); 5858 5859 intel_update_cdclk(dev); 5860 } 5861 5862 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 5863 int max_pixclk) 5864 { 5865 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; 5866 int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; 5867 5868 /* 5869 * Really only a few cases to deal with, as only 4 CDclks are supported: 5870 * 200MHz 5871 * 267MHz 5872 * 320/333MHz (depends on HPLL freq) 5873 * 400MHz (VLV only) 5874 * So we check to see whether we're above 90% (VLV) or 95% (CHV) 5875 * of the lower bin and adjust if needed. 5876 * 5877 * We seem to get an unstable or solid color picture at 200MHz. 5878 * Not sure what's wrong. For now use 200MHz only when all pipes 5879 * are off. 5880 */ 5881 if (!IS_CHERRYVIEW(dev_priv) && 5882 max_pixclk > freq_320*limit/100) 5883 return 400000; 5884 else if (max_pixclk > 266667*limit/100) 5885 return freq_320; 5886 else if (max_pixclk > 0) 5887 return 266667; 5888 else 5889 return 200000; 5890 } 5891 5892 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, 5893 int max_pixclk) 5894 { 5895 /* 5896 * FIXME: 5897 * - remove the guardband, it's not needed on BXT 5898 * - set 19.2MHz bypass frequency if there are no active pipes 5899 */ 5900 if (max_pixclk > 576000*9/10) 5901 return 624000; 5902 else if (max_pixclk > 384000*9/10) 5903 return 576000; 5904 else if (max_pixclk > 288000*9/10) 5905 return 384000; 5906 else if (max_pixclk > 144000*9/10) 5907 return 288000; 5908 else 5909 return 144000; 5910 } 5911 5912 /* Compute the max pixel clock for new configuration. Uses atomic state if 5913 * that's non-NULL, look at current state otherwise. */ 5914 static int intel_mode_max_pixclk(struct drm_device *dev, 5915 struct drm_atomic_state *state) 5916 { 5917 struct intel_crtc *intel_crtc; 5918 struct intel_crtc_state *crtc_state; 5919 int max_pixclk = 0; 5920 5921 for_each_intel_crtc(dev, intel_crtc) { 5922 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 5923 if (IS_ERR(crtc_state)) 5924 return PTR_ERR(crtc_state); 5925 5926 if (!crtc_state->base.enable) 5927 continue; 5928 5929 max_pixclk = max(max_pixclk, 5930 crtc_state->base.adjusted_mode.crtc_clock); 5931 } 5932 5933 return max_pixclk; 5934 } 5935 5936 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) 5937 { 5938 struct drm_device *dev = state->dev; 5939 struct drm_i915_private *dev_priv = dev->dev_private; 5940 int max_pixclk = intel_mode_max_pixclk(dev, state); 5941 5942 if (max_pixclk < 0) 5943 return max_pixclk; 5944 5945 to_intel_atomic_state(state)->cdclk = 5946 valleyview_calc_cdclk(dev_priv, max_pixclk); 5947 5948 return 0; 5949 } 5950 5951 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) 5952 { 5953 struct drm_device *dev = state->dev; 5954 struct drm_i915_private *dev_priv = dev->dev_private; 5955 int max_pixclk = intel_mode_max_pixclk(dev, state); 5956 5957 if (max_pixclk < 0) 5958 return max_pixclk; 5959 5960 to_intel_atomic_state(state)->cdclk = 5961 broxton_calc_cdclk(dev_priv, max_pixclk); 5962 5963 return 0; 5964 } 5965 5966 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) 5967 { 5968 unsigned int credits, default_credits; 5969 5970 if (IS_CHERRYVIEW(dev_priv)) 5971 default_credits = PFI_CREDIT(12); 5972 else 5973 default_credits = PFI_CREDIT(8); 5974 5975 if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { 5976 /* CHV suggested value is 31 or 63 */ 5977 if (IS_CHERRYVIEW(dev_priv)) 5978 credits = PFI_CREDIT_63; 5979 else 5980 credits = PFI_CREDIT(15); 5981 } else { 5982 credits = default_credits; 5983 } 5984 5985 /* 5986 * WA - write default credits before re-programming 5987 * FIXME: should we also set the resend bit here? 5988 */ 5989 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 5990 default_credits); 5991 5992 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 5993 credits | PFI_CREDIT_RESEND); 5994 5995 /* 5996 * FIXME is this guaranteed to clear 5997 * immediately or should we poll for it? 5998 */ 5999 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); 6000 } 6001 6002 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) 6003 { 6004 struct drm_device *dev = old_state->dev; 6005 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; 6006 struct drm_i915_private *dev_priv = dev->dev_private; 6007 6008 /* 6009 * FIXME: We can end up here with all power domains off, yet 6010 * with a CDCLK frequency other than the minimum. To account 6011 * for this take the PIPE-A power domain, which covers the HW 6012 * blocks needed for the following programming. This can be 6013 * removed once it's guaranteed that we get here either with 6014 * the minimum CDCLK set, or the required power domains 6015 * enabled. 6016 */ 6017 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); 6018 6019 if (IS_CHERRYVIEW(dev)) 6020 cherryview_set_cdclk(dev, req_cdclk); 6021 else 6022 valleyview_set_cdclk(dev, req_cdclk); 6023 6024 vlv_program_pfi_credits(dev_priv); 6025 6026 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); 6027 } 6028 6029 static void valleyview_crtc_enable(struct drm_crtc *crtc) 6030 { 6031 struct drm_device *dev = crtc->dev; 6032 struct drm_i915_private *dev_priv = to_i915(dev); 6033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6034 struct intel_encoder *encoder; 6035 int pipe = intel_crtc->pipe; 6036 bool is_dsi; 6037 6038 if (WARN_ON(intel_crtc->active)) 6039 return; 6040 6041 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 6042 6043 if (!is_dsi) { 6044 if (IS_CHERRYVIEW(dev)) 6045 chv_prepare_pll(intel_crtc, intel_crtc->config); 6046 else 6047 vlv_prepare_pll(intel_crtc, intel_crtc->config); 6048 } 6049 6050 if (intel_crtc->config->has_dp_encoder) 6051 intel_dp_set_m_n(intel_crtc, M1_N1); 6052 6053 intel_set_pipe_timings(intel_crtc); 6054 6055 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { 6056 struct drm_i915_private *dev_priv = dev->dev_private; 6057 6058 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6059 I915_WRITE(CHV_CANVAS(pipe), 0); 6060 } 6061 6062 i9xx_set_pipeconf(intel_crtc); 6063 6064 intel_crtc->active = true; 6065 6066 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6067 6068 for_each_encoder_on_crtc(dev, crtc, encoder) 6069 if (encoder->pre_pll_enable) 6070 encoder->pre_pll_enable(encoder); 6071 6072 if (!is_dsi) { 6073 if (IS_CHERRYVIEW(dev)) 6074 chv_enable_pll(intel_crtc, intel_crtc->config); 6075 else 6076 vlv_enable_pll(intel_crtc, intel_crtc->config); 6077 } 6078 6079 for_each_encoder_on_crtc(dev, crtc, encoder) 6080 if (encoder->pre_enable) 6081 encoder->pre_enable(encoder); 6082 6083 i9xx_pfit_enable(intel_crtc); 6084 6085 intel_crtc_load_lut(crtc); 6086 6087 intel_enable_pipe(intel_crtc); 6088 6089 assert_vblank_disabled(crtc); 6090 drm_crtc_vblank_on(crtc); 6091 6092 for_each_encoder_on_crtc(dev, crtc, encoder) 6093 encoder->enable(encoder); 6094 } 6095 6096 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6097 { 6098 struct drm_device *dev = crtc->base.dev; 6099 struct drm_i915_private *dev_priv = dev->dev_private; 6100 6101 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6102 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6103 } 6104 6105 static void i9xx_crtc_enable(struct drm_crtc *crtc) 6106 { 6107 struct drm_device *dev = crtc->dev; 6108 struct drm_i915_private *dev_priv = to_i915(dev); 6109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6110 struct intel_encoder *encoder; 6111 int pipe = intel_crtc->pipe; 6112 6113 if (WARN_ON(intel_crtc->active)) 6114 return; 6115 6116 i9xx_set_pll_dividers(intel_crtc); 6117 6118 if (intel_crtc->config->has_dp_encoder) 6119 intel_dp_set_m_n(intel_crtc, M1_N1); 6120 6121 intel_set_pipe_timings(intel_crtc); 6122 6123 i9xx_set_pipeconf(intel_crtc); 6124 6125 intel_crtc->active = true; 6126 6127 if (!IS_GEN2(dev)) 6128 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6129 6130 for_each_encoder_on_crtc(dev, crtc, encoder) 6131 if (encoder->pre_enable) 6132 encoder->pre_enable(encoder); 6133 6134 i9xx_enable_pll(intel_crtc); 6135 6136 i9xx_pfit_enable(intel_crtc); 6137 6138 intel_crtc_load_lut(crtc); 6139 6140 intel_update_watermarks(crtc); 6141 intel_enable_pipe(intel_crtc); 6142 6143 assert_vblank_disabled(crtc); 6144 drm_crtc_vblank_on(crtc); 6145 6146 for_each_encoder_on_crtc(dev, crtc, encoder) 6147 encoder->enable(encoder); 6148 } 6149 6150 static void i9xx_pfit_disable(struct intel_crtc *crtc) 6151 { 6152 struct drm_device *dev = crtc->base.dev; 6153 struct drm_i915_private *dev_priv = dev->dev_private; 6154 6155 if (!crtc->config->gmch_pfit.control) 6156 return; 6157 6158 assert_pipe_disabled(dev_priv, crtc->pipe); 6159 6160 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 6161 I915_READ(PFIT_CONTROL)); 6162 I915_WRITE(PFIT_CONTROL, 0); 6163 } 6164 6165 static void i9xx_crtc_disable(struct drm_crtc *crtc) 6166 { 6167 struct drm_device *dev = crtc->dev; 6168 struct drm_i915_private *dev_priv = dev->dev_private; 6169 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6170 struct intel_encoder *encoder; 6171 int pipe = intel_crtc->pipe; 6172 6173 /* 6174 * On gen2 planes are double buffered but the pipe isn't, so we must 6175 * wait for planes to fully turn off before disabling the pipe. 6176 * We also need to wait on all gmch platforms because of the 6177 * self-refresh mode constraint explained above. 6178 */ 6179 intel_wait_for_vblank(dev, pipe); 6180 6181 for_each_encoder_on_crtc(dev, crtc, encoder) 6182 encoder->disable(encoder); 6183 6184 drm_crtc_vblank_off(crtc); 6185 assert_vblank_disabled(crtc); 6186 6187 intel_disable_pipe(intel_crtc); 6188 6189 i9xx_pfit_disable(intel_crtc); 6190 6191 for_each_encoder_on_crtc(dev, crtc, encoder) 6192 if (encoder->post_disable) 6193 encoder->post_disable(encoder); 6194 6195 if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { 6196 if (IS_CHERRYVIEW(dev)) 6197 chv_disable_pll(dev_priv, pipe); 6198 else if (IS_VALLEYVIEW(dev)) 6199 vlv_disable_pll(dev_priv, pipe); 6200 else 6201 i9xx_disable_pll(intel_crtc); 6202 } 6203 6204 if (!IS_GEN2(dev)) 6205 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6206 6207 intel_crtc->active = false; 6208 intel_update_watermarks(crtc); 6209 } 6210 6211 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 6212 { 6213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6214 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6215 enum intel_display_power_domain domain; 6216 unsigned long domains; 6217 6218 if (!intel_crtc->active) 6219 return; 6220 6221 if (to_intel_plane_state(crtc->primary->state)->visible) { 6222 intel_crtc_wait_for_pending_flips(crtc); 6223 intel_pre_disable_primary(crtc); 6224 } 6225 6226 intel_crtc_disable_planes(crtc, crtc->state->plane_mask); 6227 dev_priv->display.crtc_disable(crtc); 6228 intel_disable_shared_dpll(intel_crtc); 6229 6230 domains = intel_crtc->enabled_power_domains; 6231 for_each_power_domain(domain, domains) 6232 intel_display_power_put(dev_priv, domain); 6233 intel_crtc->enabled_power_domains = 0; 6234 } 6235 6236 /* 6237 * turn all crtc's off, but do not adjust state 6238 * This has to be paired with a call to intel_modeset_setup_hw_state. 6239 */ 6240 int intel_display_suspend(struct drm_device *dev) 6241 { 6242 struct drm_mode_config *config = &dev->mode_config; 6243 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; 6244 struct drm_atomic_state *state; 6245 struct drm_crtc *crtc; 6246 unsigned crtc_mask = 0; 6247 int ret = 0; 6248 6249 if (WARN_ON(!ctx)) 6250 return 0; 6251 6252 #if 0 6253 lockdep_assert_held(&ctx->ww_ctx); 6254 #endif 6255 state = drm_atomic_state_alloc(dev); 6256 if (WARN_ON(!state)) 6257 return -ENOMEM; 6258 6259 state->acquire_ctx = ctx; 6260 state->allow_modeset = true; 6261 6262 for_each_crtc(dev, crtc) { 6263 struct drm_crtc_state *crtc_state = 6264 drm_atomic_get_crtc_state(state, crtc); 6265 6266 ret = PTR_ERR_OR_ZERO(crtc_state); 6267 if (ret) 6268 goto free; 6269 6270 if (!crtc_state->active) 6271 continue; 6272 6273 crtc_state->active = false; 6274 crtc_mask |= 1 << drm_crtc_index(crtc); 6275 } 6276 6277 if (crtc_mask) { 6278 ret = drm_atomic_commit(state); 6279 6280 if (!ret) { 6281 for_each_crtc(dev, crtc) 6282 if (crtc_mask & (1 << drm_crtc_index(crtc))) 6283 crtc->state->active = true; 6284 6285 return ret; 6286 } 6287 } 6288 6289 free: 6290 if (ret) 6291 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 6292 drm_atomic_state_free(state); 6293 return ret; 6294 } 6295 6296 void intel_encoder_destroy(struct drm_encoder *encoder) 6297 { 6298 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6299 6300 drm_encoder_cleanup(encoder); 6301 kfree(intel_encoder); 6302 } 6303 6304 /* Cross check the actual hw state with our own modeset state tracking (and it's 6305 * internal consistency). */ 6306 static void intel_connector_check_state(struct intel_connector *connector) 6307 { 6308 struct drm_crtc *crtc = connector->base.state->crtc; 6309 6310 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 6311 connector->base.base.id, 6312 connector->base.name); 6313 6314 if (connector->get_hw_state(connector)) { 6315 struct intel_encoder *encoder = connector->encoder; 6316 struct drm_connector_state *conn_state = connector->base.state; 6317 6318 I915_STATE_WARN(!crtc, 6319 "connector enabled without attached crtc\n"); 6320 6321 if (!crtc) 6322 return; 6323 6324 I915_STATE_WARN(!crtc->state->active, 6325 "connector is active, but attached crtc isn't\n"); 6326 6327 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 6328 return; 6329 6330 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 6331 "atomic encoder doesn't match attached encoder\n"); 6332 6333 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 6334 "attached encoder crtc differs from connector crtc\n"); 6335 } else { 6336 I915_STATE_WARN(crtc && crtc->state->active, 6337 "attached crtc is active, but connector isn't\n"); 6338 I915_STATE_WARN(!crtc && connector->base.state->best_encoder, 6339 "best encoder set without crtc!\n"); 6340 } 6341 } 6342 6343 int intel_connector_init(struct intel_connector *connector) 6344 { 6345 struct drm_connector_state *connector_state; 6346 6347 connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL); 6348 if (!connector_state) 6349 return -ENOMEM; 6350 6351 connector->base.state = connector_state; 6352 return 0; 6353 } 6354 6355 struct intel_connector *intel_connector_alloc(void) 6356 { 6357 struct intel_connector *connector; 6358 6359 connector = kzalloc(sizeof *connector, GFP_KERNEL); 6360 if (!connector) 6361 return NULL; 6362 6363 if (intel_connector_init(connector) < 0) { 6364 kfree(connector); 6365 return NULL; 6366 } 6367 6368 return connector; 6369 } 6370 6371 /* Simple connector->get_hw_state implementation for encoders that support only 6372 * one connector and no cloning and hence the encoder state determines the state 6373 * of the connector. */ 6374 bool intel_connector_get_hw_state(struct intel_connector *connector) 6375 { 6376 enum i915_pipe pipe = 0; 6377 struct intel_encoder *encoder = connector->encoder; 6378 6379 return encoder->get_hw_state(encoder, &pipe); 6380 } 6381 6382 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6383 { 6384 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6385 return crtc_state->fdi_lanes; 6386 6387 return 0; 6388 } 6389 6390 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, 6391 struct intel_crtc_state *pipe_config) 6392 { 6393 struct drm_atomic_state *state = pipe_config->base.state; 6394 struct intel_crtc *other_crtc; 6395 struct intel_crtc_state *other_crtc_state; 6396 6397 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 6398 pipe_name(pipe), pipe_config->fdi_lanes); 6399 if (pipe_config->fdi_lanes > 4) { 6400 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 6401 pipe_name(pipe), pipe_config->fdi_lanes); 6402 return -EINVAL; 6403 } 6404 6405 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 6406 if (pipe_config->fdi_lanes > 2) { 6407 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 6408 pipe_config->fdi_lanes); 6409 return -EINVAL; 6410 } else { 6411 return 0; 6412 } 6413 } 6414 6415 if (INTEL_INFO(dev)->num_pipes == 2) 6416 return 0; 6417 6418 /* Ivybridge 3 pipe is really complicated */ 6419 switch (pipe) { 6420 case PIPE_A: 6421 return 0; 6422 case PIPE_B: 6423 if (pipe_config->fdi_lanes <= 2) 6424 return 0; 6425 6426 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C)); 6427 other_crtc_state = 6428 intel_atomic_get_crtc_state(state, other_crtc); 6429 if (IS_ERR(other_crtc_state)) 6430 return PTR_ERR(other_crtc_state); 6431 6432 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 6433 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 6434 pipe_name(pipe), pipe_config->fdi_lanes); 6435 return -EINVAL; 6436 } 6437 return 0; 6438 case PIPE_C: 6439 if (pipe_config->fdi_lanes > 2) { 6440 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6441 pipe_name(pipe), pipe_config->fdi_lanes); 6442 return -EINVAL; 6443 } 6444 6445 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B)); 6446 other_crtc_state = 6447 intel_atomic_get_crtc_state(state, other_crtc); 6448 if (IS_ERR(other_crtc_state)) 6449 return PTR_ERR(other_crtc_state); 6450 6451 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6452 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6453 return -EINVAL; 6454 } 6455 return 0; 6456 default: 6457 BUG(); 6458 } 6459 } 6460 6461 #define RETRY 1 6462 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6463 struct intel_crtc_state *pipe_config) 6464 { 6465 struct drm_device *dev = intel_crtc->base.dev; 6466 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6467 int lane, link_bw, fdi_dotclock, ret; 6468 bool needs_recompute = false; 6469 6470 retry: 6471 /* FDI is a binary signal running at ~2.7GHz, encoding 6472 * each output octet as 10 bits. The actual frequency 6473 * is stored as a divider into a 100MHz clock, and the 6474 * mode pixel clock is stored in units of 1KHz. 6475 * Hence the bw of each lane in terms of the mode signal 6476 * is: 6477 */ 6478 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 6479 6480 fdi_dotclock = adjusted_mode->crtc_clock; 6481 6482 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 6483 pipe_config->pipe_bpp); 6484 6485 pipe_config->fdi_lanes = lane; 6486 6487 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6488 link_bw, &pipe_config->fdi_m_n); 6489 6490 ret = ironlake_check_fdi_lanes(intel_crtc->base.dev, 6491 intel_crtc->pipe, pipe_config); 6492 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6493 pipe_config->pipe_bpp -= 2*3; 6494 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6495 pipe_config->pipe_bpp); 6496 needs_recompute = true; 6497 pipe_config->bw_constrained = true; 6498 6499 goto retry; 6500 } 6501 6502 if (needs_recompute) 6503 return RETRY; 6504 6505 return ret; 6506 } 6507 6508 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, 6509 struct intel_crtc_state *pipe_config) 6510 { 6511 if (pipe_config->pipe_bpp > 24) 6512 return false; 6513 6514 /* HSW can handle pixel rate up to cdclk? */ 6515 if (IS_HASWELL(dev_priv->dev)) 6516 return true; 6517 6518 /* 6519 * We compare against max which means we must take 6520 * the increased cdclk requirement into account when 6521 * calculating the new cdclk. 6522 * 6523 * Should measure whether using a lower cdclk w/o IPS 6524 */ 6525 return ilk_pipe_pixel_rate(pipe_config) <= 6526 dev_priv->max_cdclk_freq * 95 / 100; 6527 } 6528 6529 static void hsw_compute_ips_config(struct intel_crtc *crtc, 6530 struct intel_crtc_state *pipe_config) 6531 { 6532 struct drm_device *dev = crtc->base.dev; 6533 struct drm_i915_private *dev_priv = dev->dev_private; 6534 6535 pipe_config->ips_enabled = i915.enable_ips && 6536 hsw_crtc_supports_ips(crtc) && 6537 pipe_config_supports_ips(dev_priv, pipe_config); 6538 } 6539 6540 static int intel_crtc_compute_config(struct intel_crtc *crtc, 6541 struct intel_crtc_state *pipe_config) 6542 { 6543 struct drm_device *dev = crtc->base.dev; 6544 struct drm_i915_private *dev_priv = dev->dev_private; 6545 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6546 6547 /* FIXME should check pixel clock limits on all platforms */ 6548 if (INTEL_INFO(dev)->gen < 4) { 6549 int clock_limit = dev_priv->max_cdclk_freq; 6550 6551 /* 6552 * Enable pixel doubling when the dot clock 6553 * is > 90% of the (display) core speed. 6554 * 6555 * GDG double wide on either pipe, 6556 * otherwise pipe A only. 6557 */ 6558 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && 6559 adjusted_mode->crtc_clock > clock_limit * 9 / 10) { 6560 clock_limit *= 2; 6561 pipe_config->double_wide = true; 6562 } 6563 6564 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10) 6565 return -EINVAL; 6566 } 6567 6568 /* 6569 * Pipe horizontal size must be even in: 6570 * - DVO ganged mode 6571 * - LVDS dual channel mode 6572 * - Double wide pipe 6573 */ 6574 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && 6575 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6576 pipe_config->pipe_src_w &= ~1; 6577 6578 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6579 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6580 */ 6581 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6582 adjusted_mode->hsync_start == adjusted_mode->hdisplay) 6583 return -EINVAL; 6584 6585 if (HAS_IPS(dev)) 6586 hsw_compute_ips_config(crtc, pipe_config); 6587 6588 if (pipe_config->has_pch_encoder) 6589 return ironlake_fdi_compute_config(crtc, pipe_config); 6590 6591 return 0; 6592 } 6593 6594 static int skylake_get_display_clock_speed(struct drm_device *dev) 6595 { 6596 struct drm_i915_private *dev_priv = to_i915(dev); 6597 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6598 uint32_t cdctl = I915_READ(CDCLK_CTL); 6599 uint32_t linkrate; 6600 6601 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6602 return 24000; /* 24MHz is the cd freq with NSSC ref */ 6603 6604 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6605 return 540000; 6606 6607 linkrate = (I915_READ(DPLL_CTRL1) & 6608 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; 6609 6610 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6611 linkrate == DPLL_CTRL1_LINK_RATE_1080) { 6612 /* vco 8640 */ 6613 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6614 case CDCLK_FREQ_450_432: 6615 return 432000; 6616 case CDCLK_FREQ_337_308: 6617 return 308570; 6618 case CDCLK_FREQ_675_617: 6619 return 617140; 6620 default: 6621 WARN(1, "Unknown cd freq selection\n"); 6622 } 6623 } else { 6624 /* vco 8100 */ 6625 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6626 case CDCLK_FREQ_450_432: 6627 return 450000; 6628 case CDCLK_FREQ_337_308: 6629 return 337500; 6630 case CDCLK_FREQ_675_617: 6631 return 675000; 6632 default: 6633 WARN(1, "Unknown cd freq selection\n"); 6634 } 6635 } 6636 6637 /* error case, do as if DPLL0 isn't enabled */ 6638 return 24000; 6639 } 6640 6641 static int broxton_get_display_clock_speed(struct drm_device *dev) 6642 { 6643 struct drm_i915_private *dev_priv = to_i915(dev); 6644 uint32_t cdctl = I915_READ(CDCLK_CTL); 6645 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6646 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); 6647 int cdclk; 6648 6649 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6650 return 19200; 6651 6652 cdclk = 19200 * pll_ratio / 2; 6653 6654 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6655 case BXT_CDCLK_CD2X_DIV_SEL_1: 6656 return cdclk; /* 576MHz or 624MHz */ 6657 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6658 return cdclk * 2 / 3; /* 384MHz */ 6659 case BXT_CDCLK_CD2X_DIV_SEL_2: 6660 return cdclk / 2; /* 288MHz */ 6661 case BXT_CDCLK_CD2X_DIV_SEL_4: 6662 return cdclk / 4; /* 144MHz */ 6663 } 6664 6665 /* error case, do as if DE PLL isn't enabled */ 6666 return 19200; 6667 } 6668 6669 static int broadwell_get_display_clock_speed(struct drm_device *dev) 6670 { 6671 struct drm_i915_private *dev_priv = dev->dev_private; 6672 uint32_t lcpll = I915_READ(LCPLL_CTL); 6673 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6674 6675 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6676 return 800000; 6677 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6678 return 450000; 6679 else if (freq == LCPLL_CLK_FREQ_450) 6680 return 450000; 6681 else if (freq == LCPLL_CLK_FREQ_54O_BDW) 6682 return 540000; 6683 else if (freq == LCPLL_CLK_FREQ_337_5_BDW) 6684 return 337500; 6685 else 6686 return 675000; 6687 } 6688 6689 static int haswell_get_display_clock_speed(struct drm_device *dev) 6690 { 6691 struct drm_i915_private *dev_priv = dev->dev_private; 6692 uint32_t lcpll = I915_READ(LCPLL_CTL); 6693 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6694 6695 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6696 return 800000; 6697 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6698 return 450000; 6699 else if (freq == LCPLL_CLK_FREQ_450) 6700 return 450000; 6701 else if (IS_HSW_ULT(dev)) 6702 return 337500; 6703 else 6704 return 540000; 6705 } 6706 6707 static int valleyview_get_display_clock_speed(struct drm_device *dev) 6708 { 6709 struct drm_i915_private *dev_priv = dev->dev_private; 6710 u32 val; 6711 int divider; 6712 6713 if (dev_priv->hpll_freq == 0) 6714 dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 6715 6716 mutex_lock(&dev_priv->sb_lock); 6717 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 6718 mutex_unlock(&dev_priv->sb_lock); 6719 6720 divider = val & DISPLAY_FREQUENCY_VALUES; 6721 6722 WARN((val & DISPLAY_FREQUENCY_STATUS) != 6723 (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 6724 "cdclk change in progress\n"); 6725 6726 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 6727 } 6728 6729 static int ilk_get_display_clock_speed(struct drm_device *dev) 6730 { 6731 return 450000; 6732 } 6733 6734 static int i945_get_display_clock_speed(struct drm_device *dev) 6735 { 6736 return 400000; 6737 } 6738 6739 static int i915_get_display_clock_speed(struct drm_device *dev) 6740 { 6741 return 333333; 6742 } 6743 6744 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 6745 { 6746 return 200000; 6747 } 6748 6749 static int pnv_get_display_clock_speed(struct drm_device *dev) 6750 { 6751 u16 gcfgc = 0; 6752 6753 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6754 6755 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6756 case GC_DISPLAY_CLOCK_267_MHZ_PNV: 6757 return 266667; 6758 case GC_DISPLAY_CLOCK_333_MHZ_PNV: 6759 return 333333; 6760 case GC_DISPLAY_CLOCK_444_MHZ_PNV: 6761 return 444444; 6762 case GC_DISPLAY_CLOCK_200_MHZ_PNV: 6763 return 200000; 6764 default: 6765 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); 6766 case GC_DISPLAY_CLOCK_133_MHZ_PNV: 6767 return 133333; 6768 case GC_DISPLAY_CLOCK_167_MHZ_PNV: 6769 return 166667; 6770 } 6771 } 6772 6773 static int i915gm_get_display_clock_speed(struct drm_device *dev) 6774 { 6775 u16 gcfgc = 0; 6776 6777 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6778 6779 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 6780 return 133333; 6781 else { 6782 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6783 case GC_DISPLAY_CLOCK_333_MHZ: 6784 return 333333; 6785 default: 6786 case GC_DISPLAY_CLOCK_190_200_MHZ: 6787 return 190000; 6788 } 6789 } 6790 } 6791 6792 static int i865_get_display_clock_speed(struct drm_device *dev) 6793 { 6794 return 266667; 6795 } 6796 6797 static int i85x_get_display_clock_speed(struct drm_device *dev) 6798 { 6799 u16 hpllcc = 0; 6800 6801 /* 6802 * 852GM/852GMV only supports 133 MHz and the HPLLCC 6803 * encoding is different :( 6804 * FIXME is this the right way to detect 852GM/852GMV? 6805 */ 6806 if (dev->pdev->revision == 0x1) 6807 return 133333; 6808 6809 #if 0 6810 pci_bus_read_config_word(dev->pdev->bus, 6811 PCI_DEVFN(0, 3), HPLLCC, &hpllcc); 6812 #endif 6813 6814 /* Assume that the hardware is in the high speed state. This 6815 * should be the default. 6816 */ 6817 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 6818 case GC_CLOCK_133_200: 6819 case GC_CLOCK_133_200_2: 6820 case GC_CLOCK_100_200: 6821 return 200000; 6822 case GC_CLOCK_166_250: 6823 return 250000; 6824 case GC_CLOCK_100_133: 6825 return 133333; 6826 case GC_CLOCK_133_266: 6827 case GC_CLOCK_133_266_2: 6828 case GC_CLOCK_166_266: 6829 return 266667; 6830 } 6831 6832 /* Shouldn't happen */ 6833 return 0; 6834 } 6835 6836 static int i830_get_display_clock_speed(struct drm_device *dev) 6837 { 6838 return 133333; 6839 } 6840 6841 static unsigned int intel_hpll_vco(struct drm_device *dev) 6842 { 6843 struct drm_i915_private *dev_priv = dev->dev_private; 6844 static const unsigned int blb_vco[8] = { 6845 [0] = 3200000, 6846 [1] = 4000000, 6847 [2] = 5333333, 6848 [3] = 4800000, 6849 [4] = 6400000, 6850 }; 6851 static const unsigned int pnv_vco[8] = { 6852 [0] = 3200000, 6853 [1] = 4000000, 6854 [2] = 5333333, 6855 [3] = 4800000, 6856 [4] = 2666667, 6857 }; 6858 static const unsigned int cl_vco[8] = { 6859 [0] = 3200000, 6860 [1] = 4000000, 6861 [2] = 5333333, 6862 [3] = 6400000, 6863 [4] = 3333333, 6864 [5] = 3566667, 6865 [6] = 4266667, 6866 }; 6867 static const unsigned int elk_vco[8] = { 6868 [0] = 3200000, 6869 [1] = 4000000, 6870 [2] = 5333333, 6871 [3] = 4800000, 6872 }; 6873 static const unsigned int ctg_vco[8] = { 6874 [0] = 3200000, 6875 [1] = 4000000, 6876 [2] = 5333333, 6877 [3] = 6400000, 6878 [4] = 2666667, 6879 [5] = 4266667, 6880 }; 6881 const unsigned int *vco_table; 6882 unsigned int vco; 6883 uint8_t tmp = 0; 6884 6885 /* FIXME other chipsets? */ 6886 if (IS_GM45(dev)) 6887 vco_table = ctg_vco; 6888 else if (IS_G4X(dev)) 6889 vco_table = elk_vco; 6890 else if (IS_CRESTLINE(dev)) 6891 vco_table = cl_vco; 6892 else if (IS_PINEVIEW(dev)) 6893 vco_table = pnv_vco; 6894 else if (IS_G33(dev)) 6895 vco_table = blb_vco; 6896 else 6897 return 0; 6898 6899 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO); 6900 6901 vco = vco_table[tmp & 0x7]; 6902 if (vco == 0) 6903 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); 6904 else 6905 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); 6906 6907 return vco; 6908 } 6909 6910 static int gm45_get_display_clock_speed(struct drm_device *dev) 6911 { 6912 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 6913 uint16_t tmp = 0; 6914 6915 pci_read_config_word(dev->pdev, GCFGC, &tmp); 6916 6917 cdclk_sel = (tmp >> 12) & 0x1; 6918 6919 switch (vco) { 6920 case 2666667: 6921 case 4000000: 6922 case 5333333: 6923 return cdclk_sel ? 333333 : 222222; 6924 case 3200000: 6925 return cdclk_sel ? 320000 : 228571; 6926 default: 6927 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp); 6928 return 222222; 6929 } 6930 } 6931 6932 static int i965gm_get_display_clock_speed(struct drm_device *dev) 6933 { 6934 static const uint8_t div_3200[] = { 16, 10, 8 }; 6935 static const uint8_t div_4000[] = { 20, 12, 10 }; 6936 static const uint8_t div_5333[] = { 24, 16, 14 }; 6937 const uint8_t *div_table; 6938 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 6939 uint16_t tmp = 0; 6940 6941 pci_read_config_word(dev->pdev, GCFGC, &tmp); 6942 6943 cdclk_sel = ((tmp >> 8) & 0x1f) - 1; 6944 6945 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 6946 goto fail; 6947 6948 switch (vco) { 6949 case 3200000: 6950 div_table = div_3200; 6951 break; 6952 case 4000000: 6953 div_table = div_4000; 6954 break; 6955 case 5333333: 6956 div_table = div_5333; 6957 break; 6958 default: 6959 goto fail; 6960 } 6961 6962 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 6963 6964 fail: 6965 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp); 6966 return 200000; 6967 } 6968 6969 static int g33_get_display_clock_speed(struct drm_device *dev) 6970 { 6971 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; 6972 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; 6973 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; 6974 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; 6975 const uint8_t *div_table; 6976 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 6977 uint16_t tmp = 0; 6978 6979 pci_read_config_word(dev->pdev, GCFGC, &tmp); 6980 6981 cdclk_sel = (tmp >> 4) & 0x7; 6982 6983 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 6984 goto fail; 6985 6986 switch (vco) { 6987 case 3200000: 6988 div_table = div_3200; 6989 break; 6990 case 4000000: 6991 div_table = div_4000; 6992 break; 6993 case 4800000: 6994 div_table = div_4800; 6995 break; 6996 case 5333333: 6997 div_table = div_5333; 6998 break; 6999 default: 7000 goto fail; 7001 } 7002 7003 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7004 7005 fail: 7006 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp); 7007 return 190476; 7008 } 7009 7010 static void 7011 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 7012 { 7013 while (*num > DATA_LINK_M_N_MASK || 7014 *den > DATA_LINK_M_N_MASK) { 7015 *num >>= 1; 7016 *den >>= 1; 7017 } 7018 } 7019 7020 static void compute_m_n(unsigned int m, unsigned int n, 7021 uint32_t *ret_m, uint32_t *ret_n) 7022 { 7023 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7024 *ret_m = div_u64((uint64_t) m * *ret_n, n); 7025 intel_reduce_m_n_ratio(ret_m, ret_n); 7026 } 7027 7028 void 7029 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 7030 int pixel_clock, int link_clock, 7031 struct intel_link_m_n *m_n) 7032 { 7033 m_n->tu = 64; 7034 7035 compute_m_n(bits_per_pixel * pixel_clock, 7036 link_clock * nlanes * 8, 7037 &m_n->gmch_m, &m_n->gmch_n); 7038 7039 compute_m_n(pixel_clock, link_clock, 7040 &m_n->link_m, &m_n->link_n); 7041 } 7042 7043 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7044 { 7045 if (i915.panel_use_ssc >= 0) 7046 return i915.panel_use_ssc != 0; 7047 return dev_priv->vbt.lvds_use_ssc 7048 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7049 } 7050 7051 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 7052 int num_connectors) 7053 { 7054 struct drm_device *dev = crtc_state->base.crtc->dev; 7055 struct drm_i915_private *dev_priv = dev->dev_private; 7056 int refclk; 7057 7058 WARN_ON(!crtc_state->base.state); 7059 7060 if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) { 7061 refclk = 100000; 7062 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7063 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 7064 refclk = dev_priv->vbt.lvds_ssc_freq; 7065 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7066 } else if (!IS_GEN2(dev)) { 7067 refclk = 96000; 7068 } else { 7069 refclk = 48000; 7070 } 7071 7072 return refclk; 7073 } 7074 7075 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 7076 { 7077 return (1 << dpll->n) << 16 | dpll->m2; 7078 } 7079 7080 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 7081 { 7082 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7083 } 7084 7085 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7086 struct intel_crtc_state *crtc_state, 7087 intel_clock_t *reduced_clock) 7088 { 7089 struct drm_device *dev = crtc->base.dev; 7090 u32 fp, fp2 = 0; 7091 7092 if (IS_PINEVIEW(dev)) { 7093 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7094 if (reduced_clock) 7095 fp2 = pnv_dpll_compute_fp(reduced_clock); 7096 } else { 7097 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7098 if (reduced_clock) 7099 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7100 } 7101 7102 crtc_state->dpll_hw_state.fp0 = fp; 7103 7104 crtc->lowfreq_avail = false; 7105 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7106 reduced_clock) { 7107 crtc_state->dpll_hw_state.fp1 = fp2; 7108 crtc->lowfreq_avail = true; 7109 } else { 7110 crtc_state->dpll_hw_state.fp1 = fp; 7111 } 7112 } 7113 7114 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 7115 pipe) 7116 { 7117 u32 reg_val; 7118 7119 /* 7120 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7121 * and set it to a reasonable value instead. 7122 */ 7123 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7124 reg_val &= 0xffffff00; 7125 reg_val |= 0x00000030; 7126 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7127 7128 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7129 reg_val &= 0x8cffffff; 7130 reg_val = 0x8c000000; 7131 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7132 7133 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7134 reg_val &= 0xffffff00; 7135 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7136 7137 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7138 reg_val &= 0x00ffffff; 7139 reg_val |= 0xb0000000; 7140 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7141 } 7142 7143 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 7144 struct intel_link_m_n *m_n) 7145 { 7146 struct drm_device *dev = crtc->base.dev; 7147 struct drm_i915_private *dev_priv = dev->dev_private; 7148 int pipe = crtc->pipe; 7149 7150 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7151 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7152 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7153 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7154 } 7155 7156 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 7157 struct intel_link_m_n *m_n, 7158 struct intel_link_m_n *m2_n2) 7159 { 7160 struct drm_device *dev = crtc->base.dev; 7161 struct drm_i915_private *dev_priv = dev->dev_private; 7162 int pipe = crtc->pipe; 7163 enum transcoder transcoder = crtc->config->cpu_transcoder; 7164 7165 if (INTEL_INFO(dev)->gen >= 5) { 7166 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7167 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7168 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7169 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7170 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 7171 * for gen < 8) and if DRRS is supported (to make sure the 7172 * registers are not unnecessarily accessed). 7173 */ 7174 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) && 7175 crtc->config->has_drrs) { 7176 I915_WRITE(PIPE_DATA_M2(transcoder), 7177 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7178 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7179 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7180 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7181 } 7182 } else { 7183 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7184 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7185 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7186 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7187 } 7188 } 7189 7190 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 7191 { 7192 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7193 7194 if (m_n == M1_N1) { 7195 dp_m_n = &crtc->config->dp_m_n; 7196 dp_m2_n2 = &crtc->config->dp_m2_n2; 7197 } else if (m_n == M2_N2) { 7198 7199 /* 7200 * M2_N2 registers are not supported. Hence m2_n2 divider value 7201 * needs to be programmed into M1_N1. 7202 */ 7203 dp_m_n = &crtc->config->dp_m2_n2; 7204 } else { 7205 DRM_ERROR("Unsupported divider value\n"); 7206 return; 7207 } 7208 7209 if (crtc->config->has_pch_encoder) 7210 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 7211 else 7212 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 7213 } 7214 7215 static void vlv_compute_dpll(struct intel_crtc *crtc, 7216 struct intel_crtc_state *pipe_config) 7217 { 7218 u32 dpll, dpll_md; 7219 7220 /* 7221 * Enable DPIO clock input. We should never disable the reference 7222 * clock for pipe B, since VGA hotplug / manual detection depends 7223 * on it. 7224 */ 7225 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV | 7226 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV; 7227 /* We should never disable this, set it here for state tracking */ 7228 if (crtc->pipe == PIPE_B) 7229 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7230 dpll |= DPLL_VCO_ENABLE; 7231 pipe_config->dpll_hw_state.dpll = dpll; 7232 7233 dpll_md = (pipe_config->pixel_multiplier - 1) 7234 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7235 pipe_config->dpll_hw_state.dpll_md = dpll_md; 7236 } 7237 7238 static void vlv_prepare_pll(struct intel_crtc *crtc, 7239 const struct intel_crtc_state *pipe_config) 7240 { 7241 struct drm_device *dev = crtc->base.dev; 7242 struct drm_i915_private *dev_priv = dev->dev_private; 7243 int pipe = crtc->pipe; 7244 u32 mdiv; 7245 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7246 u32 coreclk, reg_val; 7247 7248 mutex_lock(&dev_priv->sb_lock); 7249 7250 bestn = pipe_config->dpll.n; 7251 bestm1 = pipe_config->dpll.m1; 7252 bestm2 = pipe_config->dpll.m2; 7253 bestp1 = pipe_config->dpll.p1; 7254 bestp2 = pipe_config->dpll.p2; 7255 7256 /* See eDP HDMI DPIO driver vbios notes doc */ 7257 7258 /* PLL B needs special handling */ 7259 if (pipe == PIPE_B) 7260 vlv_pllb_recal_opamp(dev_priv, pipe); 7261 7262 /* Set up Tx target for periodic Rcomp update */ 7263 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7264 7265 /* Disable target IRef on PLL */ 7266 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7267 reg_val &= 0x00ffffff; 7268 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7269 7270 /* Disable fast lock */ 7271 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7272 7273 /* Set idtafcrecal before PLL is enabled */ 7274 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7275 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7276 mdiv |= ((bestn << DPIO_N_SHIFT)); 7277 mdiv |= (1 << DPIO_K_SHIFT); 7278 7279 /* 7280 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7281 * but we don't support that). 7282 * Note: don't use the DAC post divider as it seems unstable. 7283 */ 7284 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7285 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7286 7287 mdiv |= DPIO_ENABLE_CALIBRATION; 7288 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7289 7290 /* Set HBR and RBR LPF coefficients */ 7291 if (pipe_config->port_clock == 162000 || 7292 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || 7293 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 7294 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7295 0x009f0003); 7296 else 7297 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7298 0x00d0000f); 7299 7300 if (pipe_config->has_dp_encoder) { 7301 /* Use SSC source */ 7302 if (pipe == PIPE_A) 7303 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7304 0x0df40000); 7305 else 7306 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7307 0x0df70000); 7308 } else { /* HDMI or VGA */ 7309 /* Use bend source */ 7310 if (pipe == PIPE_A) 7311 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7312 0x0df70000); 7313 else 7314 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7315 0x0df40000); 7316 } 7317 7318 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7319 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7320 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 7321 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 7322 coreclk |= 0x01000000; 7323 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7324 7325 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7326 mutex_unlock(&dev_priv->sb_lock); 7327 } 7328 7329 static void chv_compute_dpll(struct intel_crtc *crtc, 7330 struct intel_crtc_state *pipe_config) 7331 { 7332 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7333 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | 7334 DPLL_VCO_ENABLE; 7335 if (crtc->pipe != PIPE_A) 7336 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7337 7338 pipe_config->dpll_hw_state.dpll_md = 7339 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7340 } 7341 7342 static void chv_prepare_pll(struct intel_crtc *crtc, 7343 const struct intel_crtc_state *pipe_config) 7344 { 7345 struct drm_device *dev = crtc->base.dev; 7346 struct drm_i915_private *dev_priv = dev->dev_private; 7347 int pipe = crtc->pipe; 7348 int dpll_reg = DPLL(crtc->pipe); 7349 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7350 u32 loopfilter, tribuf_calcntr; 7351 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7352 u32 dpio_val; 7353 int vco; 7354 7355 bestn = pipe_config->dpll.n; 7356 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7357 bestm1 = pipe_config->dpll.m1; 7358 bestm2 = pipe_config->dpll.m2 >> 22; 7359 bestp1 = pipe_config->dpll.p1; 7360 bestp2 = pipe_config->dpll.p2; 7361 vco = pipe_config->dpll.vco; 7362 dpio_val = 0; 7363 loopfilter = 0; 7364 7365 /* 7366 * Enable Refclk and SSC 7367 */ 7368 I915_WRITE(dpll_reg, 7369 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7370 7371 mutex_lock(&dev_priv->sb_lock); 7372 7373 /* p1 and p2 divider */ 7374 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7375 5 << DPIO_CHV_S1_DIV_SHIFT | 7376 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7377 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7378 1 << DPIO_CHV_K_DIV_SHIFT); 7379 7380 /* Feedback post-divider - m2 */ 7381 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7382 7383 /* Feedback refclk divider - n and m1 */ 7384 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7385 DPIO_CHV_M1_DIV_BY_2 | 7386 1 << DPIO_CHV_N_DIV_SHIFT); 7387 7388 /* M2 fraction division */ 7389 if (bestm2_frac) 7390 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7391 7392 /* M2 fraction division enable */ 7393 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7394 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7395 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7396 if (bestm2_frac) 7397 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7398 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7399 7400 /* Program digital lock detect threshold */ 7401 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7402 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7403 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7404 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7405 if (!bestm2_frac) 7406 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7407 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7408 7409 /* Loop filter */ 7410 if (vco == 5400000) { 7411 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7412 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7413 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7414 tribuf_calcntr = 0x9; 7415 } else if (vco <= 6200000) { 7416 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7417 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7418 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7419 tribuf_calcntr = 0x9; 7420 } else if (vco <= 6480000) { 7421 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7422 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7423 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7424 tribuf_calcntr = 0x8; 7425 } else { 7426 /* Not supported. Apply the same limits as in the max case */ 7427 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7428 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7429 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7430 tribuf_calcntr = 0; 7431 } 7432 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7433 7434 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7435 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7436 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7437 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7438 7439 /* AFC Recal */ 7440 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7441 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7442 DPIO_AFC_RECAL); 7443 7444 mutex_unlock(&dev_priv->sb_lock); 7445 } 7446 7447 /** 7448 * vlv_force_pll_on - forcibly enable just the PLL 7449 * @dev_priv: i915 private structure 7450 * @pipe: pipe PLL to enable 7451 * @dpll: PLL configuration 7452 * 7453 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7454 * in cases where we need the PLL enabled even when @pipe is not going to 7455 * be enabled. 7456 */ 7457 void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe, 7458 const struct dpll *dpll) 7459 { 7460 struct intel_crtc *crtc = 7461 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); 7462 struct intel_crtc_state pipe_config = { 7463 .base.crtc = &crtc->base, 7464 .pixel_multiplier = 1, 7465 .dpll = *dpll, 7466 }; 7467 7468 if (IS_CHERRYVIEW(dev)) { 7469 chv_compute_dpll(crtc, &pipe_config); 7470 chv_prepare_pll(crtc, &pipe_config); 7471 chv_enable_pll(crtc, &pipe_config); 7472 } else { 7473 vlv_compute_dpll(crtc, &pipe_config); 7474 vlv_prepare_pll(crtc, &pipe_config); 7475 vlv_enable_pll(crtc, &pipe_config); 7476 } 7477 } 7478 7479 /** 7480 * vlv_force_pll_off - forcibly disable just the PLL 7481 * @dev_priv: i915 private structure 7482 * @pipe: pipe PLL to disable 7483 * 7484 * Disable the PLL for @pipe. To be used in cases where we need 7485 * the PLL enabled even when @pipe is not going to be enabled. 7486 */ 7487 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe) 7488 { 7489 if (IS_CHERRYVIEW(dev)) 7490 chv_disable_pll(to_i915(dev), pipe); 7491 else 7492 vlv_disable_pll(to_i915(dev), pipe); 7493 } 7494 7495 static void i9xx_compute_dpll(struct intel_crtc *crtc, 7496 struct intel_crtc_state *crtc_state, 7497 intel_clock_t *reduced_clock, 7498 int num_connectors) 7499 { 7500 struct drm_device *dev = crtc->base.dev; 7501 struct drm_i915_private *dev_priv = dev->dev_private; 7502 u32 dpll; 7503 bool is_sdvo; 7504 struct dpll *clock = &crtc_state->dpll; 7505 7506 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 7507 7508 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) || 7509 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI); 7510 7511 dpll = DPLL_VGA_MODE_DIS; 7512 7513 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 7514 dpll |= DPLLB_MODE_LVDS; 7515 else 7516 dpll |= DPLLB_MODE_DAC_SERIAL; 7517 7518 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 7519 dpll |= (crtc_state->pixel_multiplier - 1) 7520 << SDVO_MULTIPLIER_SHIFT_HIRES; 7521 } 7522 7523 if (is_sdvo) 7524 dpll |= DPLL_SDVO_HIGH_SPEED; 7525 7526 if (crtc_state->has_dp_encoder) 7527 dpll |= DPLL_SDVO_HIGH_SPEED; 7528 7529 /* compute bitmask from p1 value */ 7530 if (IS_PINEVIEW(dev)) 7531 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 7532 else { 7533 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7534 if (IS_G4X(dev) && reduced_clock) 7535 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 7536 } 7537 switch (clock->p2) { 7538 case 5: 7539 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 7540 break; 7541 case 7: 7542 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 7543 break; 7544 case 10: 7545 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 7546 break; 7547 case 14: 7548 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 7549 break; 7550 } 7551 if (INTEL_INFO(dev)->gen >= 4) 7552 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 7553 7554 if (crtc_state->sdvo_tv_clock) 7555 dpll |= PLL_REF_INPUT_TVCLKINBC; 7556 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7557 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 7558 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7559 else 7560 dpll |= PLL_REF_INPUT_DREFCLK; 7561 7562 dpll |= DPLL_VCO_ENABLE; 7563 crtc_state->dpll_hw_state.dpll = dpll; 7564 7565 if (INTEL_INFO(dev)->gen >= 4) { 7566 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 7567 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7568 crtc_state->dpll_hw_state.dpll_md = dpll_md; 7569 } 7570 } 7571 7572 static void i8xx_compute_dpll(struct intel_crtc *crtc, 7573 struct intel_crtc_state *crtc_state, 7574 intel_clock_t *reduced_clock, 7575 int num_connectors) 7576 { 7577 struct drm_device *dev = crtc->base.dev; 7578 struct drm_i915_private *dev_priv = dev->dev_private; 7579 u32 dpll; 7580 struct dpll *clock = &crtc_state->dpll; 7581 7582 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 7583 7584 dpll = DPLL_VGA_MODE_DIS; 7585 7586 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7587 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7588 } else { 7589 if (clock->p1 == 2) 7590 dpll |= PLL_P1_DIVIDE_BY_TWO; 7591 else 7592 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7593 if (clock->p2 == 4) 7594 dpll |= PLL_P2_DIVIDE_BY_4; 7595 } 7596 7597 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 7598 dpll |= DPLL_DVO_2X_MODE; 7599 7600 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7601 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 7602 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7603 else 7604 dpll |= PLL_REF_INPUT_DREFCLK; 7605 7606 dpll |= DPLL_VCO_ENABLE; 7607 crtc_state->dpll_hw_state.dpll = dpll; 7608 } 7609 7610 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 7611 { 7612 struct drm_device *dev = intel_crtc->base.dev; 7613 struct drm_i915_private *dev_priv = dev->dev_private; 7614 enum i915_pipe pipe = intel_crtc->pipe; 7615 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7616 struct drm_display_mode *adjusted_mode = 7617 &intel_crtc->config->base.adjusted_mode; 7618 uint32_t crtc_vtotal, crtc_vblank_end; 7619 int vsyncshift = 0; 7620 7621 /* We need to be careful not to changed the adjusted mode, for otherwise 7622 * the hw state checker will get angry at the mismatch. */ 7623 crtc_vtotal = adjusted_mode->crtc_vtotal; 7624 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 7625 7626 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 7627 /* the chip adds 2 halflines automatically */ 7628 crtc_vtotal -= 1; 7629 crtc_vblank_end -= 1; 7630 7631 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 7632 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7633 else 7634 vsyncshift = adjusted_mode->crtc_hsync_start - 7635 adjusted_mode->crtc_htotal / 2; 7636 if (vsyncshift < 0) 7637 vsyncshift += adjusted_mode->crtc_htotal; 7638 } 7639 7640 if (INTEL_INFO(dev)->gen > 3) 7641 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 7642 7643 I915_WRITE(HTOTAL(cpu_transcoder), 7644 (adjusted_mode->crtc_hdisplay - 1) | 7645 ((adjusted_mode->crtc_htotal - 1) << 16)); 7646 I915_WRITE(HBLANK(cpu_transcoder), 7647 (adjusted_mode->crtc_hblank_start - 1) | 7648 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 7649 I915_WRITE(HSYNC(cpu_transcoder), 7650 (adjusted_mode->crtc_hsync_start - 1) | 7651 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 7652 7653 I915_WRITE(VTOTAL(cpu_transcoder), 7654 (adjusted_mode->crtc_vdisplay - 1) | 7655 ((crtc_vtotal - 1) << 16)); 7656 I915_WRITE(VBLANK(cpu_transcoder), 7657 (adjusted_mode->crtc_vblank_start - 1) | 7658 ((crtc_vblank_end - 1) << 16)); 7659 I915_WRITE(VSYNC(cpu_transcoder), 7660 (adjusted_mode->crtc_vsync_start - 1) | 7661 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 7662 7663 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 7664 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 7665 * documented on the DDI_FUNC_CTL register description, EDP Input Select 7666 * bits. */ 7667 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 7668 (pipe == PIPE_B || pipe == PIPE_C)) 7669 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 7670 7671 /* pipesrc controls the size that is scaled from, which should 7672 * always be the user's requested size. 7673 */ 7674 I915_WRITE(PIPESRC(pipe), 7675 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7676 (intel_crtc->config->pipe_src_h - 1)); 7677 } 7678 7679 static void intel_get_pipe_timings(struct intel_crtc *crtc, 7680 struct intel_crtc_state *pipe_config) 7681 { 7682 struct drm_device *dev = crtc->base.dev; 7683 struct drm_i915_private *dev_priv = dev->dev_private; 7684 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 7685 uint32_t tmp; 7686 7687 tmp = I915_READ(HTOTAL(cpu_transcoder)); 7688 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 7689 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 7690 tmp = I915_READ(HBLANK(cpu_transcoder)); 7691 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 7692 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 7693 tmp = I915_READ(HSYNC(cpu_transcoder)); 7694 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 7695 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 7696 7697 tmp = I915_READ(VTOTAL(cpu_transcoder)); 7698 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 7699 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 7700 tmp = I915_READ(VBLANK(cpu_transcoder)); 7701 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 7702 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; 7703 tmp = I915_READ(VSYNC(cpu_transcoder)); 7704 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 7705 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 7706 7707 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 7708 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 7709 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 7710 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 7711 } 7712 7713 tmp = I915_READ(PIPESRC(crtc->pipe)); 7714 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 7715 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 7716 7717 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 7718 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 7719 } 7720 7721 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 7722 struct intel_crtc_state *pipe_config) 7723 { 7724 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7725 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7726 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7727 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7728 7729 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7730 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7731 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7732 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7733 7734 mode->flags = pipe_config->base.adjusted_mode.flags; 7735 mode->type = DRM_MODE_TYPE_DRIVER; 7736 7737 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7738 mode->flags |= pipe_config->base.adjusted_mode.flags; 7739 7740 mode->hsync = drm_mode_hsync(mode); 7741 mode->vrefresh = drm_mode_vrefresh(mode); 7742 drm_mode_set_name(mode); 7743 } 7744 7745 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7746 { 7747 struct drm_device *dev = intel_crtc->base.dev; 7748 struct drm_i915_private *dev_priv = dev->dev_private; 7749 uint32_t pipeconf; 7750 7751 pipeconf = 0; 7752 7753 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 7754 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 7755 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7756 7757 if (intel_crtc->config->double_wide) 7758 pipeconf |= PIPECONF_DOUBLE_WIDE; 7759 7760 /* only g4x and later have fancy bpc/dither controls */ 7761 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 7762 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7763 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7764 pipeconf |= PIPECONF_DITHER_EN | 7765 PIPECONF_DITHER_TYPE_SP; 7766 7767 switch (intel_crtc->config->pipe_bpp) { 7768 case 18: 7769 pipeconf |= PIPECONF_6BPC; 7770 break; 7771 case 24: 7772 pipeconf |= PIPECONF_8BPC; 7773 break; 7774 case 30: 7775 pipeconf |= PIPECONF_10BPC; 7776 break; 7777 default: 7778 /* Case prevented by intel_choose_pipe_bpp_dither. */ 7779 BUG(); 7780 } 7781 } 7782 7783 if (HAS_PIPE_CXSR(dev)) { 7784 if (intel_crtc->lowfreq_avail) { 7785 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 7786 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 7787 } else { 7788 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 7789 } 7790 } 7791 7792 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7793 if (INTEL_INFO(dev)->gen < 4 || 7794 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 7795 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7796 else 7797 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7798 } else 7799 pipeconf |= PIPECONF_PROGRESSIVE; 7800 7801 if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range) 7802 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7803 7804 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7805 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7806 } 7807 7808 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 7809 struct intel_crtc_state *crtc_state) 7810 { 7811 struct drm_device *dev = crtc->base.dev; 7812 struct drm_i915_private *dev_priv = dev->dev_private; 7813 int refclk, num_connectors = 0; 7814 intel_clock_t clock; 7815 bool ok; 7816 bool is_dsi = false; 7817 struct intel_encoder *encoder; 7818 const intel_limit_t *limit; 7819 struct drm_atomic_state *state = crtc_state->base.state; 7820 struct drm_connector *connector; 7821 struct drm_connector_state *connector_state; 7822 int i; 7823 7824 memset(&crtc_state->dpll_hw_state, 0, 7825 sizeof(crtc_state->dpll_hw_state)); 7826 7827 for_each_connector_in_state(state, connector, connector_state, i) { 7828 if (connector_state->crtc != &crtc->base) 7829 continue; 7830 7831 encoder = to_intel_encoder(connector_state->best_encoder); 7832 7833 switch (encoder->type) { 7834 case INTEL_OUTPUT_DSI: 7835 is_dsi = true; 7836 break; 7837 default: 7838 break; 7839 } 7840 7841 num_connectors++; 7842 } 7843 7844 if (is_dsi) 7845 return 0; 7846 7847 if (!crtc_state->clock_set) { 7848 refclk = i9xx_get_refclk(crtc_state, num_connectors); 7849 7850 /* 7851 * Returns a set of divisors for the desired target clock with 7852 * the given refclk, or FALSE. The returned values represent 7853 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 7854 * 2) / p1 / p2. 7855 */ 7856 limit = intel_limit(crtc_state, refclk); 7857 ok = dev_priv->display.find_dpll(limit, crtc_state, 7858 crtc_state->port_clock, 7859 refclk, NULL, &clock); 7860 if (!ok) { 7861 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7862 return -EINVAL; 7863 } 7864 7865 /* Compat-code for transition, will disappear. */ 7866 crtc_state->dpll.n = clock.n; 7867 crtc_state->dpll.m1 = clock.m1; 7868 crtc_state->dpll.m2 = clock.m2; 7869 crtc_state->dpll.p1 = clock.p1; 7870 crtc_state->dpll.p2 = clock.p2; 7871 } 7872 7873 if (IS_GEN2(dev)) { 7874 i8xx_compute_dpll(crtc, crtc_state, NULL, 7875 num_connectors); 7876 } else if (IS_CHERRYVIEW(dev)) { 7877 chv_compute_dpll(crtc, crtc_state); 7878 } else if (IS_VALLEYVIEW(dev)) { 7879 vlv_compute_dpll(crtc, crtc_state); 7880 } else { 7881 i9xx_compute_dpll(crtc, crtc_state, NULL, 7882 num_connectors); 7883 } 7884 7885 return 0; 7886 } 7887 7888 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 7889 struct intel_crtc_state *pipe_config) 7890 { 7891 struct drm_device *dev = crtc->base.dev; 7892 struct drm_i915_private *dev_priv = dev->dev_private; 7893 uint32_t tmp; 7894 7895 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 7896 return; 7897 7898 tmp = I915_READ(PFIT_CONTROL); 7899 if (!(tmp & PFIT_ENABLE)) 7900 return; 7901 7902 /* Check whether the pfit is attached to our pipe. */ 7903 if (INTEL_INFO(dev)->gen < 4) { 7904 if (crtc->pipe != PIPE_B) 7905 return; 7906 } else { 7907 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 7908 return; 7909 } 7910 7911 pipe_config->gmch_pfit.control = tmp; 7912 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 7913 if (INTEL_INFO(dev)->gen < 5) 7914 pipe_config->gmch_pfit.lvds_border_bits = 7915 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 7916 } 7917 7918 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 7919 struct intel_crtc_state *pipe_config) 7920 { 7921 struct drm_device *dev = crtc->base.dev; 7922 struct drm_i915_private *dev_priv = dev->dev_private; 7923 int pipe = pipe_config->cpu_transcoder; 7924 intel_clock_t clock; 7925 u32 mdiv; 7926 int refclk = 100000; 7927 7928 /* In case of MIPI DPLL will not even be used */ 7929 if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) 7930 return; 7931 7932 mutex_lock(&dev_priv->sb_lock); 7933 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 7934 mutex_unlock(&dev_priv->sb_lock); 7935 7936 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 7937 clock.m2 = mdiv & DPIO_M2DIV_MASK; 7938 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 7939 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 7940 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 7941 7942 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 7943 } 7944 7945 static void 7946 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 7947 struct intel_initial_plane_config *plane_config) 7948 { 7949 struct drm_device *dev = crtc->base.dev; 7950 struct drm_i915_private *dev_priv = dev->dev_private; 7951 u32 val, base, offset; 7952 int pipe = crtc->pipe, plane = crtc->plane; 7953 int fourcc, pixel_format; 7954 unsigned int aligned_height; 7955 struct drm_framebuffer *fb; 7956 struct intel_framebuffer *intel_fb; 7957 7958 val = I915_READ(DSPCNTR(plane)); 7959 if (!(val & DISPLAY_PLANE_ENABLE)) 7960 return; 7961 7962 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7963 if (!intel_fb) { 7964 DRM_DEBUG_KMS("failed to alloc fb\n"); 7965 return; 7966 } 7967 7968 fb = &intel_fb->base; 7969 7970 if (INTEL_INFO(dev)->gen >= 4) { 7971 if (val & DISPPLANE_TILED) { 7972 plane_config->tiling = I915_TILING_X; 7973 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 7974 } 7975 } 7976 7977 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7978 fourcc = i9xx_format_to_fourcc(pixel_format); 7979 fb->pixel_format = fourcc; 7980 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 7981 7982 if (INTEL_INFO(dev)->gen >= 4) { 7983 if (plane_config->tiling) 7984 offset = I915_READ(DSPTILEOFF(plane)); 7985 else 7986 offset = I915_READ(DSPLINOFF(plane)); 7987 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 7988 } else { 7989 base = I915_READ(DSPADDR(plane)); 7990 } 7991 plane_config->base = base; 7992 7993 val = I915_READ(PIPESRC(pipe)); 7994 fb->width = ((val >> 16) & 0xfff) + 1; 7995 fb->height = ((val >> 0) & 0xfff) + 1; 7996 7997 val = I915_READ(DSPSTRIDE(pipe)); 7998 fb->pitches[0] = val & 0xffffffc0; 7999 8000 aligned_height = intel_fb_align_height(dev, fb->height, 8001 fb->pixel_format, 8002 fb->modifier[0]); 8003 8004 plane_config->size = fb->pitches[0] * aligned_height; 8005 8006 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8007 pipe_name(pipe), plane, fb->width, fb->height, 8008 fb->bits_per_pixel, base, fb->pitches[0], 8009 plane_config->size); 8010 8011 plane_config->fb = intel_fb; 8012 } 8013 8014 static void chv_crtc_clock_get(struct intel_crtc *crtc, 8015 struct intel_crtc_state *pipe_config) 8016 { 8017 struct drm_device *dev = crtc->base.dev; 8018 struct drm_i915_private *dev_priv = dev->dev_private; 8019 int pipe = pipe_config->cpu_transcoder; 8020 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8021 intel_clock_t clock; 8022 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8023 int refclk = 100000; 8024 8025 mutex_lock(&dev_priv->sb_lock); 8026 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 8027 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 8028 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 8029 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8030 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8031 mutex_unlock(&dev_priv->sb_lock); 8032 8033 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 8034 clock.m2 = (pll_dw0 & 0xff) << 22; 8035 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8036 clock.m2 |= pll_dw2 & 0x3fffff; 8037 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8038 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8039 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8040 8041 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8042 } 8043 8044 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8045 struct intel_crtc_state *pipe_config) 8046 { 8047 struct drm_device *dev = crtc->base.dev; 8048 struct drm_i915_private *dev_priv = dev->dev_private; 8049 uint32_t tmp; 8050 8051 if (!intel_display_power_is_enabled(dev_priv, 8052 POWER_DOMAIN_PIPE(crtc->pipe))) 8053 return false; 8054 8055 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8056 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8057 8058 tmp = I915_READ(PIPECONF(crtc->pipe)); 8059 if (!(tmp & PIPECONF_ENABLE)) 8060 return false; 8061 8062 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { 8063 switch (tmp & PIPECONF_BPC_MASK) { 8064 case PIPECONF_6BPC: 8065 pipe_config->pipe_bpp = 18; 8066 break; 8067 case PIPECONF_8BPC: 8068 pipe_config->pipe_bpp = 24; 8069 break; 8070 case PIPECONF_10BPC: 8071 pipe_config->pipe_bpp = 30; 8072 break; 8073 default: 8074 break; 8075 } 8076 } 8077 8078 if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8079 pipe_config->limited_color_range = true; 8080 8081 if (INTEL_INFO(dev)->gen < 4) 8082 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8083 8084 intel_get_pipe_timings(crtc, pipe_config); 8085 8086 i9xx_get_pfit_config(crtc, pipe_config); 8087 8088 if (INTEL_INFO(dev)->gen >= 4) { 8089 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8090 pipe_config->pixel_multiplier = 8091 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8092 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8093 pipe_config->dpll_hw_state.dpll_md = tmp; 8094 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 8095 tmp = I915_READ(DPLL(crtc->pipe)); 8096 pipe_config->pixel_multiplier = 8097 ((tmp & SDVO_MULTIPLIER_MASK) 8098 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8099 } else { 8100 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8101 * port and will be fixed up in the encoder->get_config 8102 * function. */ 8103 pipe_config->pixel_multiplier = 1; 8104 } 8105 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8106 if (!IS_VALLEYVIEW(dev)) { 8107 /* 8108 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 8109 * on 830. Filter it out here so that we don't 8110 * report errors due to that. 8111 */ 8112 if (IS_I830(dev)) 8113 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 8114 8115 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8116 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8117 } else { 8118 /* Mask out read-only status bits. */ 8119 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8120 DPLL_PORTC_READY_MASK | 8121 DPLL_PORTB_READY_MASK); 8122 } 8123 8124 if (IS_CHERRYVIEW(dev)) 8125 chv_crtc_clock_get(crtc, pipe_config); 8126 else if (IS_VALLEYVIEW(dev)) 8127 vlv_crtc_clock_get(crtc, pipe_config); 8128 else 8129 i9xx_crtc_clock_get(crtc, pipe_config); 8130 8131 return true; 8132 } 8133 8134 static void ironlake_init_pch_refclk(struct drm_device *dev) 8135 { 8136 struct drm_i915_private *dev_priv = dev->dev_private; 8137 struct intel_encoder *encoder; 8138 u32 val, final; 8139 bool has_lvds = false; 8140 bool has_cpu_edp = false; 8141 bool has_panel = false; 8142 bool has_ck505 = false; 8143 bool can_ssc = false; 8144 8145 /* We need to take the global config into account */ 8146 for_each_intel_encoder(dev, encoder) { 8147 switch (encoder->type) { 8148 case INTEL_OUTPUT_LVDS: 8149 has_panel = true; 8150 has_lvds = true; 8151 break; 8152 case INTEL_OUTPUT_EDP: 8153 has_panel = true; 8154 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 8155 has_cpu_edp = true; 8156 break; 8157 default: 8158 break; 8159 } 8160 } 8161 8162 if (HAS_PCH_IBX(dev)) { 8163 has_ck505 = dev_priv->vbt.display_clock_mode; 8164 can_ssc = has_ck505; 8165 } else { 8166 has_ck505 = false; 8167 can_ssc = true; 8168 } 8169 8170 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8171 has_panel, has_lvds, has_ck505); 8172 8173 /* Ironlake: try to setup display ref clock before DPLL 8174 * enabling. This is only under driver's control after 8175 * PCH B stepping, previous chipset stepping should be 8176 * ignoring this setting. 8177 */ 8178 val = I915_READ(PCH_DREF_CONTROL); 8179 8180 /* As we must carefully and slowly disable/enable each source in turn, 8181 * compute the final state we want first and check if we need to 8182 * make any changes at all. 8183 */ 8184 final = val; 8185 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8186 if (has_ck505) 8187 final |= DREF_NONSPREAD_CK505_ENABLE; 8188 else 8189 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8190 8191 final &= ~DREF_SSC_SOURCE_MASK; 8192 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8193 final &= ~DREF_SSC1_ENABLE; 8194 8195 if (has_panel) { 8196 final |= DREF_SSC_SOURCE_ENABLE; 8197 8198 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8199 final |= DREF_SSC1_ENABLE; 8200 8201 if (has_cpu_edp) { 8202 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8203 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8204 else 8205 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8206 } else 8207 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8208 } else { 8209 final |= DREF_SSC_SOURCE_DISABLE; 8210 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8211 } 8212 8213 if (final == val) 8214 return; 8215 8216 /* Always enable nonspread source */ 8217 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8218 8219 if (has_ck505) 8220 val |= DREF_NONSPREAD_CK505_ENABLE; 8221 else 8222 val |= DREF_NONSPREAD_SOURCE_ENABLE; 8223 8224 if (has_panel) { 8225 val &= ~DREF_SSC_SOURCE_MASK; 8226 val |= DREF_SSC_SOURCE_ENABLE; 8227 8228 /* SSC must be turned on before enabling the CPU output */ 8229 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8230 DRM_DEBUG_KMS("Using SSC on panel\n"); 8231 val |= DREF_SSC1_ENABLE; 8232 } else 8233 val &= ~DREF_SSC1_ENABLE; 8234 8235 /* Get SSC going before enabling the outputs */ 8236 I915_WRITE(PCH_DREF_CONTROL, val); 8237 POSTING_READ(PCH_DREF_CONTROL); 8238 udelay(200); 8239 8240 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8241 8242 /* Enable CPU source on CPU attached eDP */ 8243 if (has_cpu_edp) { 8244 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8245 DRM_DEBUG_KMS("Using SSC on eDP\n"); 8246 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8247 } else 8248 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8249 } else 8250 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8251 8252 I915_WRITE(PCH_DREF_CONTROL, val); 8253 POSTING_READ(PCH_DREF_CONTROL); 8254 udelay(200); 8255 } else { 8256 DRM_DEBUG_KMS("Disabling SSC entirely\n"); 8257 8258 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8259 8260 /* Turn off CPU output */ 8261 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8262 8263 I915_WRITE(PCH_DREF_CONTROL, val); 8264 POSTING_READ(PCH_DREF_CONTROL); 8265 udelay(200); 8266 8267 /* Turn off the SSC source */ 8268 val &= ~DREF_SSC_SOURCE_MASK; 8269 val |= DREF_SSC_SOURCE_DISABLE; 8270 8271 /* Turn off SSC1 */ 8272 val &= ~DREF_SSC1_ENABLE; 8273 8274 I915_WRITE(PCH_DREF_CONTROL, val); 8275 POSTING_READ(PCH_DREF_CONTROL); 8276 udelay(200); 8277 } 8278 8279 BUG_ON(val != final); 8280 } 8281 8282 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 8283 { 8284 uint32_t tmp; 8285 8286 tmp = I915_READ(SOUTH_CHICKEN2); 8287 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 8288 I915_WRITE(SOUTH_CHICKEN2, tmp); 8289 8290 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & 8291 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 8292 DRM_ERROR("FDI mPHY reset assert timeout\n"); 8293 8294 tmp = I915_READ(SOUTH_CHICKEN2); 8295 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 8296 I915_WRITE(SOUTH_CHICKEN2, tmp); 8297 8298 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & 8299 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 8300 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 8301 } 8302 8303 /* WaMPhyProgramming:hsw */ 8304 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 8305 { 8306 uint32_t tmp; 8307 8308 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 8309 tmp &= ~(0xFF << 24); 8310 tmp |= (0x12 << 24); 8311 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 8312 8313 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 8314 tmp |= (1 << 11); 8315 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 8316 8317 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 8318 tmp |= (1 << 11); 8319 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 8320 8321 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 8322 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 8323 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 8324 8325 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 8326 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 8327 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 8328 8329 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 8330 tmp &= ~(7 << 13); 8331 tmp |= (5 << 13); 8332 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 8333 8334 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 8335 tmp &= ~(7 << 13); 8336 tmp |= (5 << 13); 8337 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 8338 8339 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 8340 tmp &= ~0xFF; 8341 tmp |= 0x1C; 8342 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 8343 8344 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 8345 tmp &= ~0xFF; 8346 tmp |= 0x1C; 8347 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 8348 8349 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 8350 tmp &= ~(0xFF << 16); 8351 tmp |= (0x1C << 16); 8352 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 8353 8354 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 8355 tmp &= ~(0xFF << 16); 8356 tmp |= (0x1C << 16); 8357 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 8358 8359 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 8360 tmp |= (1 << 27); 8361 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 8362 8363 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 8364 tmp |= (1 << 27); 8365 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 8366 8367 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 8368 tmp &= ~(0xF << 28); 8369 tmp |= (4 << 28); 8370 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 8371 8372 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 8373 tmp &= ~(0xF << 28); 8374 tmp |= (4 << 28); 8375 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 8376 } 8377 8378 /* Implements 3 different sequences from BSpec chapter "Display iCLK 8379 * Programming" based on the parameters passed: 8380 * - Sequence to enable CLKOUT_DP 8381 * - Sequence to enable CLKOUT_DP without spread 8382 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 8383 */ 8384 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, 8385 bool with_fdi) 8386 { 8387 struct drm_i915_private *dev_priv = dev->dev_private; 8388 uint32_t reg, tmp; 8389 8390 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 8391 with_spread = true; 8392 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && 8393 with_fdi, "LP PCH doesn't have FDI\n")) 8394 with_fdi = false; 8395 8396 mutex_lock(&dev_priv->sb_lock); 8397 8398 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8399 tmp &= ~SBI_SSCCTL_DISABLE; 8400 tmp |= SBI_SSCCTL_PATHALT; 8401 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8402 8403 udelay(24); 8404 8405 if (with_spread) { 8406 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8407 tmp &= ~SBI_SSCCTL_PATHALT; 8408 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8409 8410 if (with_fdi) { 8411 lpt_reset_fdi_mphy(dev_priv); 8412 lpt_program_fdi_mphy(dev_priv); 8413 } 8414 } 8415 8416 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 8417 SBI_GEN0 : SBI_DBUFF0; 8418 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8419 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8420 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8421 8422 mutex_unlock(&dev_priv->sb_lock); 8423 } 8424 8425 /* Sequence to disable CLKOUT_DP */ 8426 static void lpt_disable_clkout_dp(struct drm_device *dev) 8427 { 8428 struct drm_i915_private *dev_priv = dev->dev_private; 8429 uint32_t reg, tmp; 8430 8431 mutex_lock(&dev_priv->sb_lock); 8432 8433 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 8434 SBI_GEN0 : SBI_DBUFF0; 8435 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 8436 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 8437 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 8438 8439 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 8440 if (!(tmp & SBI_SSCCTL_DISABLE)) { 8441 if (!(tmp & SBI_SSCCTL_PATHALT)) { 8442 tmp |= SBI_SSCCTL_PATHALT; 8443 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8444 udelay(32); 8445 } 8446 tmp |= SBI_SSCCTL_DISABLE; 8447 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 8448 } 8449 8450 mutex_unlock(&dev_priv->sb_lock); 8451 } 8452 8453 static void lpt_init_pch_refclk(struct drm_device *dev) 8454 { 8455 struct intel_encoder *encoder; 8456 bool has_vga = false; 8457 8458 for_each_intel_encoder(dev, encoder) { 8459 switch (encoder->type) { 8460 case INTEL_OUTPUT_ANALOG: 8461 has_vga = true; 8462 break; 8463 default: 8464 break; 8465 } 8466 } 8467 8468 if (has_vga) 8469 lpt_enable_clkout_dp(dev, true, true); 8470 else 8471 lpt_disable_clkout_dp(dev); 8472 } 8473 8474 /* 8475 * Initialize reference clocks when the driver loads 8476 */ 8477 void intel_init_pch_refclk(struct drm_device *dev) 8478 { 8479 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 8480 ironlake_init_pch_refclk(dev); 8481 else if (HAS_PCH_LPT(dev)) 8482 lpt_init_pch_refclk(dev); 8483 } 8484 8485 static int ironlake_get_refclk(struct intel_crtc_state *crtc_state) 8486 { 8487 struct drm_device *dev = crtc_state->base.crtc->dev; 8488 struct drm_i915_private *dev_priv = dev->dev_private; 8489 struct drm_atomic_state *state = crtc_state->base.state; 8490 struct drm_connector *connector; 8491 struct drm_connector_state *connector_state; 8492 struct intel_encoder *encoder; 8493 int num_connectors = 0, i; 8494 bool is_lvds = false; 8495 8496 for_each_connector_in_state(state, connector, connector_state, i) { 8497 if (connector_state->crtc != crtc_state->base.crtc) 8498 continue; 8499 8500 encoder = to_intel_encoder(connector_state->best_encoder); 8501 8502 switch (encoder->type) { 8503 case INTEL_OUTPUT_LVDS: 8504 is_lvds = true; 8505 break; 8506 default: 8507 break; 8508 } 8509 num_connectors++; 8510 } 8511 8512 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 8513 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 8514 dev_priv->vbt.lvds_ssc_freq); 8515 return dev_priv->vbt.lvds_ssc_freq; 8516 } 8517 8518 return 120000; 8519 } 8520 8521 static void ironlake_set_pipeconf(struct drm_crtc *crtc) 8522 { 8523 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 8524 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8525 int pipe = intel_crtc->pipe; 8526 uint32_t val; 8527 8528 val = 0; 8529 8530 switch (intel_crtc->config->pipe_bpp) { 8531 case 18: 8532 val |= PIPECONF_6BPC; 8533 break; 8534 case 24: 8535 val |= PIPECONF_8BPC; 8536 break; 8537 case 30: 8538 val |= PIPECONF_10BPC; 8539 break; 8540 case 36: 8541 val |= PIPECONF_12BPC; 8542 break; 8543 default: 8544 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8545 BUG(); 8546 } 8547 8548 if (intel_crtc->config->dither) 8549 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8550 8551 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8552 val |= PIPECONF_INTERLACED_ILK; 8553 else 8554 val |= PIPECONF_PROGRESSIVE; 8555 8556 if (intel_crtc->config->limited_color_range) 8557 val |= PIPECONF_COLOR_RANGE_SELECT; 8558 8559 I915_WRITE(PIPECONF(pipe), val); 8560 POSTING_READ(PIPECONF(pipe)); 8561 } 8562 8563 /* 8564 * Set up the pipe CSC unit. 8565 * 8566 * Currently only full range RGB to limited range RGB conversion 8567 * is supported, but eventually this should handle various 8568 * RGB<->YCbCr scenarios as well. 8569 */ 8570 static void intel_set_pipe_csc(struct drm_crtc *crtc) 8571 { 8572 struct drm_device *dev = crtc->dev; 8573 struct drm_i915_private *dev_priv = dev->dev_private; 8574 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8575 int pipe = intel_crtc->pipe; 8576 uint16_t coeff = 0x7800; /* 1.0 */ 8577 8578 /* 8579 * TODO: Check what kind of values actually come out of the pipe 8580 * with these coeff/postoff values and adjust to get the best 8581 * accuracy. Perhaps we even need to take the bpc value into 8582 * consideration. 8583 */ 8584 8585 if (intel_crtc->config->limited_color_range) 8586 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ 8587 8588 /* 8589 * GY/GU and RY/RU should be the other way around according 8590 * to BSpec, but reality doesn't agree. Just set them up in 8591 * a way that results in the correct picture. 8592 */ 8593 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); 8594 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); 8595 8596 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); 8597 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); 8598 8599 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); 8600 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); 8601 8602 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); 8603 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); 8604 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); 8605 8606 if (INTEL_INFO(dev)->gen > 6) { 8607 uint16_t postoff = 0; 8608 8609 if (intel_crtc->config->limited_color_range) 8610 postoff = (16 * (1 << 12) / 255) & 0x1fff; 8611 8612 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 8613 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 8614 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); 8615 8616 I915_WRITE(PIPE_CSC_MODE(pipe), 0); 8617 } else { 8618 uint32_t mode = CSC_MODE_YUV_TO_RGB; 8619 8620 if (intel_crtc->config->limited_color_range) 8621 mode |= CSC_BLACK_SCREEN_OFFSET; 8622 8623 I915_WRITE(PIPE_CSC_MODE(pipe), mode); 8624 } 8625 } 8626 8627 static void haswell_set_pipeconf(struct drm_crtc *crtc) 8628 { 8629 struct drm_device *dev = crtc->dev; 8630 struct drm_i915_private *dev_priv = dev->dev_private; 8631 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8632 enum i915_pipe pipe = intel_crtc->pipe; 8633 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8634 uint32_t val; 8635 8636 val = 0; 8637 8638 if (IS_HASWELL(dev) && intel_crtc->config->dither) 8639 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8640 8641 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8642 val |= PIPECONF_INTERLACED_ILK; 8643 else 8644 val |= PIPECONF_PROGRESSIVE; 8645 8646 I915_WRITE(PIPECONF(cpu_transcoder), val); 8647 POSTING_READ(PIPECONF(cpu_transcoder)); 8648 8649 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 8650 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 8651 8652 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { 8653 val = 0; 8654 8655 switch (intel_crtc->config->pipe_bpp) { 8656 case 18: 8657 val |= PIPEMISC_DITHER_6_BPC; 8658 break; 8659 case 24: 8660 val |= PIPEMISC_DITHER_8_BPC; 8661 break; 8662 case 30: 8663 val |= PIPEMISC_DITHER_10_BPC; 8664 break; 8665 case 36: 8666 val |= PIPEMISC_DITHER_12_BPC; 8667 break; 8668 default: 8669 /* Case prevented by pipe_config_set_bpp. */ 8670 BUG(); 8671 } 8672 8673 if (intel_crtc->config->dither) 8674 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8675 8676 I915_WRITE(PIPEMISC(pipe), val); 8677 } 8678 } 8679 8680 static bool ironlake_compute_clocks(struct drm_crtc *crtc, 8681 struct intel_crtc_state *crtc_state, 8682 intel_clock_t *clock, 8683 bool *has_reduced_clock, 8684 intel_clock_t *reduced_clock) 8685 { 8686 struct drm_device *dev = crtc->dev; 8687 struct drm_i915_private *dev_priv = dev->dev_private; 8688 int refclk; 8689 const intel_limit_t *limit; 8690 bool ret; 8691 8692 refclk = ironlake_get_refclk(crtc_state); 8693 8694 /* 8695 * Returns a set of divisors for the desired target clock with the given 8696 * refclk, or FALSE. The returned values represent the clock equation: 8697 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 8698 */ 8699 limit = intel_limit(crtc_state, refclk); 8700 ret = dev_priv->display.find_dpll(limit, crtc_state, 8701 crtc_state->port_clock, 8702 refclk, NULL, clock); 8703 if (!ret) 8704 return false; 8705 8706 return true; 8707 } 8708 8709 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 8710 { 8711 /* 8712 * Account for spread spectrum to avoid 8713 * oversubscribing the link. Max center spread 8714 * is 2.5%; use 5% for safety's sake. 8715 */ 8716 u32 bps = target_clock * bpp * 21 / 20; 8717 return DIV_ROUND_UP(bps, link_bw * 8); 8718 } 8719 8720 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 8721 { 8722 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 8723 } 8724 8725 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8726 struct intel_crtc_state *crtc_state, 8727 u32 *fp, 8728 intel_clock_t *reduced_clock, u32 *fp2) 8729 { 8730 struct drm_crtc *crtc = &intel_crtc->base; 8731 struct drm_device *dev = crtc->dev; 8732 struct drm_i915_private *dev_priv = dev->dev_private; 8733 struct drm_atomic_state *state = crtc_state->base.state; 8734 struct drm_connector *connector; 8735 struct drm_connector_state *connector_state; 8736 struct intel_encoder *encoder; 8737 uint32_t dpll; 8738 int factor, num_connectors = 0, i; 8739 bool is_lvds = false, is_sdvo = false; 8740 8741 for_each_connector_in_state(state, connector, connector_state, i) { 8742 if (connector_state->crtc != crtc_state->base.crtc) 8743 continue; 8744 8745 encoder = to_intel_encoder(connector_state->best_encoder); 8746 8747 switch (encoder->type) { 8748 case INTEL_OUTPUT_LVDS: 8749 is_lvds = true; 8750 break; 8751 case INTEL_OUTPUT_SDVO: 8752 case INTEL_OUTPUT_HDMI: 8753 is_sdvo = true; 8754 break; 8755 default: 8756 break; 8757 } 8758 8759 num_connectors++; 8760 } 8761 8762 /* Enable autotuning of the PLL clock (if permissible) */ 8763 factor = 21; 8764 if (is_lvds) { 8765 if ((intel_panel_use_ssc(dev_priv) && 8766 dev_priv->vbt.lvds_ssc_freq == 100000) || 8767 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 8768 factor = 25; 8769 } else if (crtc_state->sdvo_tv_clock) 8770 factor = 20; 8771 8772 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 8773 *fp |= FP_CB_TUNE; 8774 8775 if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) 8776 *fp2 |= FP_CB_TUNE; 8777 8778 dpll = 0; 8779 8780 if (is_lvds) 8781 dpll |= DPLLB_MODE_LVDS; 8782 else 8783 dpll |= DPLLB_MODE_DAC_SERIAL; 8784 8785 dpll |= (crtc_state->pixel_multiplier - 1) 8786 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8787 8788 if (is_sdvo) 8789 dpll |= DPLL_SDVO_HIGH_SPEED; 8790 if (crtc_state->has_dp_encoder) 8791 dpll |= DPLL_SDVO_HIGH_SPEED; 8792 8793 /* compute bitmask from p1 value */ 8794 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8795 /* also FPA1 */ 8796 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8797 8798 switch (crtc_state->dpll.p2) { 8799 case 5: 8800 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8801 break; 8802 case 7: 8803 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8804 break; 8805 case 10: 8806 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8807 break; 8808 case 14: 8809 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8810 break; 8811 } 8812 8813 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) 8814 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8815 else 8816 dpll |= PLL_REF_INPUT_DREFCLK; 8817 8818 return dpll | DPLL_VCO_ENABLE; 8819 } 8820 8821 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 8822 struct intel_crtc_state *crtc_state) 8823 { 8824 struct drm_device *dev = crtc->base.dev; 8825 intel_clock_t clock, reduced_clock; 8826 u32 dpll = 0, fp = 0, fp2 = 0; 8827 bool ok, has_reduced_clock = false; 8828 bool is_lvds = false; 8829 struct intel_shared_dpll *pll; 8830 8831 memset(&crtc_state->dpll_hw_state, 0, 8832 sizeof(crtc_state->dpll_hw_state)); 8833 8834 is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS); 8835 8836 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 8837 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 8838 8839 ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock, 8840 &has_reduced_clock, &reduced_clock); 8841 if (!ok && !crtc_state->clock_set) { 8842 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8843 return -EINVAL; 8844 } 8845 /* Compat-code for transition, will disappear. */ 8846 if (!crtc_state->clock_set) { 8847 crtc_state->dpll.n = clock.n; 8848 crtc_state->dpll.m1 = clock.m1; 8849 crtc_state->dpll.m2 = clock.m2; 8850 crtc_state->dpll.p1 = clock.p1; 8851 crtc_state->dpll.p2 = clock.p2; 8852 } 8853 8854 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 8855 if (crtc_state->has_pch_encoder) { 8856 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8857 if (has_reduced_clock) 8858 fp2 = i9xx_dpll_compute_fp(&reduced_clock); 8859 8860 dpll = ironlake_compute_dpll(crtc, crtc_state, 8861 &fp, &reduced_clock, 8862 has_reduced_clock ? &fp2 : NULL); 8863 8864 crtc_state->dpll_hw_state.dpll = dpll; 8865 crtc_state->dpll_hw_state.fp0 = fp; 8866 if (has_reduced_clock) 8867 crtc_state->dpll_hw_state.fp1 = fp2; 8868 else 8869 crtc_state->dpll_hw_state.fp1 = fp; 8870 8871 pll = intel_get_shared_dpll(crtc, crtc_state); 8872 if (pll == NULL) { 8873 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8874 pipe_name(crtc->pipe)); 8875 return -EINVAL; 8876 } 8877 } 8878 8879 if (is_lvds && has_reduced_clock) 8880 crtc->lowfreq_avail = true; 8881 else 8882 crtc->lowfreq_avail = false; 8883 8884 return 0; 8885 } 8886 8887 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 8888 struct intel_link_m_n *m_n) 8889 { 8890 struct drm_device *dev = crtc->base.dev; 8891 struct drm_i915_private *dev_priv = dev->dev_private; 8892 enum i915_pipe pipe = crtc->pipe; 8893 8894 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 8895 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 8896 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 8897 & ~TU_SIZE_MASK; 8898 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 8899 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 8900 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8901 } 8902 8903 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 8904 enum transcoder transcoder, 8905 struct intel_link_m_n *m_n, 8906 struct intel_link_m_n *m2_n2) 8907 { 8908 struct drm_device *dev = crtc->base.dev; 8909 struct drm_i915_private *dev_priv = dev->dev_private; 8910 enum i915_pipe pipe = crtc->pipe; 8911 8912 if (INTEL_INFO(dev)->gen >= 5) { 8913 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 8914 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 8915 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 8916 & ~TU_SIZE_MASK; 8917 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 8918 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 8919 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8920 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 8921 * gen < 8) and if DRRS is supported (to make sure the 8922 * registers are not unnecessarily read). 8923 */ 8924 if (m2_n2 && INTEL_INFO(dev)->gen < 8 && 8925 crtc->config->has_drrs) { 8926 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 8927 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 8928 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 8929 & ~TU_SIZE_MASK; 8930 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 8931 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 8932 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8933 } 8934 } else { 8935 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 8936 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 8937 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 8938 & ~TU_SIZE_MASK; 8939 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 8940 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 8941 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8942 } 8943 } 8944 8945 void intel_dp_get_m_n(struct intel_crtc *crtc, 8946 struct intel_crtc_state *pipe_config) 8947 { 8948 if (pipe_config->has_pch_encoder) 8949 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 8950 else 8951 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8952 &pipe_config->dp_m_n, 8953 &pipe_config->dp_m2_n2); 8954 } 8955 8956 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 8957 struct intel_crtc_state *pipe_config) 8958 { 8959 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8960 &pipe_config->fdi_m_n, NULL); 8961 } 8962 8963 static void skylake_get_pfit_config(struct intel_crtc *crtc, 8964 struct intel_crtc_state *pipe_config) 8965 { 8966 struct drm_device *dev = crtc->base.dev; 8967 struct drm_i915_private *dev_priv = dev->dev_private; 8968 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 8969 uint32_t ps_ctrl = 0; 8970 int id = -1; 8971 int i; 8972 8973 /* find scaler attached to this pipe */ 8974 for (i = 0; i < crtc->num_scalers; i++) { 8975 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 8976 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 8977 id = i; 8978 pipe_config->pch_pfit.enabled = true; 8979 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 8980 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 8981 break; 8982 } 8983 } 8984 8985 scaler_state->scaler_id = id; 8986 if (id >= 0) { 8987 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 8988 } else { 8989 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 8990 } 8991 } 8992 8993 static void 8994 skylake_get_initial_plane_config(struct intel_crtc *crtc, 8995 struct intel_initial_plane_config *plane_config) 8996 { 8997 struct drm_device *dev = crtc->base.dev; 8998 struct drm_i915_private *dev_priv = dev->dev_private; 8999 u32 val, base, offset, stride_mult, tiling; 9000 int pipe = crtc->pipe; 9001 int fourcc, pixel_format; 9002 unsigned int aligned_height; 9003 struct drm_framebuffer *fb; 9004 struct intel_framebuffer *intel_fb; 9005 9006 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9007 if (!intel_fb) { 9008 DRM_DEBUG_KMS("failed to alloc fb\n"); 9009 return; 9010 } 9011 9012 fb = &intel_fb->base; 9013 9014 val = I915_READ(PLANE_CTL(pipe, 0)); 9015 if (!(val & PLANE_CTL_ENABLE)) 9016 goto error; 9017 9018 pixel_format = val & PLANE_CTL_FORMAT_MASK; 9019 fourcc = skl_format_to_fourcc(pixel_format, 9020 val & PLANE_CTL_ORDER_RGBX, 9021 val & PLANE_CTL_ALPHA_MASK); 9022 fb->pixel_format = fourcc; 9023 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 9024 9025 tiling = val & PLANE_CTL_TILED_MASK; 9026 switch (tiling) { 9027 case PLANE_CTL_TILED_LINEAR: 9028 fb->modifier[0] = DRM_FORMAT_MOD_NONE; 9029 break; 9030 case PLANE_CTL_TILED_X: 9031 plane_config->tiling = I915_TILING_X; 9032 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 9033 break; 9034 case PLANE_CTL_TILED_Y: 9035 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED; 9036 break; 9037 case PLANE_CTL_TILED_YF: 9038 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED; 9039 break; 9040 default: 9041 MISSING_CASE(tiling); 9042 goto error; 9043 } 9044 9045 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; 9046 plane_config->base = base; 9047 9048 offset = I915_READ(PLANE_OFFSET(pipe, 0)); 9049 9050 val = I915_READ(PLANE_SIZE(pipe, 0)); 9051 fb->height = ((val >> 16) & 0xfff) + 1; 9052 fb->width = ((val >> 0) & 0x1fff) + 1; 9053 9054 val = I915_READ(PLANE_STRIDE(pipe, 0)); 9055 stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0], 9056 fb->pixel_format); 9057 fb->pitches[0] = (val & 0x3ff) * stride_mult; 9058 9059 aligned_height = intel_fb_align_height(dev, fb->height, 9060 fb->pixel_format, 9061 fb->modifier[0]); 9062 9063 plane_config->size = fb->pitches[0] * aligned_height; 9064 9065 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9066 pipe_name(pipe), fb->width, fb->height, 9067 fb->bits_per_pixel, base, fb->pitches[0], 9068 plane_config->size); 9069 9070 plane_config->fb = intel_fb; 9071 return; 9072 9073 error: 9074 kfree(fb); 9075 } 9076 9077 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 9078 struct intel_crtc_state *pipe_config) 9079 { 9080 struct drm_device *dev = crtc->base.dev; 9081 struct drm_i915_private *dev_priv = dev->dev_private; 9082 uint32_t tmp; 9083 9084 tmp = I915_READ(PF_CTL(crtc->pipe)); 9085 9086 if (tmp & PF_ENABLE) { 9087 pipe_config->pch_pfit.enabled = true; 9088 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 9089 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 9090 9091 /* We currently do not free assignements of panel fitters on 9092 * ivb/hsw (since we don't use the higher upscaling modes which 9093 * differentiates them) so just WARN about this case for now. */ 9094 if (IS_GEN7(dev)) { 9095 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 9096 PF_PIPE_SEL_IVB(crtc->pipe)); 9097 } 9098 } 9099 } 9100 9101 static void 9102 ironlake_get_initial_plane_config(struct intel_crtc *crtc, 9103 struct intel_initial_plane_config *plane_config) 9104 { 9105 struct drm_device *dev = crtc->base.dev; 9106 struct drm_i915_private *dev_priv = dev->dev_private; 9107 u32 val, base, offset; 9108 int pipe = crtc->pipe; 9109 int fourcc, pixel_format; 9110 unsigned int aligned_height; 9111 struct drm_framebuffer *fb; 9112 struct intel_framebuffer *intel_fb; 9113 9114 val = I915_READ(DSPCNTR(pipe)); 9115 if (!(val & DISPLAY_PLANE_ENABLE)) 9116 return; 9117 9118 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9119 if (!intel_fb) { 9120 DRM_DEBUG_KMS("failed to alloc fb\n"); 9121 return; 9122 } 9123 9124 fb = &intel_fb->base; 9125 9126 if (INTEL_INFO(dev)->gen >= 4) { 9127 if (val & DISPPLANE_TILED) { 9128 plane_config->tiling = I915_TILING_X; 9129 fb->modifier[0] = I915_FORMAT_MOD_X_TILED; 9130 } 9131 } 9132 9133 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 9134 fourcc = i9xx_format_to_fourcc(pixel_format); 9135 fb->pixel_format = fourcc; 9136 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; 9137 9138 base = I915_READ(DSPSURF(pipe)) & 0xfffff000; 9139 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 9140 offset = I915_READ(DSPOFFSET(pipe)); 9141 } else { 9142 if (plane_config->tiling) 9143 offset = I915_READ(DSPTILEOFF(pipe)); 9144 else 9145 offset = I915_READ(DSPLINOFF(pipe)); 9146 } 9147 plane_config->base = base; 9148 9149 val = I915_READ(PIPESRC(pipe)); 9150 fb->width = ((val >> 16) & 0xfff) + 1; 9151 fb->height = ((val >> 0) & 0xfff) + 1; 9152 9153 val = I915_READ(DSPSTRIDE(pipe)); 9154 fb->pitches[0] = val & 0xffffffc0; 9155 9156 aligned_height = intel_fb_align_height(dev, fb->height, 9157 fb->pixel_format, 9158 fb->modifier[0]); 9159 9160 plane_config->size = fb->pitches[0] * aligned_height; 9161 9162 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9163 pipe_name(pipe), fb->width, fb->height, 9164 fb->bits_per_pixel, base, fb->pitches[0], 9165 plane_config->size); 9166 9167 plane_config->fb = intel_fb; 9168 } 9169 9170 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9171 struct intel_crtc_state *pipe_config) 9172 { 9173 struct drm_device *dev = crtc->base.dev; 9174 struct drm_i915_private *dev_priv = dev->dev_private; 9175 uint32_t tmp; 9176 9177 if (!intel_display_power_is_enabled(dev_priv, 9178 POWER_DOMAIN_PIPE(crtc->pipe))) 9179 return false; 9180 9181 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9182 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9183 9184 tmp = I915_READ(PIPECONF(crtc->pipe)); 9185 if (!(tmp & PIPECONF_ENABLE)) 9186 return false; 9187 9188 switch (tmp & PIPECONF_BPC_MASK) { 9189 case PIPECONF_6BPC: 9190 pipe_config->pipe_bpp = 18; 9191 break; 9192 case PIPECONF_8BPC: 9193 pipe_config->pipe_bpp = 24; 9194 break; 9195 case PIPECONF_10BPC: 9196 pipe_config->pipe_bpp = 30; 9197 break; 9198 case PIPECONF_12BPC: 9199 pipe_config->pipe_bpp = 36; 9200 break; 9201 default: 9202 break; 9203 } 9204 9205 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 9206 pipe_config->limited_color_range = true; 9207 9208 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 9209 struct intel_shared_dpll *pll; 9210 9211 pipe_config->has_pch_encoder = true; 9212 9213 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 9214 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9215 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9216 9217 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9218 9219 if (HAS_PCH_IBX(dev_priv->dev)) { 9220 pipe_config->shared_dpll = 9221 (enum intel_dpll_id) crtc->pipe; 9222 } else { 9223 tmp = I915_READ(PCH_DPLL_SEL); 9224 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 9225 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; 9226 else 9227 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A; 9228 } 9229 9230 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 9231 9232 WARN_ON(!pll->get_hw_state(dev_priv, pll, 9233 &pipe_config->dpll_hw_state)); 9234 9235 tmp = pipe_config->dpll_hw_state.dpll; 9236 pipe_config->pixel_multiplier = 9237 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 9238 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 9239 9240 ironlake_pch_clock_get(crtc, pipe_config); 9241 } else { 9242 pipe_config->pixel_multiplier = 1; 9243 } 9244 9245 intel_get_pipe_timings(crtc, pipe_config); 9246 9247 ironlake_get_pfit_config(crtc, pipe_config); 9248 9249 return true; 9250 } 9251 9252 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9253 { 9254 struct drm_device *dev = dev_priv->dev; 9255 struct intel_crtc *crtc; 9256 9257 for_each_intel_crtc(dev, crtc) 9258 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 9259 pipe_name(crtc->pipe)); 9260 9261 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 9262 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 9263 I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 9264 I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 9265 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 9266 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 9267 "CPU PWM1 enabled\n"); 9268 if (IS_HASWELL(dev)) 9269 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 9270 "CPU PWM2 enabled\n"); 9271 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 9272 "PCH PWM1 enabled\n"); 9273 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 9274 "Utility pin enabled\n"); 9275 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 9276 9277 /* 9278 * In theory we can still leave IRQs enabled, as long as only the HPD 9279 * interrupts remain enabled. We used to check for that, but since it's 9280 * gen-specific and since we only disable LCPLL after we fully disable 9281 * the interrupts, the check below should be enough. 9282 */ 9283 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 9284 } 9285 9286 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 9287 { 9288 struct drm_device *dev = dev_priv->dev; 9289 9290 if (IS_HASWELL(dev)) 9291 return I915_READ(D_COMP_HSW); 9292 else 9293 return I915_READ(D_COMP_BDW); 9294 } 9295 9296 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 9297 { 9298 struct drm_device *dev = dev_priv->dev; 9299 9300 if (IS_HASWELL(dev)) { 9301 mutex_lock(&dev_priv->rps.hw_lock); 9302 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 9303 val)) 9304 DRM_ERROR("Failed to write to D_COMP\n"); 9305 mutex_unlock(&dev_priv->rps.hw_lock); 9306 } else { 9307 I915_WRITE(D_COMP_BDW, val); 9308 POSTING_READ(D_COMP_BDW); 9309 } 9310 } 9311 9312 /* 9313 * This function implements pieces of two sequences from BSpec: 9314 * - Sequence for display software to disable LCPLL 9315 * - Sequence for display software to allow package C8+ 9316 * The steps implemented here are just the steps that actually touch the LCPLL 9317 * register. Callers should take care of disabling all the display engine 9318 * functions, doing the mode unset, fixing interrupts, etc. 9319 */ 9320 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 9321 bool switch_to_fclk, bool allow_power_down) 9322 { 9323 uint32_t val; 9324 9325 assert_can_disable_lcpll(dev_priv); 9326 9327 val = I915_READ(LCPLL_CTL); 9328 9329 if (switch_to_fclk) { 9330 val |= LCPLL_CD_SOURCE_FCLK; 9331 I915_WRITE(LCPLL_CTL, val); 9332 9333 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & 9334 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9335 DRM_ERROR("Switching to FCLK failed\n"); 9336 9337 val = I915_READ(LCPLL_CTL); 9338 } 9339 9340 val |= LCPLL_PLL_DISABLE; 9341 I915_WRITE(LCPLL_CTL, val); 9342 POSTING_READ(LCPLL_CTL); 9343 9344 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) 9345 DRM_ERROR("LCPLL still locked\n"); 9346 9347 val = hsw_read_dcomp(dev_priv); 9348 val |= D_COMP_COMP_DISABLE; 9349 hsw_write_dcomp(dev_priv, val); 9350 ndelay(100); 9351 9352 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 9353 1)) 9354 DRM_ERROR("D_COMP RCOMP still in progress\n"); 9355 9356 if (allow_power_down) { 9357 val = I915_READ(LCPLL_CTL); 9358 val |= LCPLL_POWER_DOWN_ALLOW; 9359 I915_WRITE(LCPLL_CTL, val); 9360 POSTING_READ(LCPLL_CTL); 9361 } 9362 } 9363 9364 /* 9365 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 9366 * source. 9367 */ 9368 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 9369 { 9370 uint32_t val; 9371 9372 val = I915_READ(LCPLL_CTL); 9373 9374 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 9375 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 9376 return; 9377 9378 /* 9379 * Make sure we're not on PC8 state before disabling PC8, otherwise 9380 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 9381 */ 9382 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 9383 9384 if (val & LCPLL_POWER_DOWN_ALLOW) { 9385 val &= ~LCPLL_POWER_DOWN_ALLOW; 9386 I915_WRITE(LCPLL_CTL, val); 9387 POSTING_READ(LCPLL_CTL); 9388 } 9389 9390 val = hsw_read_dcomp(dev_priv); 9391 val |= D_COMP_COMP_FORCE; 9392 val &= ~D_COMP_COMP_DISABLE; 9393 hsw_write_dcomp(dev_priv, val); 9394 9395 val = I915_READ(LCPLL_CTL); 9396 val &= ~LCPLL_PLL_DISABLE; 9397 I915_WRITE(LCPLL_CTL, val); 9398 9399 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) 9400 DRM_ERROR("LCPLL not locked yet\n"); 9401 9402 if (val & LCPLL_CD_SOURCE_FCLK) { 9403 val = I915_READ(LCPLL_CTL); 9404 val &= ~LCPLL_CD_SOURCE_FCLK; 9405 I915_WRITE(LCPLL_CTL, val); 9406 9407 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & 9408 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9409 DRM_ERROR("Switching back to LCPLL failed\n"); 9410 } 9411 9412 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9413 intel_update_cdclk(dev_priv->dev); 9414 } 9415 9416 /* 9417 * Package states C8 and deeper are really deep PC states that can only be 9418 * reached when all the devices on the system allow it, so even if the graphics 9419 * device allows PC8+, it doesn't mean the system will actually get to these 9420 * states. Our driver only allows PC8+ when going into runtime PM. 9421 * 9422 * The requirements for PC8+ are that all the outputs are disabled, the power 9423 * well is disabled and most interrupts are disabled, and these are also 9424 * requirements for runtime PM. When these conditions are met, we manually do 9425 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 9426 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 9427 * hang the machine. 9428 * 9429 * When we really reach PC8 or deeper states (not just when we allow it) we lose 9430 * the state of some registers, so when we come back from PC8+ we need to 9431 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 9432 * need to take care of the registers kept by RC6. Notice that this happens even 9433 * if we don't put the device in PCI D3 state (which is what currently happens 9434 * because of the runtime PM support). 9435 * 9436 * For more, read "Display Sequences for Package C8" on the hardware 9437 * documentation. 9438 */ 9439 void hsw_enable_pc8(struct drm_i915_private *dev_priv) 9440 { 9441 struct drm_device *dev = dev_priv->dev; 9442 uint32_t val; 9443 9444 DRM_DEBUG_KMS("Enabling package C8+\n"); 9445 9446 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 9447 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9448 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 9449 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9450 } 9451 9452 lpt_disable_clkout_dp(dev); 9453 hsw_disable_lcpll(dev_priv, true, true); 9454 } 9455 9456 void hsw_disable_pc8(struct drm_i915_private *dev_priv) 9457 { 9458 struct drm_device *dev = dev_priv->dev; 9459 uint32_t val; 9460 9461 DRM_DEBUG_KMS("Disabling package C8+\n"); 9462 9463 hsw_restore_lcpll(dev_priv); 9464 lpt_init_pch_refclk(dev); 9465 9466 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 9467 val = I915_READ(SOUTH_DSPCLK_GATE_D); 9468 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 9469 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 9470 } 9471 9472 intel_prepare_ddi(dev); 9473 } 9474 9475 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9476 { 9477 struct drm_device *dev = old_state->dev; 9478 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; 9479 9480 broxton_set_cdclk(dev, req_cdclk); 9481 } 9482 9483 /* compute the max rate for new configuration */ 9484 static int ilk_max_pixel_rate(struct drm_atomic_state *state) 9485 { 9486 struct intel_crtc *intel_crtc; 9487 struct intel_crtc_state *crtc_state; 9488 int max_pixel_rate = 0; 9489 9490 for_each_intel_crtc(state->dev, intel_crtc) { 9491 int pixel_rate; 9492 9493 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 9494 if (IS_ERR(crtc_state)) 9495 return PTR_ERR(crtc_state); 9496 9497 if (!crtc_state->base.enable) 9498 continue; 9499 9500 pixel_rate = ilk_pipe_pixel_rate(crtc_state); 9501 9502 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 9503 if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled) 9504 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); 9505 9506 max_pixel_rate = max(max_pixel_rate, pixel_rate); 9507 } 9508 9509 return max_pixel_rate; 9510 } 9511 9512 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) 9513 { 9514 struct drm_i915_private *dev_priv = dev->dev_private; 9515 uint32_t val, data; 9516 int ret; 9517 9518 if (WARN((I915_READ(LCPLL_CTL) & 9519 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | 9520 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | 9521 LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | 9522 LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, 9523 "trying to change cdclk frequency with cdclk not enabled\n")) 9524 return; 9525 9526 mutex_lock(&dev_priv->rps.hw_lock); 9527 ret = sandybridge_pcode_write(dev_priv, 9528 BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); 9529 mutex_unlock(&dev_priv->rps.hw_lock); 9530 if (ret) { 9531 DRM_ERROR("failed to inform pcode about cdclk change\n"); 9532 return; 9533 } 9534 9535 val = I915_READ(LCPLL_CTL); 9536 val |= LCPLL_CD_SOURCE_FCLK; 9537 I915_WRITE(LCPLL_CTL, val); 9538 9539 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & 9540 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9541 DRM_ERROR("Switching to FCLK failed\n"); 9542 9543 val = I915_READ(LCPLL_CTL); 9544 val &= ~LCPLL_CLK_FREQ_MASK; 9545 9546 switch (cdclk) { 9547 case 450000: 9548 val |= LCPLL_CLK_FREQ_450; 9549 data = 0; 9550 break; 9551 case 540000: 9552 val |= LCPLL_CLK_FREQ_54O_BDW; 9553 data = 1; 9554 break; 9555 case 337500: 9556 val |= LCPLL_CLK_FREQ_337_5_BDW; 9557 data = 2; 9558 break; 9559 case 675000: 9560 val |= LCPLL_CLK_FREQ_675_BDW; 9561 data = 3; 9562 break; 9563 default: 9564 WARN(1, "invalid cdclk frequency\n"); 9565 return; 9566 } 9567 9568 I915_WRITE(LCPLL_CTL, val); 9569 9570 val = I915_READ(LCPLL_CTL); 9571 val &= ~LCPLL_CD_SOURCE_FCLK; 9572 I915_WRITE(LCPLL_CTL, val); 9573 9574 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & 9575 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9576 DRM_ERROR("Switching back to LCPLL failed\n"); 9577 9578 mutex_lock(&dev_priv->rps.hw_lock); 9579 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); 9580 mutex_unlock(&dev_priv->rps.hw_lock); 9581 9582 intel_update_cdclk(dev); 9583 9584 WARN(cdclk != dev_priv->cdclk_freq, 9585 "cdclk requested %d kHz but got %d kHz\n", 9586 cdclk, dev_priv->cdclk_freq); 9587 } 9588 9589 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9590 { 9591 struct drm_i915_private *dev_priv = to_i915(state->dev); 9592 int max_pixclk = ilk_max_pixel_rate(state); 9593 int cdclk; 9594 9595 /* 9596 * FIXME should also account for plane ratio 9597 * once 64bpp pixel formats are supported. 9598 */ 9599 if (max_pixclk > 540000) 9600 cdclk = 675000; 9601 else if (max_pixclk > 450000) 9602 cdclk = 540000; 9603 else if (max_pixclk > 337500) 9604 cdclk = 450000; 9605 else 9606 cdclk = 337500; 9607 9608 /* 9609 * FIXME move the cdclk caclulation to 9610 * compute_config() so we can fail gracegully. 9611 */ 9612 if (cdclk > dev_priv->max_cdclk_freq) { 9613 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9614 cdclk, dev_priv->max_cdclk_freq); 9615 cdclk = dev_priv->max_cdclk_freq; 9616 } 9617 9618 to_intel_atomic_state(state)->cdclk = cdclk; 9619 9620 return 0; 9621 } 9622 9623 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9624 { 9625 struct drm_device *dev = old_state->dev; 9626 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk; 9627 9628 broadwell_set_cdclk(dev, req_cdclk); 9629 } 9630 9631 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9632 struct intel_crtc_state *crtc_state) 9633 { 9634 if (!intel_ddi_pll_select(crtc, crtc_state)) 9635 return -EINVAL; 9636 9637 crtc->lowfreq_avail = false; 9638 9639 return 0; 9640 } 9641 9642 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 9643 enum port port, 9644 struct intel_crtc_state *pipe_config) 9645 { 9646 switch (port) { 9647 case PORT_A: 9648 pipe_config->ddi_pll_sel = SKL_DPLL0; 9649 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; 9650 break; 9651 case PORT_B: 9652 pipe_config->ddi_pll_sel = SKL_DPLL1; 9653 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2; 9654 break; 9655 case PORT_C: 9656 pipe_config->ddi_pll_sel = SKL_DPLL2; 9657 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3; 9658 break; 9659 default: 9660 DRM_ERROR("Incorrect port type\n"); 9661 } 9662 } 9663 9664 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 9665 enum port port, 9666 struct intel_crtc_state *pipe_config) 9667 { 9668 u32 temp, dpll_ctl1; 9669 9670 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 9671 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); 9672 9673 switch (pipe_config->ddi_pll_sel) { 9674 case SKL_DPLL0: 9675 /* 9676 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part 9677 * of the shared DPLL framework and thus needs to be read out 9678 * separately 9679 */ 9680 dpll_ctl1 = I915_READ(DPLL_CTRL1); 9681 pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f; 9682 break; 9683 case SKL_DPLL1: 9684 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; 9685 break; 9686 case SKL_DPLL2: 9687 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2; 9688 break; 9689 case SKL_DPLL3: 9690 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3; 9691 break; 9692 } 9693 } 9694 9695 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 9696 enum port port, 9697 struct intel_crtc_state *pipe_config) 9698 { 9699 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 9700 9701 switch (pipe_config->ddi_pll_sel) { 9702 case PORT_CLK_SEL_WRPLL1: 9703 pipe_config->shared_dpll = DPLL_ID_WRPLL1; 9704 break; 9705 case PORT_CLK_SEL_WRPLL2: 9706 pipe_config->shared_dpll = DPLL_ID_WRPLL2; 9707 break; 9708 } 9709 } 9710 9711 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 9712 struct intel_crtc_state *pipe_config) 9713 { 9714 struct drm_device *dev = crtc->base.dev; 9715 struct drm_i915_private *dev_priv = dev->dev_private; 9716 struct intel_shared_dpll *pll; 9717 enum port port; 9718 uint32_t tmp; 9719 9720 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 9721 9722 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9723 9724 if (IS_SKYLAKE(dev)) 9725 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9726 else if (IS_BROXTON(dev)) 9727 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9728 else 9729 haswell_get_ddi_pll(dev_priv, port, pipe_config); 9730 9731 if (pipe_config->shared_dpll >= 0) { 9732 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 9733 9734 WARN_ON(!pll->get_hw_state(dev_priv, pll, 9735 &pipe_config->dpll_hw_state)); 9736 } 9737 9738 /* 9739 * Haswell has only FDI/PCH transcoder A. It is which is connected to 9740 * DDI E. So just check whether this pipe is wired to DDI E and whether 9741 * the PCH transcoder is on. 9742 */ 9743 if (INTEL_INFO(dev)->gen < 9 && 9744 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 9745 pipe_config->has_pch_encoder = true; 9746 9747 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 9748 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9749 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9750 9751 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9752 } 9753 } 9754 9755 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 9756 struct intel_crtc_state *pipe_config) 9757 { 9758 struct drm_device *dev = crtc->base.dev; 9759 struct drm_i915_private *dev_priv = dev->dev_private; 9760 enum intel_display_power_domain pfit_domain; 9761 uint32_t tmp; 9762 9763 if (!intel_display_power_is_enabled(dev_priv, 9764 POWER_DOMAIN_PIPE(crtc->pipe))) 9765 return false; 9766 9767 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9768 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9769 9770 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 9771 if (tmp & TRANS_DDI_FUNC_ENABLE) { 9772 enum i915_pipe trans_edp_pipe; 9773 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 9774 default: 9775 WARN(1, "unknown pipe linked to edp transcoder\n"); 9776 case TRANS_DDI_EDP_INPUT_A_ONOFF: 9777 case TRANS_DDI_EDP_INPUT_A_ON: 9778 trans_edp_pipe = PIPE_A; 9779 break; 9780 case TRANS_DDI_EDP_INPUT_B_ONOFF: 9781 trans_edp_pipe = PIPE_B; 9782 break; 9783 case TRANS_DDI_EDP_INPUT_C_ONOFF: 9784 trans_edp_pipe = PIPE_C; 9785 break; 9786 } 9787 9788 if (trans_edp_pipe == crtc->pipe) 9789 pipe_config->cpu_transcoder = TRANSCODER_EDP; 9790 } 9791 9792 if (!intel_display_power_is_enabled(dev_priv, 9793 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 9794 return false; 9795 9796 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 9797 if (!(tmp & PIPECONF_ENABLE)) 9798 return false; 9799 9800 haswell_get_ddi_port_state(crtc, pipe_config); 9801 9802 intel_get_pipe_timings(crtc, pipe_config); 9803 9804 if (INTEL_INFO(dev)->gen >= 9) { 9805 skl_init_scalers(dev, crtc, pipe_config); 9806 } 9807 9808 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9809 9810 if (INTEL_INFO(dev)->gen >= 9) { 9811 pipe_config->scaler_state.scaler_id = -1; 9812 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9813 } 9814 9815 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 9816 if (INTEL_INFO(dev)->gen == 9) 9817 skylake_get_pfit_config(crtc, pipe_config); 9818 else if (INTEL_INFO(dev)->gen < 9) 9819 ironlake_get_pfit_config(crtc, pipe_config); 9820 else 9821 MISSING_CASE(INTEL_INFO(dev)->gen); 9822 } 9823 9824 if (IS_HASWELL(dev)) 9825 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 9826 (I915_READ(IPS_CTL) & IPS_ENABLE); 9827 9828 if (pipe_config->cpu_transcoder != TRANSCODER_EDP) { 9829 pipe_config->pixel_multiplier = 9830 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 9831 } else { 9832 pipe_config->pixel_multiplier = 1; 9833 } 9834 9835 return true; 9836 } 9837 9838 static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 9839 { 9840 struct drm_device *dev = crtc->dev; 9841 struct drm_i915_private *dev_priv = dev->dev_private; 9842 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9843 uint32_t cntl = 0, size = 0; 9844 9845 if (base) { 9846 unsigned int width = intel_crtc->base.cursor->state->crtc_w; 9847 unsigned int height = intel_crtc->base.cursor->state->crtc_h; 9848 unsigned int stride = roundup_pow_of_two(width) * 4; 9849 9850 switch (stride) { 9851 default: 9852 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", 9853 width, stride); 9854 stride = 256; 9855 /* fallthrough */ 9856 case 256: 9857 case 512: 9858 case 1024: 9859 case 2048: 9860 break; 9861 } 9862 9863 cntl |= CURSOR_ENABLE | 9864 CURSOR_GAMMA_ENABLE | 9865 CURSOR_FORMAT_ARGB | 9866 CURSOR_STRIDE(stride); 9867 9868 size = (height << 12) | width; 9869 } 9870 9871 if (intel_crtc->cursor_cntl != 0 && 9872 (intel_crtc->cursor_base != base || 9873 intel_crtc->cursor_size != size || 9874 intel_crtc->cursor_cntl != cntl)) { 9875 /* On these chipsets we can only modify the base/size/stride 9876 * whilst the cursor is disabled. 9877 */ 9878 I915_WRITE(_CURACNTR, 0); 9879 POSTING_READ(_CURACNTR); 9880 intel_crtc->cursor_cntl = 0; 9881 } 9882 9883 if (intel_crtc->cursor_base != base) { 9884 I915_WRITE(_CURABASE, base); 9885 intel_crtc->cursor_base = base; 9886 } 9887 9888 if (intel_crtc->cursor_size != size) { 9889 I915_WRITE(CURSIZE, size); 9890 intel_crtc->cursor_size = size; 9891 } 9892 9893 if (intel_crtc->cursor_cntl != cntl) { 9894 I915_WRITE(_CURACNTR, cntl); 9895 POSTING_READ(_CURACNTR); 9896 intel_crtc->cursor_cntl = cntl; 9897 } 9898 } 9899 9900 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 9901 { 9902 struct drm_device *dev = crtc->dev; 9903 struct drm_i915_private *dev_priv = dev->dev_private; 9904 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9905 int pipe = intel_crtc->pipe; 9906 uint32_t cntl; 9907 9908 cntl = 0; 9909 if (base) { 9910 cntl = MCURSOR_GAMMA_ENABLE; 9911 switch (intel_crtc->base.cursor->state->crtc_w) { 9912 case 64: 9913 cntl |= CURSOR_MODE_64_ARGB_AX; 9914 break; 9915 case 128: 9916 cntl |= CURSOR_MODE_128_ARGB_AX; 9917 break; 9918 case 256: 9919 cntl |= CURSOR_MODE_256_ARGB_AX; 9920 break; 9921 default: 9922 MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); 9923 return; 9924 } 9925 cntl |= pipe << 28; /* Connect to correct pipe */ 9926 9927 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 9928 cntl |= CURSOR_PIPE_CSC_ENABLE; 9929 } 9930 9931 if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) 9932 cntl |= CURSOR_ROTATE_180; 9933 9934 if (intel_crtc->cursor_cntl != cntl) { 9935 I915_WRITE(CURCNTR(pipe), cntl); 9936 POSTING_READ(CURCNTR(pipe)); 9937 intel_crtc->cursor_cntl = cntl; 9938 } 9939 9940 /* and commit changes on next vblank */ 9941 I915_WRITE(CURBASE(pipe), base); 9942 POSTING_READ(CURBASE(pipe)); 9943 9944 intel_crtc->cursor_base = base; 9945 } 9946 9947 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 9948 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 9949 bool on) 9950 { 9951 struct drm_device *dev = crtc->dev; 9952 struct drm_i915_private *dev_priv = dev->dev_private; 9953 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9954 int pipe = intel_crtc->pipe; 9955 int x = crtc->cursor_x; 9956 int y = crtc->cursor_y; 9957 u32 base = 0, pos = 0; 9958 9959 if (on) 9960 base = intel_crtc->cursor_addr; 9961 9962 if (x >= intel_crtc->config->pipe_src_w) 9963 base = 0; 9964 9965 if (y >= intel_crtc->config->pipe_src_h) 9966 base = 0; 9967 9968 if (x < 0) { 9969 if (x + intel_crtc->base.cursor->state->crtc_w <= 0) 9970 base = 0; 9971 9972 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 9973 x = -x; 9974 } 9975 pos |= x << CURSOR_X_SHIFT; 9976 9977 if (y < 0) { 9978 if (y + intel_crtc->base.cursor->state->crtc_h <= 0) 9979 base = 0; 9980 9981 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 9982 y = -y; 9983 } 9984 pos |= y << CURSOR_Y_SHIFT; 9985 9986 if (base == 0 && intel_crtc->cursor_base == 0) 9987 return; 9988 9989 I915_WRITE(CURPOS(pipe), pos); 9990 9991 /* ILK+ do this automagically */ 9992 if (HAS_GMCH_DISPLAY(dev) && 9993 crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { 9994 base += (intel_crtc->base.cursor->state->crtc_h * 9995 intel_crtc->base.cursor->state->crtc_w - 1) * 4; 9996 } 9997 9998 if (IS_845G(dev) || IS_I865G(dev)) 9999 i845_update_cursor(crtc, base); 10000 else 10001 i9xx_update_cursor(crtc, base); 10002 } 10003 10004 static bool cursor_size_ok(struct drm_device *dev, 10005 uint32_t width, uint32_t height) 10006 { 10007 if (width == 0 || height == 0) 10008 return false; 10009 10010 /* 10011 * 845g/865g are special in that they are only limited by 10012 * the width of their cursors, the height is arbitrary up to 10013 * the precision of the register. Everything else requires 10014 * square cursors, limited to a few power-of-two sizes. 10015 */ 10016 if (IS_845G(dev) || IS_I865G(dev)) { 10017 if ((width & 63) != 0) 10018 return false; 10019 10020 if (width > (IS_845G(dev) ? 64 : 512)) 10021 return false; 10022 10023 if (height > 1023) 10024 return false; 10025 } else { 10026 switch (width | height) { 10027 case 256: 10028 case 128: 10029 if (IS_GEN2(dev)) 10030 return false; 10031 case 64: 10032 break; 10033 default: 10034 return false; 10035 } 10036 } 10037 10038 return true; 10039 } 10040 10041 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 10042 u16 *blue, uint32_t start, uint32_t size) 10043 { 10044 int end = (start + size > 256) ? 256 : start + size, i; 10045 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10046 10047 for (i = start; i < end; i++) { 10048 intel_crtc->lut_r[i] = red[i] >> 8; 10049 intel_crtc->lut_g[i] = green[i] >> 8; 10050 intel_crtc->lut_b[i] = blue[i] >> 8; 10051 } 10052 10053 intel_crtc_load_lut(crtc); 10054 } 10055 10056 /* VESA 640x480x72Hz mode to set on the pipe */ 10057 static struct drm_display_mode load_detect_mode = { 10058 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 10059 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 10060 }; 10061 10062 struct drm_framebuffer * 10063 __intel_framebuffer_create(struct drm_device *dev, 10064 struct drm_mode_fb_cmd2 *mode_cmd, 10065 struct drm_i915_gem_object *obj) 10066 { 10067 struct intel_framebuffer *intel_fb; 10068 int ret; 10069 10070 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10071 if (!intel_fb) { 10072 drm_gem_object_unreference(&obj->base); 10073 return ERR_PTR(-ENOMEM); 10074 } 10075 10076 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 10077 if (ret) 10078 goto err; 10079 10080 return &intel_fb->base; 10081 err: 10082 drm_gem_object_unreference(&obj->base); 10083 kfree(intel_fb); 10084 10085 return ERR_PTR(ret); 10086 } 10087 10088 static struct drm_framebuffer * 10089 intel_framebuffer_create(struct drm_device *dev, 10090 struct drm_mode_fb_cmd2 *mode_cmd, 10091 struct drm_i915_gem_object *obj) 10092 { 10093 struct drm_framebuffer *fb; 10094 int ret; 10095 10096 ret = i915_mutex_lock_interruptible(dev); 10097 if (ret) 10098 return ERR_PTR(ret); 10099 fb = __intel_framebuffer_create(dev, mode_cmd, obj); 10100 mutex_unlock(&dev->struct_mutex); 10101 10102 return fb; 10103 } 10104 10105 static u32 10106 intel_framebuffer_pitch_for_width(int width, int bpp) 10107 { 10108 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 10109 return ALIGN(pitch, 64); 10110 } 10111 10112 static u32 10113 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 10114 { 10115 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 10116 return PAGE_ALIGN(pitch * mode->vdisplay); 10117 } 10118 10119 static struct drm_framebuffer * 10120 intel_framebuffer_create_for_mode(struct drm_device *dev, 10121 struct drm_display_mode *mode, 10122 int depth, int bpp) 10123 { 10124 struct drm_i915_gem_object *obj; 10125 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10126 10127 obj = i915_gem_alloc_object(dev, 10128 intel_framebuffer_size_for_mode(mode, bpp)); 10129 if (obj == NULL) 10130 return ERR_PTR(-ENOMEM); 10131 10132 mode_cmd.width = mode->hdisplay; 10133 mode_cmd.height = mode->vdisplay; 10134 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 10135 bpp); 10136 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 10137 10138 return intel_framebuffer_create(dev, &mode_cmd, obj); 10139 } 10140 10141 static struct drm_framebuffer * 10142 mode_fits_in_fbdev(struct drm_device *dev, 10143 struct drm_display_mode *mode) 10144 { 10145 #ifdef CONFIG_DRM_FBDEV_EMULATION 10146 struct drm_i915_private *dev_priv = dev->dev_private; 10147 struct drm_i915_gem_object *obj; 10148 struct drm_framebuffer *fb; 10149 10150 if (!dev_priv->fbdev) 10151 return NULL; 10152 10153 if (!dev_priv->fbdev->fb) 10154 return NULL; 10155 10156 obj = dev_priv->fbdev->fb->obj; 10157 BUG_ON(!obj); 10158 10159 fb = &dev_priv->fbdev->fb->base; 10160 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 10161 fb->bits_per_pixel)) 10162 return NULL; 10163 10164 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 10165 return NULL; 10166 10167 return fb; 10168 #else 10169 return NULL; 10170 #endif 10171 } 10172 10173 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, 10174 struct drm_crtc *crtc, 10175 struct drm_display_mode *mode, 10176 struct drm_framebuffer *fb, 10177 int x, int y) 10178 { 10179 struct drm_plane_state *plane_state; 10180 int hdisplay, vdisplay; 10181 int ret; 10182 10183 plane_state = drm_atomic_get_plane_state(state, crtc->primary); 10184 if (IS_ERR(plane_state)) 10185 return PTR_ERR(plane_state); 10186 10187 if (mode) 10188 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); 10189 else 10190 hdisplay = vdisplay = 0; 10191 10192 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL); 10193 if (ret) 10194 return ret; 10195 drm_atomic_set_fb_for_plane(plane_state, fb); 10196 plane_state->crtc_x = 0; 10197 plane_state->crtc_y = 0; 10198 plane_state->crtc_w = hdisplay; 10199 plane_state->crtc_h = vdisplay; 10200 plane_state->src_x = x << 16; 10201 plane_state->src_y = y << 16; 10202 plane_state->src_w = hdisplay << 16; 10203 plane_state->src_h = vdisplay << 16; 10204 10205 return 0; 10206 } 10207 10208 bool intel_get_load_detect_pipe(struct drm_connector *connector, 10209 struct drm_display_mode *mode, 10210 struct intel_load_detect_pipe *old, 10211 struct drm_modeset_acquire_ctx *ctx) 10212 { 10213 struct intel_crtc *intel_crtc; 10214 struct intel_encoder *intel_encoder = 10215 intel_attached_encoder(connector); 10216 struct drm_crtc *possible_crtc; 10217 struct drm_encoder *encoder = &intel_encoder->base; 10218 struct drm_crtc *crtc = NULL; 10219 struct drm_device *dev = encoder->dev; 10220 struct drm_framebuffer *fb; 10221 struct drm_mode_config *config = &dev->mode_config; 10222 struct drm_atomic_state *state = NULL; 10223 struct drm_connector_state *connector_state; 10224 struct intel_crtc_state *crtc_state; 10225 int ret, i = -1; 10226 10227 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10228 connector->base.id, connector->name, 10229 encoder->base.id, encoder->name); 10230 10231 retry: 10232 ret = drm_modeset_lock(&config->connection_mutex, ctx); 10233 if (ret) 10234 goto fail; 10235 10236 /* 10237 * Algorithm gets a little messy: 10238 * 10239 * - if the connector already has an assigned crtc, use it (but make 10240 * sure it's on first) 10241 * 10242 * - try to find the first unused crtc that can drive this connector, 10243 * and use that if we find one 10244 */ 10245 10246 /* See if we already have a CRTC for this connector */ 10247 if (encoder->crtc) { 10248 crtc = encoder->crtc; 10249 10250 ret = drm_modeset_lock(&crtc->mutex, ctx); 10251 if (ret) 10252 goto fail; 10253 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 10254 if (ret) 10255 goto fail; 10256 10257 old->dpms_mode = connector->dpms; 10258 old->load_detect_temp = false; 10259 10260 /* Make sure the crtc and connector are running */ 10261 if (connector->dpms != DRM_MODE_DPMS_ON) 10262 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); 10263 10264 return true; 10265 } 10266 10267 /* Find an unused one (if possible) */ 10268 for_each_crtc(dev, possible_crtc) { 10269 i++; 10270 if (!(encoder->possible_crtcs & (1 << i))) 10271 continue; 10272 if (possible_crtc->state->enable) 10273 continue; 10274 10275 crtc = possible_crtc; 10276 break; 10277 } 10278 10279 /* 10280 * If we didn't find an unused CRTC, don't use any. 10281 */ 10282 if (!crtc) { 10283 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 10284 goto fail; 10285 } 10286 10287 ret = drm_modeset_lock(&crtc->mutex, ctx); 10288 if (ret) 10289 goto fail; 10290 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 10291 if (ret) 10292 goto fail; 10293 10294 intel_crtc = to_intel_crtc(crtc); 10295 old->dpms_mode = connector->dpms; 10296 old->load_detect_temp = true; 10297 old->release_fb = NULL; 10298 10299 state = drm_atomic_state_alloc(dev); 10300 if (!state) 10301 return false; 10302 10303 state->acquire_ctx = ctx; 10304 10305 connector_state = drm_atomic_get_connector_state(state, connector); 10306 if (IS_ERR(connector_state)) { 10307 ret = PTR_ERR(connector_state); 10308 goto fail; 10309 } 10310 10311 connector_state->crtc = crtc; 10312 connector_state->best_encoder = &intel_encoder->base; 10313 10314 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10315 if (IS_ERR(crtc_state)) { 10316 ret = PTR_ERR(crtc_state); 10317 goto fail; 10318 } 10319 10320 crtc_state->base.active = crtc_state->base.enable = true; 10321 10322 if (!mode) 10323 mode = &load_detect_mode; 10324 10325 /* We need a framebuffer large enough to accommodate all accesses 10326 * that the plane may generate whilst we perform load detection. 10327 * We can not rely on the fbcon either being present (we get called 10328 * during its initialisation to detect all boot displays, or it may 10329 * not even exist) or that it is large enough to satisfy the 10330 * requested mode. 10331 */ 10332 fb = mode_fits_in_fbdev(dev, mode); 10333 if (fb == NULL) { 10334 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 10335 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 10336 old->release_fb = fb; 10337 } else 10338 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 10339 if (IS_ERR(fb)) { 10340 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 10341 goto fail; 10342 } 10343 10344 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 10345 if (ret) 10346 goto fail; 10347 10348 drm_mode_copy(&crtc_state->base.mode, mode); 10349 10350 if (drm_atomic_commit(state)) { 10351 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 10352 if (old->release_fb) 10353 old->release_fb->funcs->destroy(old->release_fb); 10354 goto fail; 10355 } 10356 crtc->primary->crtc = crtc; 10357 10358 /* let the connector get through one full cycle before testing */ 10359 intel_wait_for_vblank(dev, intel_crtc->pipe); 10360 return true; 10361 10362 fail: 10363 drm_atomic_state_free(state); 10364 state = NULL; 10365 10366 if (ret == -EDEADLK) { 10367 drm_modeset_backoff(ctx); 10368 goto retry; 10369 } 10370 10371 return false; 10372 } 10373 10374 void intel_release_load_detect_pipe(struct drm_connector *connector, 10375 struct intel_load_detect_pipe *old, 10376 struct drm_modeset_acquire_ctx *ctx) 10377 { 10378 struct drm_device *dev = connector->dev; 10379 struct intel_encoder *intel_encoder = 10380 intel_attached_encoder(connector); 10381 struct drm_encoder *encoder = &intel_encoder->base; 10382 struct drm_crtc *crtc = encoder->crtc; 10383 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10384 struct drm_atomic_state *state; 10385 struct drm_connector_state *connector_state; 10386 struct intel_crtc_state *crtc_state; 10387 int ret; 10388 10389 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10390 connector->base.id, connector->name, 10391 encoder->base.id, encoder->name); 10392 10393 if (old->load_detect_temp) { 10394 state = drm_atomic_state_alloc(dev); 10395 if (!state) 10396 goto fail; 10397 10398 state->acquire_ctx = ctx; 10399 10400 connector_state = drm_atomic_get_connector_state(state, connector); 10401 if (IS_ERR(connector_state)) 10402 goto fail; 10403 10404 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10405 if (IS_ERR(crtc_state)) 10406 goto fail; 10407 10408 connector_state->best_encoder = NULL; 10409 connector_state->crtc = NULL; 10410 10411 crtc_state->base.enable = crtc_state->base.active = false; 10412 10413 ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL, 10414 0, 0); 10415 if (ret) 10416 goto fail; 10417 10418 ret = drm_atomic_commit(state); 10419 if (ret) 10420 goto fail; 10421 10422 if (old->release_fb) { 10423 drm_framebuffer_unregister_private(old->release_fb); 10424 drm_framebuffer_unreference(old->release_fb); 10425 } 10426 10427 return; 10428 } 10429 10430 /* Switch crtc and encoder back off if necessary */ 10431 if (old->dpms_mode != DRM_MODE_DPMS_ON) 10432 connector->funcs->dpms(connector, old->dpms_mode); 10433 10434 return; 10435 fail: 10436 DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); 10437 drm_atomic_state_free(state); 10438 } 10439 10440 static int i9xx_pll_refclk(struct drm_device *dev, 10441 const struct intel_crtc_state *pipe_config) 10442 { 10443 struct drm_i915_private *dev_priv = dev->dev_private; 10444 u32 dpll = pipe_config->dpll_hw_state.dpll; 10445 10446 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10447 return dev_priv->vbt.lvds_ssc_freq; 10448 else if (HAS_PCH_SPLIT(dev)) 10449 return 120000; 10450 else if (!IS_GEN2(dev)) 10451 return 96000; 10452 else 10453 return 48000; 10454 } 10455 10456 /* Returns the clock of the currently programmed mode of the given pipe. */ 10457 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 10458 struct intel_crtc_state *pipe_config) 10459 { 10460 struct drm_device *dev = crtc->base.dev; 10461 struct drm_i915_private *dev_priv = dev->dev_private; 10462 int pipe = pipe_config->cpu_transcoder; 10463 u32 dpll = pipe_config->dpll_hw_state.dpll; 10464 u32 fp; 10465 intel_clock_t clock; 10466 int port_clock; 10467 int refclk = i9xx_pll_refclk(dev, pipe_config); 10468 10469 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 10470 fp = pipe_config->dpll_hw_state.fp0; 10471 else 10472 fp = pipe_config->dpll_hw_state.fp1; 10473 10474 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 10475 if (IS_PINEVIEW(dev)) { 10476 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 10477 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 10478 } else { 10479 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 10480 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 10481 } 10482 10483 if (!IS_GEN2(dev)) { 10484 if (IS_PINEVIEW(dev)) 10485 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 10486 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 10487 else 10488 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 10489 DPLL_FPA01_P1_POST_DIV_SHIFT); 10490 10491 switch (dpll & DPLL_MODE_MASK) { 10492 case DPLLB_MODE_DAC_SERIAL: 10493 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 10494 5 : 10; 10495 break; 10496 case DPLLB_MODE_LVDS: 10497 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 10498 7 : 14; 10499 break; 10500 default: 10501 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 10502 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 10503 return; 10504 } 10505 10506 if (IS_PINEVIEW(dev)) 10507 port_clock = pnv_calc_dpll_params(refclk, &clock); 10508 else 10509 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10510 } else { 10511 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS); 10512 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 10513 10514 if (is_lvds) { 10515 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 10516 DPLL_FPA01_P1_POST_DIV_SHIFT); 10517 10518 if (lvds & LVDS_CLKB_POWER_UP) 10519 clock.p2 = 7; 10520 else 10521 clock.p2 = 14; 10522 } else { 10523 if (dpll & PLL_P1_DIVIDE_BY_TWO) 10524 clock.p1 = 2; 10525 else { 10526 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 10527 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 10528 } 10529 if (dpll & PLL_P2_DIVIDE_BY_4) 10530 clock.p2 = 4; 10531 else 10532 clock.p2 = 2; 10533 } 10534 10535 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10536 } 10537 10538 /* 10539 * This value includes pixel_multiplier. We will use 10540 * port_clock to compute adjusted_mode.crtc_clock in the 10541 * encoder's get_config() function. 10542 */ 10543 pipe_config->port_clock = port_clock; 10544 } 10545 10546 int intel_dotclock_calculate(int link_freq, 10547 const struct intel_link_m_n *m_n) 10548 { 10549 /* 10550 * The calculation for the data clock is: 10551 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 10552 * But we want to avoid losing precison if possible, so: 10553 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 10554 * 10555 * and the link clock is simpler: 10556 * link_clock = (m * link_clock) / n 10557 */ 10558 10559 if (!m_n->link_n) 10560 return 0; 10561 10562 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); 10563 } 10564 10565 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 10566 struct intel_crtc_state *pipe_config) 10567 { 10568 struct drm_device *dev = crtc->base.dev; 10569 10570 /* read out port_clock from the DPLL */ 10571 i9xx_crtc_clock_get(crtc, pipe_config); 10572 10573 /* 10574 * This value does not include pixel_multiplier. 10575 * We will check that port_clock and adjusted_mode.crtc_clock 10576 * agree once we know their relationship in the encoder's 10577 * get_config() function. 10578 */ 10579 pipe_config->base.adjusted_mode.crtc_clock = 10580 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, 10581 &pipe_config->fdi_m_n); 10582 } 10583 10584 /** Returns the currently programmed mode of the given pipe. */ 10585 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 10586 struct drm_crtc *crtc) 10587 { 10588 struct drm_i915_private *dev_priv = dev->dev_private; 10589 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10590 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10591 struct drm_display_mode *mode; 10592 struct intel_crtc_state pipe_config; 10593 int htot = I915_READ(HTOTAL(cpu_transcoder)); 10594 int hsync = I915_READ(HSYNC(cpu_transcoder)); 10595 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 10596 int vsync = I915_READ(VSYNC(cpu_transcoder)); 10597 enum i915_pipe pipe = intel_crtc->pipe; 10598 10599 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10600 if (!mode) 10601 return NULL; 10602 10603 /* 10604 * Construct a pipe_config sufficient for getting the clock info 10605 * back out of crtc_clock_get. 10606 * 10607 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 10608 * to use a real value here instead. 10609 */ 10610 pipe_config.cpu_transcoder = (enum transcoder) pipe; 10611 pipe_config.pixel_multiplier = 1; 10612 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe)); 10613 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe)); 10614 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); 10615 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 10616 10617 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier; 10618 mode->hdisplay = (htot & 0xffff) + 1; 10619 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 10620 mode->hsync_start = (hsync & 0xffff) + 1; 10621 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 10622 mode->vdisplay = (vtot & 0xffff) + 1; 10623 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 10624 mode->vsync_start = (vsync & 0xffff) + 1; 10625 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 10626 10627 drm_mode_set_name(mode); 10628 10629 return mode; 10630 } 10631 10632 void intel_mark_busy(struct drm_device *dev) 10633 { 10634 struct drm_i915_private *dev_priv = dev->dev_private; 10635 10636 if (dev_priv->mm.busy) 10637 return; 10638 10639 intel_runtime_pm_get(dev_priv); 10640 i915_update_gfx_val(dev_priv); 10641 if (INTEL_INFO(dev)->gen >= 6) 10642 gen6_rps_busy(dev_priv); 10643 dev_priv->mm.busy = true; 10644 } 10645 10646 void intel_mark_idle(struct drm_device *dev) 10647 { 10648 struct drm_i915_private *dev_priv = dev->dev_private; 10649 10650 if (!dev_priv->mm.busy) 10651 return; 10652 10653 dev_priv->mm.busy = false; 10654 10655 if (INTEL_INFO(dev)->gen >= 6) 10656 gen6_rps_idle(dev->dev_private); 10657 10658 intel_runtime_pm_put(dev_priv); 10659 } 10660 10661 static void intel_crtc_destroy(struct drm_crtc *crtc) 10662 { 10663 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10664 struct drm_device *dev = crtc->dev; 10665 struct intel_unpin_work *work; 10666 10667 spin_lock_irq(&dev->event_lock); 10668 work = intel_crtc->unpin_work; 10669 intel_crtc->unpin_work = NULL; 10670 spin_unlock_irq(&dev->event_lock); 10671 10672 if (work) { 10673 cancel_work_sync(&work->work); 10674 kfree(work); 10675 } 10676 10677 drm_crtc_cleanup(crtc); 10678 10679 kfree(intel_crtc); 10680 } 10681 10682 static void intel_unpin_work_fn(struct work_struct *__work) 10683 { 10684 struct intel_unpin_work *work = 10685 container_of(__work, struct intel_unpin_work, work); 10686 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 10687 struct drm_device *dev = crtc->base.dev; 10688 struct drm_plane *primary = crtc->base.primary; 10689 10690 mutex_lock(&dev->struct_mutex); 10691 intel_unpin_fb_obj(work->old_fb, primary->state); 10692 drm_gem_object_unreference(&work->pending_flip_obj->base); 10693 10694 if (work->flip_queued_req) 10695 i915_gem_request_assign(&work->flip_queued_req, NULL); 10696 mutex_unlock(&dev->struct_mutex); 10697 10698 intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); 10699 drm_framebuffer_unreference(work->old_fb); 10700 10701 BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); 10702 atomic_dec(&crtc->unpin_work_count); 10703 10704 kfree(work); 10705 } 10706 10707 static void do_intel_finish_page_flip(struct drm_device *dev, 10708 struct drm_crtc *crtc) 10709 { 10710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10711 struct intel_unpin_work *work; 10712 unsigned long flags; 10713 10714 /* Ignore early vblank irqs */ 10715 if (intel_crtc == NULL) 10716 return; 10717 10718 /* 10719 * This is called both by irq handlers and the reset code (to complete 10720 * lost pageflips) so needs the full irqsave spinlocks. 10721 */ 10722 spin_lock_irqsave(&dev->event_lock, flags); 10723 work = intel_crtc->unpin_work; 10724 10725 /* Ensure we don't miss a work->pending update ... */ 10726 smp_rmb(); 10727 10728 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 10729 spin_unlock_irqrestore(&dev->event_lock, flags); 10730 return; 10731 } 10732 10733 page_flip_completed(intel_crtc); 10734 10735 spin_unlock_irqrestore(&dev->event_lock, flags); 10736 } 10737 10738 void intel_finish_page_flip(struct drm_device *dev, int pipe) 10739 { 10740 struct drm_i915_private *dev_priv = dev->dev_private; 10741 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 10742 10743 do_intel_finish_page_flip(dev, crtc); 10744 } 10745 10746 void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 10747 { 10748 struct drm_i915_private *dev_priv = dev->dev_private; 10749 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 10750 10751 do_intel_finish_page_flip(dev, crtc); 10752 } 10753 10754 /* Is 'a' after or equal to 'b'? */ 10755 static bool g4x_flip_count_after_eq(u32 a, u32 b) 10756 { 10757 return !((a - b) & 0x80000000); 10758 } 10759 10760 static bool page_flip_finished(struct intel_crtc *crtc) 10761 { 10762 struct drm_device *dev = crtc->base.dev; 10763 struct drm_i915_private *dev_priv = dev->dev_private; 10764 10765 if (i915_reset_in_progress(&dev_priv->gpu_error) || 10766 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 10767 return true; 10768 10769 /* 10770 * The relevant registers doen't exist on pre-ctg. 10771 * As the flip done interrupt doesn't trigger for mmio 10772 * flips on gmch platforms, a flip count check isn't 10773 * really needed there. But since ctg has the registers, 10774 * include it in the check anyway. 10775 */ 10776 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev)) 10777 return true; 10778 10779 /* 10780 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 10781 * used the same base address. In that case the mmio flip might 10782 * have completed, but the CS hasn't even executed the flip yet. 10783 * 10784 * A flip count check isn't enough as the CS might have updated 10785 * the base address just after start of vblank, but before we 10786 * managed to process the interrupt. This means we'd complete the 10787 * CS flip too soon. 10788 * 10789 * Combining both checks should get us a good enough result. It may 10790 * still happen that the CS flip has been executed, but has not 10791 * yet actually completed. But in case the base address is the same 10792 * anyway, we don't really care. 10793 */ 10794 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 10795 crtc->unpin_work->gtt_offset && 10796 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)), 10797 crtc->unpin_work->flip_count); 10798 } 10799 10800 void intel_prepare_page_flip(struct drm_device *dev, int plane) 10801 { 10802 struct drm_i915_private *dev_priv = dev->dev_private; 10803 struct intel_crtc *intel_crtc = 10804 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 10805 unsigned long flags; 10806 10807 10808 /* 10809 * This is called both by irq handlers and the reset code (to complete 10810 * lost pageflips) so needs the full irqsave spinlocks. 10811 * 10812 * NB: An MMIO update of the plane base pointer will also 10813 * generate a page-flip completion irq, i.e. every modeset 10814 * is also accompanied by a spurious intel_prepare_page_flip(). 10815 */ 10816 spin_lock_irqsave(&dev->event_lock, flags); 10817 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 10818 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 10819 spin_unlock_irqrestore(&dev->event_lock, flags); 10820 } 10821 10822 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 10823 { 10824 /* Ensure that the work item is consistent when activating it ... */ 10825 smp_wmb(); 10826 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); 10827 /* and that it is marked active as soon as the irq could fire. */ 10828 smp_wmb(); 10829 } 10830 10831 static int intel_gen2_queue_flip(struct drm_device *dev, 10832 struct drm_crtc *crtc, 10833 struct drm_framebuffer *fb, 10834 struct drm_i915_gem_object *obj, 10835 struct drm_i915_gem_request *req, 10836 uint32_t flags) 10837 { 10838 struct intel_engine_cs *ring = req->ring; 10839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10840 u32 flip_mask; 10841 int ret; 10842 10843 ret = intel_ring_begin(req, 6); 10844 if (ret) 10845 return ret; 10846 10847 /* Can't queue multiple flips, so wait for the previous 10848 * one to finish before executing the next. 10849 */ 10850 if (intel_crtc->plane) 10851 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 10852 else 10853 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 10854 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 10855 intel_ring_emit(ring, MI_NOOP); 10856 intel_ring_emit(ring, MI_DISPLAY_FLIP | 10857 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 10858 intel_ring_emit(ring, fb->pitches[0]); 10859 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 10860 intel_ring_emit(ring, 0); /* aux display base address, unused */ 10861 10862 intel_mark_page_flip_active(intel_crtc); 10863 return 0; 10864 } 10865 10866 static int intel_gen3_queue_flip(struct drm_device *dev, 10867 struct drm_crtc *crtc, 10868 struct drm_framebuffer *fb, 10869 struct drm_i915_gem_object *obj, 10870 struct drm_i915_gem_request *req, 10871 uint32_t flags) 10872 { 10873 struct intel_engine_cs *ring = req->ring; 10874 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10875 u32 flip_mask; 10876 int ret; 10877 10878 ret = intel_ring_begin(req, 6); 10879 if (ret) 10880 return ret; 10881 10882 if (intel_crtc->plane) 10883 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 10884 else 10885 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 10886 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 10887 intel_ring_emit(ring, MI_NOOP); 10888 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 10889 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 10890 intel_ring_emit(ring, fb->pitches[0]); 10891 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 10892 intel_ring_emit(ring, MI_NOOP); 10893 10894 intel_mark_page_flip_active(intel_crtc); 10895 return 0; 10896 } 10897 10898 static int intel_gen4_queue_flip(struct drm_device *dev, 10899 struct drm_crtc *crtc, 10900 struct drm_framebuffer *fb, 10901 struct drm_i915_gem_object *obj, 10902 struct drm_i915_gem_request *req, 10903 uint32_t flags) 10904 { 10905 struct intel_engine_cs *ring = req->ring; 10906 struct drm_i915_private *dev_priv = dev->dev_private; 10907 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10908 uint32_t pf, pipesrc; 10909 int ret; 10910 10911 ret = intel_ring_begin(req, 4); 10912 if (ret) 10913 return ret; 10914 10915 /* i965+ uses the linear or tiled offsets from the 10916 * Display Registers (which do not change across a page-flip) 10917 * so we need only reprogram the base address. 10918 */ 10919 intel_ring_emit(ring, MI_DISPLAY_FLIP | 10920 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 10921 intel_ring_emit(ring, fb->pitches[0]); 10922 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset | 10923 obj->tiling_mode); 10924 10925 /* XXX Enabling the panel-fitter across page-flip is so far 10926 * untested on non-native modes, so ignore it for now. 10927 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 10928 */ 10929 pf = 0; 10930 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10931 intel_ring_emit(ring, pf | pipesrc); 10932 10933 intel_mark_page_flip_active(intel_crtc); 10934 return 0; 10935 } 10936 10937 static int intel_gen6_queue_flip(struct drm_device *dev, 10938 struct drm_crtc *crtc, 10939 struct drm_framebuffer *fb, 10940 struct drm_i915_gem_object *obj, 10941 struct drm_i915_gem_request *req, 10942 uint32_t flags) 10943 { 10944 struct intel_engine_cs *ring = req->ring; 10945 struct drm_i915_private *dev_priv = dev->dev_private; 10946 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10947 uint32_t pf, pipesrc; 10948 int ret; 10949 10950 ret = intel_ring_begin(req, 4); 10951 if (ret) 10952 return ret; 10953 10954 intel_ring_emit(ring, MI_DISPLAY_FLIP | 10955 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 10956 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 10957 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 10958 10959 /* Contrary to the suggestions in the documentation, 10960 * "Enable Panel Fitter" does not seem to be required when page 10961 * flipping with a non-native mode, and worse causes a normal 10962 * modeset to fail. 10963 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 10964 */ 10965 pf = 0; 10966 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10967 intel_ring_emit(ring, pf | pipesrc); 10968 10969 intel_mark_page_flip_active(intel_crtc); 10970 return 0; 10971 } 10972 10973 static int intel_gen7_queue_flip(struct drm_device *dev, 10974 struct drm_crtc *crtc, 10975 struct drm_framebuffer *fb, 10976 struct drm_i915_gem_object *obj, 10977 struct drm_i915_gem_request *req, 10978 uint32_t flags) 10979 { 10980 struct intel_engine_cs *ring = req->ring; 10981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10982 uint32_t plane_bit = 0; 10983 int len, ret; 10984 10985 switch (intel_crtc->plane) { 10986 case PLANE_A: 10987 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 10988 break; 10989 case PLANE_B: 10990 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 10991 break; 10992 case PLANE_C: 10993 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 10994 break; 10995 default: 10996 WARN_ONCE(1, "unknown plane in flip command\n"); 10997 return -ENODEV; 10998 } 10999 11000 len = 4; 11001 if (ring->id == RCS) { 11002 len += 6; 11003 /* 11004 * On Gen 8, SRM is now taking an extra dword to accommodate 11005 * 48bits addresses, and we need a NOOP for the batch size to 11006 * stay even. 11007 */ 11008 if (IS_GEN8(dev)) 11009 len += 2; 11010 } 11011 11012 /* 11013 * BSpec MI_DISPLAY_FLIP for IVB: 11014 * "The full packet must be contained within the same cache line." 11015 * 11016 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 11017 * cacheline, if we ever start emitting more commands before 11018 * the MI_DISPLAY_FLIP we may need to first emit everything else, 11019 * then do the cacheline alignment, and finally emit the 11020 * MI_DISPLAY_FLIP. 11021 */ 11022 ret = intel_ring_cacheline_align(req); 11023 if (ret) 11024 return ret; 11025 11026 ret = intel_ring_begin(req, len); 11027 if (ret) 11028 return ret; 11029 11030 /* Unmask the flip-done completion message. Note that the bspec says that 11031 * we should do this for both the BCS and RCS, and that we must not unmask 11032 * more than one flip event at any time (or ensure that one flip message 11033 * can be sent by waiting for flip-done prior to queueing new flips). 11034 * Experimentation says that BCS works despite DERRMR masking all 11035 * flip-done completion events and that unmasking all planes at once 11036 * for the RCS also doesn't appear to drop events. Setting the DERRMR 11037 * to zero does lead to lockups within MI_DISPLAY_FLIP. 11038 */ 11039 if (ring->id == RCS) { 11040 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 11041 intel_ring_emit(ring, DERRMR); 11042 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 11043 DERRMR_PIPEB_PRI_FLIP_DONE | 11044 DERRMR_PIPEC_PRI_FLIP_DONE)); 11045 if (IS_GEN8(dev)) 11046 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | 11047 MI_SRM_LRM_GLOBAL_GTT); 11048 else 11049 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | 11050 MI_SRM_LRM_GLOBAL_GTT); 11051 intel_ring_emit(ring, DERRMR); 11052 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 11053 if (IS_GEN8(dev)) { 11054 intel_ring_emit(ring, 0); 11055 intel_ring_emit(ring, MI_NOOP); 11056 } 11057 } 11058 11059 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 11060 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 11061 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 11062 intel_ring_emit(ring, (MI_NOOP)); 11063 11064 intel_mark_page_flip_active(intel_crtc); 11065 return 0; 11066 } 11067 11068 static bool use_mmio_flip(struct intel_engine_cs *ring, 11069 struct drm_i915_gem_object *obj) 11070 { 11071 /* 11072 * This is not being used for older platforms, because 11073 * non-availability of flip done interrupt forces us to use 11074 * CS flips. Older platforms derive flip done using some clever 11075 * tricks involving the flip_pending status bits and vblank irqs. 11076 * So using MMIO flips there would disrupt this mechanism. 11077 */ 11078 11079 if (ring == NULL) 11080 return true; 11081 11082 if (INTEL_INFO(ring->dev)->gen < 5) 11083 return false; 11084 11085 if (i915.use_mmio_flip < 0) 11086 return false; 11087 else if (i915.use_mmio_flip > 0) 11088 return true; 11089 else if (i915.enable_execlists) 11090 return true; 11091 else 11092 return ring != i915_gem_request_get_ring(obj->last_write_req); 11093 } 11094 11095 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) 11096 { 11097 struct drm_device *dev = intel_crtc->base.dev; 11098 struct drm_i915_private *dev_priv = dev->dev_private; 11099 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11100 const enum i915_pipe pipe = intel_crtc->pipe; 11101 u32 ctl, stride; 11102 11103 ctl = I915_READ(PLANE_CTL(pipe, 0)); 11104 ctl &= ~PLANE_CTL_TILED_MASK; 11105 switch (fb->modifier[0]) { 11106 case DRM_FORMAT_MOD_NONE: 11107 break; 11108 case I915_FORMAT_MOD_X_TILED: 11109 ctl |= PLANE_CTL_TILED_X; 11110 break; 11111 case I915_FORMAT_MOD_Y_TILED: 11112 ctl |= PLANE_CTL_TILED_Y; 11113 break; 11114 case I915_FORMAT_MOD_Yf_TILED: 11115 ctl |= PLANE_CTL_TILED_YF; 11116 break; 11117 default: 11118 MISSING_CASE(fb->modifier[0]); 11119 } 11120 11121 /* 11122 * The stride is either expressed as a multiple of 64 bytes chunks for 11123 * linear buffers or in number of tiles for tiled buffers. 11124 */ 11125 stride = fb->pitches[0] / 11126 intel_fb_stride_alignment(dev, fb->modifier[0], 11127 fb->pixel_format); 11128 11129 /* 11130 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on 11131 * PLANE_SURF updates, the update is then guaranteed to be atomic. 11132 */ 11133 I915_WRITE(PLANE_CTL(pipe, 0), ctl); 11134 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 11135 11136 I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset); 11137 POSTING_READ(PLANE_SURF(pipe, 0)); 11138 } 11139 11140 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) 11141 { 11142 struct drm_device *dev = intel_crtc->base.dev; 11143 struct drm_i915_private *dev_priv = dev->dev_private; 11144 struct intel_framebuffer *intel_fb = 11145 to_intel_framebuffer(intel_crtc->base.primary->fb); 11146 struct drm_i915_gem_object *obj = intel_fb->obj; 11147 u32 dspcntr; 11148 u32 reg; 11149 11150 reg = DSPCNTR(intel_crtc->plane); 11151 dspcntr = I915_READ(reg); 11152 11153 if (obj->tiling_mode != I915_TILING_NONE) 11154 dspcntr |= DISPPLANE_TILED; 11155 else 11156 dspcntr &= ~DISPPLANE_TILED; 11157 11158 I915_WRITE(reg, dspcntr); 11159 11160 I915_WRITE(DSPSURF(intel_crtc->plane), 11161 intel_crtc->unpin_work->gtt_offset); 11162 POSTING_READ(DSPSURF(intel_crtc->plane)); 11163 11164 } 11165 11166 /* 11167 * XXX: This is the temporary way to update the plane registers until we get 11168 * around to using the usual plane update functions for MMIO flips 11169 */ 11170 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) 11171 { 11172 struct drm_device *dev = intel_crtc->base.dev; 11173 u32 start_vbl_count; 11174 11175 intel_mark_page_flip_active(intel_crtc); 11176 11177 intel_pipe_update_start(intel_crtc, &start_vbl_count); 11178 11179 if (INTEL_INFO(dev)->gen >= 9) 11180 skl_do_mmio_flip(intel_crtc); 11181 else 11182 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 11183 ilk_do_mmio_flip(intel_crtc); 11184 11185 intel_pipe_update_end(intel_crtc, start_vbl_count); 11186 } 11187 11188 static void intel_mmio_flip_work_func(struct work_struct *work) 11189 { 11190 struct intel_mmio_flip *mmio_flip = 11191 container_of(work, struct intel_mmio_flip, work); 11192 11193 if (mmio_flip->req) 11194 WARN_ON(__i915_wait_request(mmio_flip->req, 11195 mmio_flip->crtc->reset_counter, 11196 false, NULL, 11197 &mmio_flip->i915->rps.mmioflips)); 11198 11199 intel_do_mmio_flip(mmio_flip->crtc); 11200 11201 i915_gem_request_unreference__unlocked(mmio_flip->req); 11202 kfree(mmio_flip); 11203 } 11204 11205 static int intel_queue_mmio_flip(struct drm_device *dev, 11206 struct drm_crtc *crtc, 11207 struct drm_framebuffer *fb, 11208 struct drm_i915_gem_object *obj, 11209 struct intel_engine_cs *ring, 11210 uint32_t flags) 11211 { 11212 struct intel_mmio_flip *mmio_flip; 11213 11214 mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK); 11215 if (mmio_flip == NULL) 11216 return -ENOMEM; 11217 11218 mmio_flip->i915 = to_i915(dev); 11219 mmio_flip->req = i915_gem_request_reference(obj->last_write_req); 11220 mmio_flip->crtc = to_intel_crtc(crtc); 11221 11222 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11223 schedule_work(&mmio_flip->work); 11224 11225 return 0; 11226 } 11227 11228 static int intel_default_queue_flip(struct drm_device *dev, 11229 struct drm_crtc *crtc, 11230 struct drm_framebuffer *fb, 11231 struct drm_i915_gem_object *obj, 11232 struct drm_i915_gem_request *req, 11233 uint32_t flags) 11234 { 11235 return -ENODEV; 11236 } 11237 11238 static bool __intel_pageflip_stall_check(struct drm_device *dev, 11239 struct drm_crtc *crtc) 11240 { 11241 struct drm_i915_private *dev_priv = dev->dev_private; 11242 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11243 struct intel_unpin_work *work = intel_crtc->unpin_work; 11244 u32 addr; 11245 11246 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) 11247 return true; 11248 11249 if (!work->enable_stall_check) 11250 return false; 11251 11252 if (work->flip_ready_vblank == 0) { 11253 if (work->flip_queued_req && 11254 !i915_gem_request_completed(work->flip_queued_req, true)) 11255 return false; 11256 11257 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11258 } 11259 11260 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11261 return false; 11262 11263 /* Potential stall - if we see that the flip has happened, 11264 * assume a missed interrupt. */ 11265 if (INTEL_INFO(dev)->gen >= 4) 11266 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11267 else 11268 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11269 11270 /* There is a potential issue here with a false positive after a flip 11271 * to the same address. We could address this by checking for a 11272 * non-incrementing frame counter. 11273 */ 11274 return addr == work->gtt_offset; 11275 } 11276 11277 void intel_check_page_flip(struct drm_device *dev, int pipe) 11278 { 11279 struct drm_i915_private *dev_priv = dev->dev_private; 11280 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11281 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11282 struct intel_unpin_work *work; 11283 11284 // WARN_ON(!in_interrupt()); 11285 11286 if (crtc == NULL) 11287 return; 11288 11289 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 11290 work = intel_crtc->unpin_work; 11291 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { 11292 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 11293 work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 11294 page_flip_completed(intel_crtc); 11295 work = NULL; 11296 } 11297 if (work != NULL && 11298 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) 11299 intel_queue_rps_boost_for_request(dev, work->flip_queued_req); 11300 lockmgr(&dev->event_lock, LK_RELEASE); 11301 } 11302 11303 static int intel_crtc_page_flip(struct drm_crtc *crtc, 11304 struct drm_framebuffer *fb, 11305 struct drm_pending_vblank_event *event, 11306 uint32_t page_flip_flags) 11307 { 11308 struct drm_device *dev = crtc->dev; 11309 struct drm_i915_private *dev_priv = dev->dev_private; 11310 struct drm_framebuffer *old_fb = crtc->primary->fb; 11311 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11312 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11313 struct drm_plane *primary = crtc->primary; 11314 enum i915_pipe pipe = intel_crtc->pipe; 11315 struct intel_unpin_work *work; 11316 struct intel_engine_cs *ring; 11317 bool mmio_flip; 11318 struct drm_i915_gem_request *request = NULL; 11319 int ret; 11320 11321 /* 11322 * drm_mode_page_flip_ioctl() should already catch this, but double 11323 * check to be safe. In the future we may enable pageflipping from 11324 * a disabled primary plane. 11325 */ 11326 if (WARN_ON(intel_fb_obj(old_fb) == NULL)) 11327 return -EBUSY; 11328 11329 /* Can't change pixel format via MI display flips. */ 11330 if (fb->pixel_format != crtc->primary->fb->pixel_format) 11331 return -EINVAL; 11332 11333 /* 11334 * TILEOFF/LINOFF registers can't be changed via MI display flips. 11335 * Note that pitch changes could also affect these register. 11336 */ 11337 if (INTEL_INFO(dev)->gen > 3 && 11338 (fb->offsets[0] != crtc->primary->fb->offsets[0] || 11339 fb->pitches[0] != crtc->primary->fb->pitches[0])) 11340 return -EINVAL; 11341 11342 if (i915_terminally_wedged(&dev_priv->gpu_error)) 11343 goto out_hang; 11344 11345 work = kzalloc(sizeof(*work), GFP_KERNEL); 11346 if (work == NULL) 11347 return -ENOMEM; 11348 11349 work->event = event; 11350 work->crtc = crtc; 11351 work->old_fb = old_fb; 11352 INIT_WORK(&work->work, intel_unpin_work_fn); 11353 11354 ret = drm_crtc_vblank_get(crtc); 11355 if (ret) 11356 goto free_work; 11357 11358 /* We borrow the event spin lock for protecting unpin_work */ 11359 spin_lock_irq(&dev->event_lock); 11360 if (intel_crtc->unpin_work) { 11361 /* Before declaring the flip queue wedged, check if 11362 * the hardware completed the operation behind our backs. 11363 */ 11364 if (__intel_pageflip_stall_check(dev, crtc)) { 11365 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 11366 page_flip_completed(intel_crtc); 11367 } else { 11368 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 11369 spin_unlock_irq(&dev->event_lock); 11370 11371 drm_crtc_vblank_put(crtc); 11372 kfree(work); 11373 return -EBUSY; 11374 } 11375 } 11376 intel_crtc->unpin_work = work; 11377 spin_unlock_irq(&dev->event_lock); 11378 11379 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 11380 flush_workqueue(dev_priv->wq); 11381 11382 /* Reference the objects for the scheduled work. */ 11383 drm_framebuffer_reference(work->old_fb); 11384 drm_gem_object_reference(&obj->base); 11385 11386 crtc->primary->fb = fb; 11387 update_state_fb(crtc->primary); 11388 11389 work->pending_flip_obj = obj; 11390 11391 ret = i915_mutex_lock_interruptible(dev); 11392 if (ret) 11393 goto cleanup; 11394 11395 atomic_inc(&intel_crtc->unpin_work_count); 11396 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 11397 11398 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 11399 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; 11400 11401 if (IS_VALLEYVIEW(dev)) { 11402 ring = &dev_priv->ring[BCS]; 11403 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) 11404 /* vlv: DISPLAY_FLIP fails to change tiling */ 11405 ring = NULL; 11406 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 11407 ring = &dev_priv->ring[BCS]; 11408 } else if (INTEL_INFO(dev)->gen >= 7) { 11409 ring = i915_gem_request_get_ring(obj->last_write_req); 11410 if (ring == NULL || ring->id != RCS) 11411 ring = &dev_priv->ring[BCS]; 11412 } else { 11413 ring = &dev_priv->ring[RCS]; 11414 } 11415 11416 mmio_flip = use_mmio_flip(ring, obj); 11417 11418 /* When using CS flips, we want to emit semaphores between rings. 11419 * However, when using mmio flips we will create a task to do the 11420 * synchronisation, so all we want here is to pin the framebuffer 11421 * into the display plane and skip any waits. 11422 */ 11423 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, 11424 crtc->primary->state, 11425 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring, &request); 11426 if (ret) 11427 goto cleanup_pending; 11428 11429 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) 11430 + intel_crtc->dspaddr_offset; 11431 11432 if (mmio_flip) { 11433 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 11434 page_flip_flags); 11435 if (ret) 11436 goto cleanup_unpin; 11437 11438 i915_gem_request_assign(&work->flip_queued_req, 11439 obj->last_write_req); 11440 } else { 11441 if (!request) { 11442 ret = i915_gem_request_alloc(ring, ring->default_context, &request); 11443 if (ret) 11444 goto cleanup_unpin; 11445 } 11446 11447 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11448 page_flip_flags); 11449 if (ret) 11450 goto cleanup_unpin; 11451 11452 i915_gem_request_assign(&work->flip_queued_req, request); 11453 } 11454 11455 if (request) 11456 i915_add_request_no_flush(request); 11457 11458 work->flip_queued_vblank = drm_crtc_vblank_count(crtc); 11459 work->enable_stall_check = true; 11460 11461 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, 11462 to_intel_plane(primary)->frontbuffer_bit); 11463 mutex_unlock(&dev->struct_mutex); 11464 11465 intel_fbc_disable_crtc(intel_crtc); 11466 intel_frontbuffer_flip_prepare(dev, 11467 to_intel_plane(primary)->frontbuffer_bit); 11468 11469 trace_i915_flip_request(intel_crtc->plane, obj); 11470 11471 return 0; 11472 11473 cleanup_unpin: 11474 intel_unpin_fb_obj(fb, crtc->primary->state); 11475 cleanup_pending: 11476 if (request) 11477 i915_gem_request_cancel(request); 11478 atomic_dec(&intel_crtc->unpin_work_count); 11479 mutex_unlock(&dev->struct_mutex); 11480 cleanup: 11481 crtc->primary->fb = old_fb; 11482 update_state_fb(crtc->primary); 11483 11484 drm_gem_object_unreference_unlocked(&obj->base); 11485 drm_framebuffer_unreference(work->old_fb); 11486 11487 spin_lock_irq(&dev->event_lock); 11488 intel_crtc->unpin_work = NULL; 11489 spin_unlock_irq(&dev->event_lock); 11490 11491 drm_crtc_vblank_put(crtc); 11492 free_work: 11493 kfree(work); 11494 11495 if (ret == -EIO) { 11496 struct drm_atomic_state *state; 11497 struct drm_plane_state *plane_state; 11498 11499 out_hang: 11500 state = drm_atomic_state_alloc(dev); 11501 if (!state) 11502 return -ENOMEM; 11503 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 11504 11505 retry: 11506 plane_state = drm_atomic_get_plane_state(state, primary); 11507 ret = PTR_ERR_OR_ZERO(plane_state); 11508 if (!ret) { 11509 drm_atomic_set_fb_for_plane(plane_state, fb); 11510 11511 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 11512 if (!ret) 11513 ret = drm_atomic_commit(state); 11514 } 11515 11516 if (ret == -EDEADLK) { 11517 drm_modeset_backoff(state->acquire_ctx); 11518 drm_atomic_state_clear(state); 11519 goto retry; 11520 } 11521 11522 if (ret) 11523 drm_atomic_state_free(state); 11524 11525 if (ret == 0 && event) { 11526 spin_lock_irq(&dev->event_lock); 11527 drm_send_vblank_event(dev, pipe, event); 11528 spin_unlock_irq(&dev->event_lock); 11529 } 11530 } 11531 return ret; 11532 } 11533 11534 11535 /** 11536 * intel_wm_need_update - Check whether watermarks need updating 11537 * @plane: drm plane 11538 * @state: new plane state 11539 * 11540 * Check current plane state versus the new one to determine whether 11541 * watermarks need to be recalculated. 11542 * 11543 * Returns true or false. 11544 */ 11545 static bool intel_wm_need_update(struct drm_plane *plane, 11546 struct drm_plane_state *state) 11547 { 11548 /* Update watermarks on tiling changes. */ 11549 if (!plane->state->fb || !state->fb || 11550 plane->state->fb->modifier[0] != state->fb->modifier[0] || 11551 plane->state->rotation != state->rotation) 11552 return true; 11553 11554 if (plane->state->crtc_w != state->crtc_w) 11555 return true; 11556 11557 return false; 11558 } 11559 11560 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, 11561 struct drm_plane_state *plane_state) 11562 { 11563 struct drm_crtc *crtc = crtc_state->crtc; 11564 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11565 struct drm_plane *plane = plane_state->plane; 11566 struct drm_device *dev = crtc->dev; 11567 struct drm_i915_private *dev_priv = dev->dev_private; 11568 struct intel_plane_state *old_plane_state = 11569 to_intel_plane_state(plane->state); 11570 int idx = intel_crtc->base.base.id, ret; 11571 int i = drm_plane_index(plane); 11572 bool mode_changed = needs_modeset(crtc_state); 11573 bool was_crtc_enabled = crtc->state->active; 11574 bool is_crtc_enabled = crtc_state->active; 11575 11576 bool turn_off, turn_on, visible, was_visible; 11577 struct drm_framebuffer *fb = plane_state->fb; 11578 11579 if (crtc_state && INTEL_INFO(dev)->gen >= 9 && 11580 plane->type != DRM_PLANE_TYPE_CURSOR) { 11581 ret = skl_update_scaler_plane( 11582 to_intel_crtc_state(crtc_state), 11583 to_intel_plane_state(plane_state)); 11584 if (ret) 11585 return ret; 11586 } 11587 11588 /* 11589 * Disabling a plane is always okay; we just need to update 11590 * fb tracking in a special way since cleanup_fb() won't 11591 * get called by the plane helpers. 11592 */ 11593 if (old_plane_state->base.fb && !fb) 11594 intel_crtc->atomic.disabled_planes |= 1 << i; 11595 11596 was_visible = old_plane_state->visible; 11597 visible = to_intel_plane_state(plane_state)->visible; 11598 11599 if (!was_crtc_enabled && WARN_ON(was_visible)) 11600 was_visible = false; 11601 11602 if (!is_crtc_enabled && WARN_ON(visible)) 11603 visible = false; 11604 11605 if (!was_visible && !visible) 11606 return 0; 11607 11608 turn_off = was_visible && (!visible || mode_changed); 11609 turn_on = visible && (!was_visible || mode_changed); 11610 11611 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, 11612 plane->base.id, fb ? fb->base.id : -1); 11613 11614 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", 11615 plane->base.id, was_visible, visible, 11616 turn_off, turn_on, mode_changed); 11617 11618 if (turn_on) { 11619 intel_crtc->atomic.update_wm_pre = true; 11620 /* must disable cxsr around plane enable/disable */ 11621 if (plane->type != DRM_PLANE_TYPE_CURSOR) { 11622 intel_crtc->atomic.disable_cxsr = true; 11623 /* to potentially re-enable cxsr */ 11624 intel_crtc->atomic.wait_vblank = true; 11625 intel_crtc->atomic.update_wm_post = true; 11626 } 11627 } else if (turn_off) { 11628 intel_crtc->atomic.update_wm_post = true; 11629 /* must disable cxsr around plane enable/disable */ 11630 if (plane->type != DRM_PLANE_TYPE_CURSOR) { 11631 if (is_crtc_enabled) 11632 intel_crtc->atomic.wait_vblank = true; 11633 intel_crtc->atomic.disable_cxsr = true; 11634 } 11635 } else if (intel_wm_need_update(plane, plane_state)) { 11636 intel_crtc->atomic.update_wm_pre = true; 11637 } 11638 11639 if (visible) 11640 intel_crtc->atomic.fb_bits |= 11641 to_intel_plane(plane)->frontbuffer_bit; 11642 11643 switch (plane->type) { 11644 case DRM_PLANE_TYPE_PRIMARY: 11645 intel_crtc->atomic.wait_for_flips = true; 11646 intel_crtc->atomic.pre_disable_primary = turn_off; 11647 intel_crtc->atomic.post_enable_primary = turn_on; 11648 11649 if (turn_off) { 11650 /* 11651 * FIXME: Actually if we will still have any other 11652 * plane enabled on the pipe we could let IPS enabled 11653 * still, but for now lets consider that when we make 11654 * primary invisible by setting DSPCNTR to 0 on 11655 * update_primary_plane function IPS needs to be 11656 * disable. 11657 */ 11658 intel_crtc->atomic.disable_ips = true; 11659 11660 intel_crtc->atomic.disable_fbc = true; 11661 } 11662 11663 /* 11664 * FBC does not work on some platforms for rotated 11665 * planes, so disable it when rotation is not 0 and 11666 * update it when rotation is set back to 0. 11667 * 11668 * FIXME: This is redundant with the fbc update done in 11669 * the primary plane enable function except that that 11670 * one is done too late. We eventually need to unify 11671 * this. 11672 */ 11673 11674 if (visible && 11675 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 11676 dev_priv->fbc.crtc == intel_crtc && 11677 plane_state->rotation != BIT(DRM_ROTATE_0)) 11678 intel_crtc->atomic.disable_fbc = true; 11679 11680 /* 11681 * BDW signals flip done immediately if the plane 11682 * is disabled, even if the plane enable is already 11683 * armed to occur at the next vblank :( 11684 */ 11685 if (turn_on && IS_BROADWELL(dev)) 11686 intel_crtc->atomic.wait_vblank = true; 11687 11688 intel_crtc->atomic.update_fbc |= visible || mode_changed; 11689 break; 11690 case DRM_PLANE_TYPE_CURSOR: 11691 break; 11692 case DRM_PLANE_TYPE_OVERLAY: 11693 if (turn_off && !mode_changed) { 11694 intel_crtc->atomic.wait_vblank = true; 11695 intel_crtc->atomic.update_sprite_watermarks |= 11696 1 << i; 11697 } 11698 } 11699 return 0; 11700 } 11701 11702 static bool encoders_cloneable(const struct intel_encoder *a, 11703 const struct intel_encoder *b) 11704 { 11705 /* masks could be asymmetric, so check both ways */ 11706 return a == b || (a->cloneable & (1 << b->type) && 11707 b->cloneable & (1 << a->type)); 11708 } 11709 11710 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 11711 struct intel_crtc *crtc, 11712 struct intel_encoder *encoder) 11713 { 11714 struct intel_encoder *source_encoder; 11715 struct drm_connector *connector; 11716 struct drm_connector_state *connector_state; 11717 int i; 11718 11719 for_each_connector_in_state(state, connector, connector_state, i) { 11720 if (connector_state->crtc != &crtc->base) 11721 continue; 11722 11723 source_encoder = 11724 to_intel_encoder(connector_state->best_encoder); 11725 if (!encoders_cloneable(encoder, source_encoder)) 11726 return false; 11727 } 11728 11729 return true; 11730 } 11731 11732 static bool check_encoder_cloning(struct drm_atomic_state *state, 11733 struct intel_crtc *crtc) 11734 { 11735 struct intel_encoder *encoder; 11736 struct drm_connector *connector; 11737 struct drm_connector_state *connector_state; 11738 int i; 11739 11740 for_each_connector_in_state(state, connector, connector_state, i) { 11741 if (connector_state->crtc != &crtc->base) 11742 continue; 11743 11744 encoder = to_intel_encoder(connector_state->best_encoder); 11745 if (!check_single_encoder_cloning(state, crtc, encoder)) 11746 return false; 11747 } 11748 11749 return true; 11750 } 11751 11752 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 11753 struct drm_crtc_state *crtc_state) 11754 { 11755 struct drm_device *dev = crtc->dev; 11756 struct drm_i915_private *dev_priv = dev->dev_private; 11757 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11758 struct intel_crtc_state *pipe_config = 11759 to_intel_crtc_state(crtc_state); 11760 struct drm_atomic_state *state = crtc_state->state; 11761 int ret; 11762 bool mode_changed = needs_modeset(crtc_state); 11763 11764 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) { 11765 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 11766 return -EINVAL; 11767 } 11768 11769 if (mode_changed && !crtc_state->active) 11770 intel_crtc->atomic.update_wm_post = true; 11771 11772 if (mode_changed && crtc_state->enable && 11773 dev_priv->display.crtc_compute_clock && 11774 !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) { 11775 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 11776 pipe_config); 11777 if (ret) 11778 return ret; 11779 } 11780 11781 ret = 0; 11782 if (INTEL_INFO(dev)->gen >= 9) { 11783 if (mode_changed) 11784 ret = skl_update_scaler_crtc(pipe_config); 11785 11786 if (!ret) 11787 ret = intel_atomic_setup_scalers(dev, intel_crtc, 11788 pipe_config); 11789 } 11790 11791 return ret; 11792 } 11793 11794 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11795 .mode_set_base_atomic = intel_pipe_set_base_atomic, 11796 .load_lut = intel_crtc_load_lut, 11797 .atomic_begin = intel_begin_crtc_commit, 11798 .atomic_flush = intel_finish_crtc_commit, 11799 .atomic_check = intel_crtc_atomic_check, 11800 }; 11801 11802 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11803 { 11804 struct intel_connector *connector; 11805 11806 for_each_intel_connector(dev, connector) { 11807 if (connector->base.encoder) { 11808 connector->base.state->best_encoder = 11809 connector->base.encoder; 11810 connector->base.state->crtc = 11811 connector->base.encoder->crtc; 11812 } else { 11813 connector->base.state->best_encoder = NULL; 11814 connector->base.state->crtc = NULL; 11815 } 11816 } 11817 } 11818 11819 static void 11820 connected_sink_compute_bpp(struct intel_connector *connector, 11821 struct intel_crtc_state *pipe_config) 11822 { 11823 int bpp = pipe_config->pipe_bpp; 11824 11825 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", 11826 connector->base.base.id, 11827 connector->base.name); 11828 11829 /* Don't use an invalid EDID bpc value */ 11830 if (connector->base.display_info.bpc && 11831 connector->base.display_info.bpc * 3 < bpp) { 11832 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 11833 bpp, connector->base.display_info.bpc*3); 11834 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 11835 } 11836 11837 /* Clamp bpp to 8 on screens without EDID 1.4 */ 11838 if (connector->base.display_info.bpc == 0 && bpp > 24) { 11839 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 11840 bpp); 11841 pipe_config->pipe_bpp = 24; 11842 } 11843 } 11844 11845 static int 11846 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11847 struct intel_crtc_state *pipe_config) 11848 { 11849 struct drm_device *dev = crtc->base.dev; 11850 struct drm_atomic_state *state; 11851 struct drm_connector *connector; 11852 struct drm_connector_state *connector_state; 11853 int bpp, i; 11854 11855 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev))) 11856 bpp = 10*3; 11857 else if (INTEL_INFO(dev)->gen >= 5) 11858 bpp = 12*3; 11859 else 11860 bpp = 8*3; 11861 11862 11863 pipe_config->pipe_bpp = bpp; 11864 11865 state = pipe_config->base.state; 11866 11867 /* Clamp display bpp to EDID value */ 11868 for_each_connector_in_state(state, connector, connector_state, i) { 11869 if (connector_state->crtc != &crtc->base) 11870 continue; 11871 11872 connected_sink_compute_bpp(to_intel_connector(connector), 11873 pipe_config); 11874 } 11875 11876 return bpp; 11877 } 11878 11879 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11880 { 11881 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 11882 "type: 0x%x flags: 0x%x\n", 11883 mode->crtc_clock, 11884 mode->crtc_hdisplay, mode->crtc_hsync_start, 11885 mode->crtc_hsync_end, mode->crtc_htotal, 11886 mode->crtc_vdisplay, mode->crtc_vsync_start, 11887 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 11888 } 11889 11890 static void intel_dump_pipe_config(struct intel_crtc *crtc, 11891 struct intel_crtc_state *pipe_config, 11892 const char *context) 11893 { 11894 struct drm_device *dev = crtc->base.dev; 11895 struct drm_plane *plane; 11896 struct intel_plane *intel_plane; 11897 struct intel_plane_state *state; 11898 struct drm_framebuffer *fb; 11899 11900 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, 11901 context, pipe_config, pipe_name(crtc->pipe)); 11902 11903 DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder)); 11904 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n", 11905 pipe_config->pipe_bpp, pipe_config->dither); 11906 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11907 pipe_config->has_pch_encoder, 11908 pipe_config->fdi_lanes, 11909 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, 11910 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 11911 pipe_config->fdi_m_n.tu); 11912 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11913 pipe_config->has_dp_encoder, 11914 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 11915 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 11916 pipe_config->dp_m_n.tu); 11917 11918 DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", 11919 pipe_config->has_dp_encoder, 11920 pipe_config->dp_m2_n2.gmch_m, 11921 pipe_config->dp_m2_n2.gmch_n, 11922 pipe_config->dp_m2_n2.link_m, 11923 pipe_config->dp_m2_n2.link_n, 11924 pipe_config->dp_m2_n2.tu); 11925 11926 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", 11927 pipe_config->has_audio, 11928 pipe_config->has_infoframe); 11929 11930 DRM_DEBUG_KMS("requested mode:\n"); 11931 drm_mode_debug_printmodeline(&pipe_config->base.mode); 11932 DRM_DEBUG_KMS("adjusted mode:\n"); 11933 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 11934 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 11935 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock); 11936 DRM_DEBUG_KMS("pipe src size: %dx%d\n", 11937 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 11938 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 11939 crtc->num_scalers, 11940 pipe_config->scaler_state.scaler_users, 11941 pipe_config->scaler_state.scaler_id); 11942 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 11943 pipe_config->gmch_pfit.control, 11944 pipe_config->gmch_pfit.pgm_ratios, 11945 pipe_config->gmch_pfit.lvds_border_bits); 11946 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", 11947 pipe_config->pch_pfit.pos, 11948 pipe_config->pch_pfit.size, 11949 pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); 11950 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); 11951 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); 11952 11953 if (IS_BROXTON(dev)) { 11954 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," 11955 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " 11956 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", 11957 pipe_config->ddi_pll_sel, 11958 pipe_config->dpll_hw_state.ebb0, 11959 pipe_config->dpll_hw_state.ebb4, 11960 pipe_config->dpll_hw_state.pll0, 11961 pipe_config->dpll_hw_state.pll1, 11962 pipe_config->dpll_hw_state.pll2, 11963 pipe_config->dpll_hw_state.pll3, 11964 pipe_config->dpll_hw_state.pll6, 11965 pipe_config->dpll_hw_state.pll8, 11966 pipe_config->dpll_hw_state.pll9, 11967 pipe_config->dpll_hw_state.pll10, 11968 pipe_config->dpll_hw_state.pcsdw12); 11969 } else if (IS_SKYLAKE(dev)) { 11970 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " 11971 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", 11972 pipe_config->ddi_pll_sel, 11973 pipe_config->dpll_hw_state.ctrl1, 11974 pipe_config->dpll_hw_state.cfgcr1, 11975 pipe_config->dpll_hw_state.cfgcr2); 11976 } else if (HAS_DDI(dev)) { 11977 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", 11978 pipe_config->ddi_pll_sel, 11979 pipe_config->dpll_hw_state.wrpll); 11980 } else { 11981 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " 11982 "fp0: 0x%x, fp1: 0x%x\n", 11983 pipe_config->dpll_hw_state.dpll, 11984 pipe_config->dpll_hw_state.dpll_md, 11985 pipe_config->dpll_hw_state.fp0, 11986 pipe_config->dpll_hw_state.fp1); 11987 } 11988 11989 DRM_DEBUG_KMS("planes on this crtc\n"); 11990 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 11991 intel_plane = to_intel_plane(plane); 11992 if (intel_plane->pipe != crtc->pipe) 11993 continue; 11994 11995 state = to_intel_plane_state(plane->state); 11996 fb = state->base.fb; 11997 if (!fb) { 11998 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " 11999 "disabled, scaler_id = %d\n", 12000 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", 12001 plane->base.id, intel_plane->pipe, 12002 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1, 12003 drm_plane_index(plane), state->scaler_id); 12004 continue; 12005 } 12006 12007 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", 12008 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", 12009 plane->base.id, intel_plane->pipe, 12010 crtc->base.primary == plane ? 0 : intel_plane->plane + 1, 12011 drm_plane_index(plane)); 12012 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", 12013 fb->base.id, fb->width, fb->height, fb->pixel_format); 12014 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", 12015 state->scaler_id, 12016 state->src.x1 >> 16, state->src.y1 >> 16, 12017 drm_rect_width(&state->src) >> 16, 12018 drm_rect_height(&state->src) >> 16, 12019 state->dst.x1, state->dst.y1, 12020 drm_rect_width(&state->dst), drm_rect_height(&state->dst)); 12021 } 12022 } 12023 12024 static bool check_digital_port_conflicts(struct drm_atomic_state *state) 12025 { 12026 struct drm_device *dev = state->dev; 12027 struct intel_encoder *encoder; 12028 struct drm_connector *connector; 12029 struct drm_connector_state *connector_state; 12030 unsigned int used_ports = 0; 12031 int i; 12032 12033 /* 12034 * Walk the connector list instead of the encoder 12035 * list to detect the problem on ddi platforms 12036 * where there's just one encoder per digital port. 12037 */ 12038 for_each_connector_in_state(state, connector, connector_state, i) { 12039 if (!connector_state->best_encoder) 12040 continue; 12041 12042 encoder = to_intel_encoder(connector_state->best_encoder); 12043 12044 WARN_ON(!connector_state->crtc); 12045 12046 switch (encoder->type) { 12047 unsigned int port_mask; 12048 case INTEL_OUTPUT_UNKNOWN: 12049 if (WARN_ON(!HAS_DDI(dev))) 12050 break; 12051 case INTEL_OUTPUT_DISPLAYPORT: 12052 case INTEL_OUTPUT_HDMI: 12053 case INTEL_OUTPUT_EDP: 12054 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 12055 12056 /* the same port mustn't appear more than once */ 12057 if (used_ports & port_mask) 12058 return false; 12059 12060 used_ports |= port_mask; 12061 default: 12062 break; 12063 } 12064 } 12065 12066 return true; 12067 } 12068 12069 static void 12070 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 12071 { 12072 struct drm_crtc_state tmp_state; 12073 struct intel_crtc_scaler_state scaler_state; 12074 struct intel_dpll_hw_state dpll_hw_state; 12075 enum intel_dpll_id shared_dpll; 12076 uint32_t ddi_pll_sel; 12077 bool force_thru; 12078 12079 /* FIXME: before the switch to atomic started, a new pipe_config was 12080 * kzalloc'd. Code that depends on any field being zero should be 12081 * fixed, so that the crtc_state can be safely duplicated. For now, 12082 * only fields that are know to not cause problems are preserved. */ 12083 12084 tmp_state = crtc_state->base; 12085 scaler_state = crtc_state->scaler_state; 12086 shared_dpll = crtc_state->shared_dpll; 12087 dpll_hw_state = crtc_state->dpll_hw_state; 12088 ddi_pll_sel = crtc_state->ddi_pll_sel; 12089 force_thru = crtc_state->pch_pfit.force_thru; 12090 12091 memset(crtc_state, 0, sizeof *crtc_state); 12092 12093 crtc_state->base = tmp_state; 12094 crtc_state->scaler_state = scaler_state; 12095 crtc_state->shared_dpll = shared_dpll; 12096 crtc_state->dpll_hw_state = dpll_hw_state; 12097 crtc_state->ddi_pll_sel = ddi_pll_sel; 12098 crtc_state->pch_pfit.force_thru = force_thru; 12099 } 12100 12101 static int 12102 intel_modeset_pipe_config(struct drm_crtc *crtc, 12103 struct intel_crtc_state *pipe_config) 12104 { 12105 struct drm_atomic_state *state = pipe_config->base.state; 12106 struct intel_encoder *encoder; 12107 struct drm_connector *connector; 12108 struct drm_connector_state *connector_state; 12109 int base_bpp, ret = -EINVAL; 12110 int i; 12111 bool retry = true; 12112 12113 clear_intel_crtc_state(pipe_config); 12114 12115 pipe_config->cpu_transcoder = 12116 (enum transcoder) to_intel_crtc(crtc)->pipe; 12117 12118 /* 12119 * Sanitize sync polarity flags based on requested ones. If neither 12120 * positive or negative polarity is requested, treat this as meaning 12121 * negative polarity. 12122 */ 12123 if (!(pipe_config->base.adjusted_mode.flags & 12124 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12125 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12126 12127 if (!(pipe_config->base.adjusted_mode.flags & 12128 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12129 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12130 12131 /* Compute a starting value for pipe_config->pipe_bpp taking the source 12132 * plane pixel format and any sink constraints into account. Returns the 12133 * source plane bpp so that dithering can be selected on mismatches 12134 * after encoders and crtc also have had their say. */ 12135 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12136 pipe_config); 12137 if (base_bpp < 0) 12138 goto fail; 12139 12140 /* 12141 * Determine the real pipe dimensions. Note that stereo modes can 12142 * increase the actual pipe size due to the frame doubling and 12143 * insertion of additional space for blanks between the frame. This 12144 * is stored in the crtc timings. We use the requested mode to do this 12145 * computation to clearly distinguish it from the adjusted mode, which 12146 * can be changed by the connectors in the below retry loop. 12147 */ 12148 drm_crtc_get_hv_timing(&pipe_config->base.mode, 12149 &pipe_config->pipe_src_w, 12150 &pipe_config->pipe_src_h); 12151 12152 encoder_retry: 12153 /* Ensure the port clock defaults are reset when retrying. */ 12154 pipe_config->port_clock = 0; 12155 pipe_config->pixel_multiplier = 1; 12156 12157 /* Fill in default crtc timings, allow encoders to overwrite them. */ 12158 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 12159 CRTC_STEREO_DOUBLE); 12160 12161 /* Pass our mode to the connectors and the CRTC to give them a chance to 12162 * adjust it according to limitations or connector properties, and also 12163 * a chance to reject the mode entirely. 12164 */ 12165 for_each_connector_in_state(state, connector, connector_state, i) { 12166 if (connector_state->crtc != crtc) 12167 continue; 12168 12169 encoder = to_intel_encoder(connector_state->best_encoder); 12170 12171 if (!(encoder->compute_config(encoder, pipe_config))) { 12172 DRM_DEBUG_KMS("Encoder config failure\n"); 12173 goto fail; 12174 } 12175 } 12176 12177 /* Set default port clock if not overwritten by the encoder. Needs to be 12178 * done afterwards in case the encoder adjusts the mode. */ 12179 if (!pipe_config->port_clock) 12180 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 12181 * pipe_config->pixel_multiplier; 12182 12183 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 12184 if (ret < 0) { 12185 DRM_DEBUG_KMS("CRTC fixup failed\n"); 12186 goto fail; 12187 } 12188 12189 if (ret == RETRY) { 12190 if (WARN(!retry, "loop in pipe configuration computation\n")) { 12191 ret = -EINVAL; 12192 goto fail; 12193 } 12194 12195 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 12196 retry = false; 12197 goto encoder_retry; 12198 } 12199 12200 /* Dithering seems to not pass-through bits correctly when it should, so 12201 * only enable it on 6bpc panels. */ 12202 pipe_config->dither = pipe_config->pipe_bpp == 6*3; 12203 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", 12204 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12205 12206 fail: 12207 return ret; 12208 } 12209 12210 static void 12211 intel_modeset_update_crtc_state(struct drm_atomic_state *state) 12212 { 12213 struct drm_crtc *crtc; 12214 struct drm_crtc_state *crtc_state; 12215 int i; 12216 12217 /* Double check state. */ 12218 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12219 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state); 12220 12221 /* Update hwmode for vblank functions */ 12222 if (crtc->state->active) 12223 crtc->hwmode = crtc->state->adjusted_mode; 12224 else 12225 crtc->hwmode.crtc_clock = 0; 12226 } 12227 } 12228 12229 static bool intel_fuzzy_clock_check(int clock1, int clock2) 12230 { 12231 int diff; 12232 12233 if (clock1 == clock2) 12234 return true; 12235 12236 if (!clock1 || !clock2) 12237 return false; 12238 12239 diff = abs(clock1 - clock2); 12240 12241 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12242 return true; 12243 12244 return false; 12245 } 12246 12247 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 12248 list_for_each_entry((intel_crtc), \ 12249 &(dev)->mode_config.crtc_list, \ 12250 base.head) \ 12251 if (mask & (1 <<(intel_crtc)->pipe)) 12252 12253 12254 static bool 12255 intel_compare_m_n(unsigned int m, unsigned int n, 12256 unsigned int m2, unsigned int n2, 12257 bool exact) 12258 { 12259 if (m == m2 && n == n2) 12260 return true; 12261 12262 if (exact || !m || !n || !m2 || !n2) 12263 return false; 12264 12265 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12266 12267 if (m > m2) { 12268 while (m > m2) { 12269 m2 <<= 1; 12270 n2 <<= 1; 12271 } 12272 } else if (m < m2) { 12273 while (m < m2) { 12274 m <<= 1; 12275 n <<= 1; 12276 } 12277 } 12278 12279 return m == m2 && n == n2; 12280 } 12281 12282 static bool 12283 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12284 struct intel_link_m_n *m2_n2, 12285 bool adjust) 12286 { 12287 if (m_n->tu == m2_n2->tu && 12288 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12289 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 12290 intel_compare_m_n(m_n->link_m, m_n->link_n, 12291 m2_n2->link_m, m2_n2->link_n, !adjust)) { 12292 if (adjust) 12293 *m2_n2 = *m_n; 12294 12295 return true; 12296 } 12297 12298 return false; 12299 } 12300 12301 static bool 12302 intel_pipe_config_compare(struct drm_device *dev, 12303 struct intel_crtc_state *current_config, 12304 struct intel_crtc_state *pipe_config, 12305 bool adjust) 12306 { 12307 bool ret = true; 12308 12309 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ 12310 do { \ 12311 if (!adjust) \ 12312 DRM_ERROR(fmt, ##__VA_ARGS__); \ 12313 else \ 12314 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ 12315 } while (0) 12316 12317 #define PIPE_CONF_CHECK_X(name) \ 12318 if (current_config->name != pipe_config->name) { \ 12319 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12320 "(expected 0x%08x, found 0x%08x)\n", \ 12321 current_config->name, \ 12322 pipe_config->name); \ 12323 ret = false; \ 12324 } 12325 12326 #define PIPE_CONF_CHECK_I(name) \ 12327 if (current_config->name != pipe_config->name) { \ 12328 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12329 "(expected %i, found %i)\n", \ 12330 current_config->name, \ 12331 pipe_config->name); \ 12332 ret = false; \ 12333 } 12334 12335 #define PIPE_CONF_CHECK_M_N(name) \ 12336 if (!intel_compare_link_m_n(¤t_config->name, \ 12337 &pipe_config->name,\ 12338 adjust)) { \ 12339 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12340 "(expected tu %i gmch %i/%i link %i/%i, " \ 12341 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12342 current_config->name.tu, \ 12343 current_config->name.gmch_m, \ 12344 current_config->name.gmch_n, \ 12345 current_config->name.link_m, \ 12346 current_config->name.link_n, \ 12347 pipe_config->name.tu, \ 12348 pipe_config->name.gmch_m, \ 12349 pipe_config->name.gmch_n, \ 12350 pipe_config->name.link_m, \ 12351 pipe_config->name.link_n); \ 12352 ret = false; \ 12353 } 12354 12355 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 12356 if (!intel_compare_link_m_n(¤t_config->name, \ 12357 &pipe_config->name, adjust) && \ 12358 !intel_compare_link_m_n(¤t_config->alt_name, \ 12359 &pipe_config->name, adjust)) { \ 12360 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12361 "(expected tu %i gmch %i/%i link %i/%i, " \ 12362 "or tu %i gmch %i/%i link %i/%i, " \ 12363 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12364 current_config->name.tu, \ 12365 current_config->name.gmch_m, \ 12366 current_config->name.gmch_n, \ 12367 current_config->name.link_m, \ 12368 current_config->name.link_n, \ 12369 current_config->alt_name.tu, \ 12370 current_config->alt_name.gmch_m, \ 12371 current_config->alt_name.gmch_n, \ 12372 current_config->alt_name.link_m, \ 12373 current_config->alt_name.link_n, \ 12374 pipe_config->name.tu, \ 12375 pipe_config->name.gmch_m, \ 12376 pipe_config->name.gmch_n, \ 12377 pipe_config->name.link_m, \ 12378 pipe_config->name.link_n); \ 12379 ret = false; \ 12380 } 12381 12382 /* This is required for BDW+ where there is only one set of registers for 12383 * switching between high and low RR. 12384 * This macro can be used whenever a comparison has to be made between one 12385 * hw state and multiple sw state variables. 12386 */ 12387 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ 12388 if ((current_config->name != pipe_config->name) && \ 12389 (current_config->alt_name != pipe_config->name)) { \ 12390 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12391 "(expected %i or %i, found %i)\n", \ 12392 current_config->name, \ 12393 current_config->alt_name, \ 12394 pipe_config->name); \ 12395 ret = false; \ 12396 } 12397 12398 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 12399 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12400 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ 12401 "(expected %i, found %i)\n", \ 12402 current_config->name & (mask), \ 12403 pipe_config->name & (mask)); \ 12404 ret = false; \ 12405 } 12406 12407 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 12408 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12409 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12410 "(expected %i, found %i)\n", \ 12411 current_config->name, \ 12412 pipe_config->name); \ 12413 ret = false; \ 12414 } 12415 12416 #define PIPE_CONF_QUIRK(quirk) \ 12417 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12418 12419 PIPE_CONF_CHECK_I(cpu_transcoder); 12420 12421 PIPE_CONF_CHECK_I(has_pch_encoder); 12422 PIPE_CONF_CHECK_I(fdi_lanes); 12423 PIPE_CONF_CHECK_M_N(fdi_m_n); 12424 12425 PIPE_CONF_CHECK_I(has_dp_encoder); 12426 12427 if (INTEL_INFO(dev)->gen < 8) { 12428 PIPE_CONF_CHECK_M_N(dp_m_n); 12429 12430 PIPE_CONF_CHECK_I(has_drrs); 12431 if (current_config->has_drrs) 12432 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12433 } else 12434 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12435 12436 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12437 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12438 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12439 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12440 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12441 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12442 12443 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12444 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12445 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12446 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12447 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12448 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12449 12450 PIPE_CONF_CHECK_I(pixel_multiplier); 12451 PIPE_CONF_CHECK_I(has_hdmi_sink); 12452 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 12453 IS_VALLEYVIEW(dev)) 12454 PIPE_CONF_CHECK_I(limited_color_range); 12455 PIPE_CONF_CHECK_I(has_infoframe); 12456 12457 PIPE_CONF_CHECK_I(has_audio); 12458 12459 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12460 DRM_MODE_FLAG_INTERLACE); 12461 12462 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 12463 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12464 DRM_MODE_FLAG_PHSYNC); 12465 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12466 DRM_MODE_FLAG_NHSYNC); 12467 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12468 DRM_MODE_FLAG_PVSYNC); 12469 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12470 DRM_MODE_FLAG_NVSYNC); 12471 } 12472 12473 PIPE_CONF_CHECK_I(pipe_src_w); 12474 PIPE_CONF_CHECK_I(pipe_src_h); 12475 12476 PIPE_CONF_CHECK_I(gmch_pfit.control); 12477 /* pfit ratios are autocomputed by the hw on gen4+ */ 12478 if (INTEL_INFO(dev)->gen < 4) 12479 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 12480 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 12481 12482 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12483 if (current_config->pch_pfit.enabled) { 12484 PIPE_CONF_CHECK_I(pch_pfit.pos); 12485 PIPE_CONF_CHECK_I(pch_pfit.size); 12486 } 12487 12488 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12489 12490 /* BDW+ don't expose a synchronous way to read the state */ 12491 if (IS_HASWELL(dev)) 12492 PIPE_CONF_CHECK_I(ips_enabled); 12493 12494 PIPE_CONF_CHECK_I(double_wide); 12495 12496 PIPE_CONF_CHECK_X(ddi_pll_sel); 12497 12498 PIPE_CONF_CHECK_I(shared_dpll); 12499 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12500 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12501 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12502 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12503 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12504 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12505 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12506 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12507 12508 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 12509 PIPE_CONF_CHECK_I(pipe_bpp); 12510 12511 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12512 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12513 12514 #undef PIPE_CONF_CHECK_X 12515 #undef PIPE_CONF_CHECK_I 12516 #undef PIPE_CONF_CHECK_I_ALT 12517 #undef PIPE_CONF_CHECK_FLAGS 12518 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12519 #undef PIPE_CONF_QUIRK 12520 #undef INTEL_ERR_OR_DBG_KMS 12521 12522 return ret; 12523 } 12524 12525 static void check_wm_state(struct drm_device *dev) 12526 { 12527 struct drm_i915_private *dev_priv = dev->dev_private; 12528 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12529 struct intel_crtc *intel_crtc; 12530 int plane; 12531 12532 if (INTEL_INFO(dev)->gen < 9) 12533 return; 12534 12535 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 12536 sw_ddb = &dev_priv->wm.skl_hw.ddb; 12537 12538 for_each_intel_crtc(dev, intel_crtc) { 12539 struct skl_ddb_entry *hw_entry, *sw_entry; 12540 const enum i915_pipe pipe = intel_crtc->pipe; 12541 12542 if (!intel_crtc->active) 12543 continue; 12544 12545 /* planes */ 12546 for_each_plane(dev_priv, pipe, plane) { 12547 hw_entry = &hw_ddb.plane[pipe][plane]; 12548 sw_entry = &sw_ddb->plane[pipe][plane]; 12549 12550 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12551 continue; 12552 12553 DRM_ERROR("mismatch in DDB state pipe %c plane %d " 12554 "(expected (%u,%u), found (%u,%u))\n", 12555 pipe_name(pipe), plane + 1, 12556 sw_entry->start, sw_entry->end, 12557 hw_entry->start, hw_entry->end); 12558 } 12559 12560 /* cursor */ 12561 hw_entry = &hw_ddb.cursor[pipe]; 12562 sw_entry = &sw_ddb->cursor[pipe]; 12563 12564 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12565 continue; 12566 12567 DRM_ERROR("mismatch in DDB state pipe %c cursor " 12568 "(expected (%u,%u), found (%u,%u))\n", 12569 pipe_name(pipe), 12570 sw_entry->start, sw_entry->end, 12571 hw_entry->start, hw_entry->end); 12572 } 12573 } 12574 12575 static void 12576 check_connector_state(struct drm_device *dev, 12577 struct drm_atomic_state *old_state) 12578 { 12579 struct drm_connector_state *old_conn_state; 12580 struct drm_connector *connector; 12581 int i; 12582 12583 for_each_connector_in_state(old_state, connector, old_conn_state, i) { 12584 struct drm_encoder *encoder = connector->encoder; 12585 struct drm_connector_state *state = connector->state; 12586 12587 /* This also checks the encoder/connector hw state with the 12588 * ->get_hw_state callbacks. */ 12589 intel_connector_check_state(to_intel_connector(connector)); 12590 12591 I915_STATE_WARN(state->best_encoder != encoder, 12592 "connector's atomic encoder doesn't match legacy encoder\n"); 12593 } 12594 } 12595 12596 static void 12597 check_encoder_state(struct drm_device *dev) 12598 { 12599 struct intel_encoder *encoder; 12600 struct intel_connector *connector; 12601 12602 for_each_intel_encoder(dev, encoder) { 12603 bool enabled = false; 12604 enum i915_pipe pipe; 12605 12606 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 12607 encoder->base.base.id, 12608 encoder->base.name); 12609 12610 for_each_intel_connector(dev, connector) { 12611 if (connector->base.state->best_encoder != &encoder->base) 12612 continue; 12613 enabled = true; 12614 12615 I915_STATE_WARN(connector->base.state->crtc != 12616 encoder->base.crtc, 12617 "connector's crtc doesn't match encoder crtc\n"); 12618 } 12619 12620 I915_STATE_WARN(!!encoder->base.crtc != enabled, 12621 "encoder's enabled state mismatch " 12622 "(expected %i, found %i)\n", 12623 !!encoder->base.crtc, enabled); 12624 12625 if (!encoder->base.crtc) { 12626 bool active; 12627 12628 active = encoder->get_hw_state(encoder, &pipe); 12629 I915_STATE_WARN(active, 12630 "encoder detached but still enabled on pipe %c.\n", 12631 pipe_name(pipe)); 12632 } 12633 } 12634 } 12635 12636 static void 12637 check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state) 12638 { 12639 struct drm_i915_private *dev_priv = dev->dev_private; 12640 struct intel_encoder *encoder; 12641 struct drm_crtc_state *old_crtc_state; 12642 struct drm_crtc *crtc; 12643 int i; 12644 12645 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { 12646 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12647 struct intel_crtc_state *pipe_config, *sw_config; 12648 bool active; 12649 12650 if (!needs_modeset(crtc->state)) 12651 continue; 12652 12653 __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state); 12654 pipe_config = to_intel_crtc_state(old_crtc_state); 12655 memset(pipe_config, 0, sizeof(*pipe_config)); 12656 pipe_config->base.crtc = crtc; 12657 pipe_config->base.state = old_state; 12658 12659 DRM_DEBUG_KMS("[CRTC:%d]\n", 12660 crtc->base.id); 12661 12662 active = dev_priv->display.get_pipe_config(intel_crtc, 12663 pipe_config); 12664 12665 /* hw state is inconsistent with the pipe quirk */ 12666 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 12667 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 12668 active = crtc->state->active; 12669 12670 I915_STATE_WARN(crtc->state->active != active, 12671 "crtc active state doesn't match with hw state " 12672 "(expected %i, found %i)\n", crtc->state->active, active); 12673 12674 I915_STATE_WARN(intel_crtc->active != crtc->state->active, 12675 "transitional active state does not match atomic hw state " 12676 "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active); 12677 12678 for_each_encoder_on_crtc(dev, crtc, encoder) { 12679 enum i915_pipe pipe; 12680 12681 active = encoder->get_hw_state(encoder, &pipe); 12682 I915_STATE_WARN(active != crtc->state->active, 12683 "[ENCODER:%i] active %i with crtc active %i\n", 12684 encoder->base.base.id, active, crtc->state->active); 12685 12686 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 12687 "Encoder connected to wrong pipe %c\n", 12688 pipe_name(pipe)); 12689 12690 if (active) 12691 encoder->get_config(encoder, pipe_config); 12692 } 12693 12694 if (!crtc->state->active) 12695 continue; 12696 12697 sw_config = to_intel_crtc_state(crtc->state); 12698 if (!intel_pipe_config_compare(dev, sw_config, 12699 pipe_config, false)) { 12700 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 12701 intel_dump_pipe_config(intel_crtc, pipe_config, 12702 "[hw state]"); 12703 intel_dump_pipe_config(intel_crtc, sw_config, 12704 "[sw state]"); 12705 } 12706 } 12707 } 12708 12709 static void 12710 check_shared_dpll_state(struct drm_device *dev) 12711 { 12712 struct drm_i915_private *dev_priv = dev->dev_private; 12713 struct intel_crtc *crtc; 12714 struct intel_dpll_hw_state dpll_hw_state; 12715 int i; 12716 12717 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 12718 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 12719 int enabled_crtcs = 0, active_crtcs = 0; 12720 bool active; 12721 12722 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 12723 12724 DRM_DEBUG_KMS("%s\n", pll->name); 12725 12726 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); 12727 12728 I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask), 12729 "more active pll users than references: %i vs %i\n", 12730 pll->active, hweight32(pll->config.crtc_mask)); 12731 I915_STATE_WARN(pll->active && !pll->on, 12732 "pll in active use but not on in sw tracking\n"); 12733 I915_STATE_WARN(pll->on && !pll->active, 12734 "pll in on but not on in use in sw tracking\n"); 12735 I915_STATE_WARN(pll->on != active, 12736 "pll on state mismatch (expected %i, found %i)\n", 12737 pll->on, active); 12738 12739 for_each_intel_crtc(dev, crtc) { 12740 if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll) 12741 enabled_crtcs++; 12742 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 12743 active_crtcs++; 12744 } 12745 I915_STATE_WARN(pll->active != active_crtcs, 12746 "pll active crtcs mismatch (expected %i, found %i)\n", 12747 pll->active, active_crtcs); 12748 I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs, 12749 "pll enabled crtcs mismatch (expected %i, found %i)\n", 12750 hweight32(pll->config.crtc_mask), enabled_crtcs); 12751 12752 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state, 12753 sizeof(dpll_hw_state)), 12754 "pll hw state mismatch\n"); 12755 } 12756 } 12757 12758 static void 12759 intel_modeset_check_state(struct drm_device *dev, 12760 struct drm_atomic_state *old_state) 12761 { 12762 check_wm_state(dev); 12763 check_connector_state(dev, old_state); 12764 check_encoder_state(dev); 12765 check_crtc_state(dev, old_state); 12766 check_shared_dpll_state(dev); 12767 } 12768 12769 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, 12770 int dotclock) 12771 { 12772 /* 12773 * FDI already provided one idea for the dotclock. 12774 * Yell if the encoder disagrees. 12775 */ 12776 WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock), 12777 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12778 pipe_config->base.adjusted_mode.crtc_clock, dotclock); 12779 } 12780 12781 static void update_scanline_offset(struct intel_crtc *crtc) 12782 { 12783 struct drm_device *dev = crtc->base.dev; 12784 12785 /* 12786 * The scanline counter increments at the leading edge of hsync. 12787 * 12788 * On most platforms it starts counting from vtotal-1 on the 12789 * first active line. That means the scanline counter value is 12790 * always one less than what we would expect. Ie. just after 12791 * start of vblank, which also occurs at start of hsync (on the 12792 * last active line), the scanline counter will read vblank_start-1. 12793 * 12794 * On gen2 the scanline counter starts counting from 1 instead 12795 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 12796 * to keep the value positive), instead of adding one. 12797 * 12798 * On HSW+ the behaviour of the scanline counter depends on the output 12799 * type. For DP ports it behaves like most other platforms, but on HDMI 12800 * there's an extra 1 line difference. So we need to add two instead of 12801 * one to the value. 12802 */ 12803 if (IS_GEN2(dev)) { 12804 const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 12805 int vtotal; 12806 12807 vtotal = mode->crtc_vtotal; 12808 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 12809 vtotal /= 2; 12810 12811 crtc->scanline_offset = vtotal - 1; 12812 } else if (HAS_DDI(dev) && 12813 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 12814 crtc->scanline_offset = 2; 12815 } else 12816 crtc->scanline_offset = 1; 12817 } 12818 12819 static void intel_modeset_clear_plls(struct drm_atomic_state *state) 12820 { 12821 struct drm_device *dev = state->dev; 12822 struct drm_i915_private *dev_priv = to_i915(dev); 12823 struct intel_shared_dpll_config *shared_dpll = NULL; 12824 struct intel_crtc *intel_crtc; 12825 struct intel_crtc_state *intel_crtc_state; 12826 struct drm_crtc *crtc; 12827 struct drm_crtc_state *crtc_state; 12828 int i; 12829 12830 if (!dev_priv->display.crtc_compute_clock) 12831 return; 12832 12833 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12834 int dpll; 12835 12836 intel_crtc = to_intel_crtc(crtc); 12837 intel_crtc_state = to_intel_crtc_state(crtc_state); 12838 dpll = intel_crtc_state->shared_dpll; 12839 12840 if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE) 12841 continue; 12842 12843 intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; 12844 12845 if (!shared_dpll) 12846 shared_dpll = intel_atomic_get_shared_dpll_state(state); 12847 12848 shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe); 12849 } 12850 } 12851 12852 /* 12853 * This implements the workaround described in the "notes" section of the mode 12854 * set sequence documentation. When going from no pipes or single pipe to 12855 * multiple pipes, and planes are enabled after the pipe, we need to wait at 12856 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 12857 */ 12858 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) 12859 { 12860 struct drm_crtc_state *crtc_state; 12861 struct intel_crtc *intel_crtc; 12862 struct drm_crtc *crtc; 12863 struct intel_crtc_state *first_crtc_state = NULL; 12864 struct intel_crtc_state *other_crtc_state = NULL; 12865 enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 12866 int i; 12867 12868 /* look at all crtc's that are going to be enabled in during modeset */ 12869 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12870 intel_crtc = to_intel_crtc(crtc); 12871 12872 if (!crtc_state->active || !needs_modeset(crtc_state)) 12873 continue; 12874 12875 if (first_crtc_state) { 12876 other_crtc_state = to_intel_crtc_state(crtc_state); 12877 break; 12878 } else { 12879 first_crtc_state = to_intel_crtc_state(crtc_state); 12880 first_pipe = intel_crtc->pipe; 12881 } 12882 } 12883 12884 /* No workaround needed? */ 12885 if (!first_crtc_state) 12886 return 0; 12887 12888 /* w/a possibly needed, check how many crtc's are already enabled. */ 12889 for_each_intel_crtc(state->dev, intel_crtc) { 12890 struct intel_crtc_state *pipe_config; 12891 12892 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); 12893 if (IS_ERR(pipe_config)) 12894 return PTR_ERR(pipe_config); 12895 12896 pipe_config->hsw_workaround_pipe = INVALID_PIPE; 12897 12898 if (!pipe_config->base.active || 12899 needs_modeset(&pipe_config->base)) 12900 continue; 12901 12902 /* 2 or more enabled crtcs means no need for w/a */ 12903 if (enabled_pipe != INVALID_PIPE) 12904 return 0; 12905 12906 enabled_pipe = intel_crtc->pipe; 12907 } 12908 12909 if (enabled_pipe != INVALID_PIPE) 12910 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 12911 else if (other_crtc_state) 12912 other_crtc_state->hsw_workaround_pipe = first_pipe; 12913 12914 return 0; 12915 } 12916 12917 static int intel_modeset_all_pipes(struct drm_atomic_state *state) 12918 { 12919 struct drm_crtc *crtc; 12920 struct drm_crtc_state *crtc_state; 12921 int ret = 0; 12922 12923 /* add all active pipes to the state */ 12924 for_each_crtc(state->dev, crtc) { 12925 crtc_state = drm_atomic_get_crtc_state(state, crtc); 12926 if (IS_ERR(crtc_state)) 12927 return PTR_ERR(crtc_state); 12928 12929 if (!crtc_state->active || needs_modeset(crtc_state)) 12930 continue; 12931 12932 crtc_state->mode_changed = true; 12933 12934 ret = drm_atomic_add_affected_connectors(state, crtc); 12935 if (ret) 12936 break; 12937 12938 ret = drm_atomic_add_affected_planes(state, crtc); 12939 if (ret) 12940 break; 12941 } 12942 12943 return ret; 12944 } 12945 12946 12947 static int intel_modeset_checks(struct drm_atomic_state *state) 12948 { 12949 struct drm_device *dev = state->dev; 12950 struct drm_i915_private *dev_priv = dev->dev_private; 12951 int ret; 12952 12953 if (!check_digital_port_conflicts(state)) { 12954 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 12955 return -EINVAL; 12956 } 12957 12958 /* 12959 * See if the config requires any additional preparation, e.g. 12960 * to adjust global state with pipes off. We need to do this 12961 * here so we can get the modeset_pipe updated config for the new 12962 * mode set on this crtc. For other crtcs we need to use the 12963 * adjusted_mode bits in the crtc directly. 12964 */ 12965 if (dev_priv->display.modeset_calc_cdclk) { 12966 unsigned int cdclk; 12967 12968 ret = dev_priv->display.modeset_calc_cdclk(state); 12969 12970 cdclk = to_intel_atomic_state(state)->cdclk; 12971 if (!ret && cdclk != dev_priv->cdclk_freq) 12972 ret = intel_modeset_all_pipes(state); 12973 12974 if (ret < 0) 12975 return ret; 12976 } else 12977 to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq; 12978 12979 intel_modeset_clear_plls(state); 12980 12981 if (IS_HASWELL(dev)) 12982 return haswell_mode_set_planes_workaround(state); 12983 12984 return 0; 12985 } 12986 12987 /** 12988 * intel_atomic_check - validate state object 12989 * @dev: drm device 12990 * @state: state to validate 12991 */ 12992 static int intel_atomic_check(struct drm_device *dev, 12993 struct drm_atomic_state *state) 12994 { 12995 struct drm_crtc *crtc; 12996 struct drm_crtc_state *crtc_state; 12997 int ret, i; 12998 bool any_ms = false; 12999 13000 ret = drm_atomic_helper_check_modeset(dev, state); 13001 if (ret) 13002 return ret; 13003 13004 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13005 struct intel_crtc_state *pipe_config = 13006 to_intel_crtc_state(crtc_state); 13007 13008 /* Catch I915_MODE_FLAG_INHERITED */ 13009 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13010 crtc_state->mode_changed = true; 13011 13012 if (!crtc_state->enable) { 13013 if (needs_modeset(crtc_state)) 13014 any_ms = true; 13015 continue; 13016 } 13017 13018 if (!needs_modeset(crtc_state)) 13019 continue; 13020 13021 /* FIXME: For only active_changed we shouldn't need to do any 13022 * state recomputation at all. */ 13023 13024 ret = drm_atomic_add_affected_connectors(state, crtc); 13025 if (ret) 13026 return ret; 13027 13028 ret = intel_modeset_pipe_config(crtc, pipe_config); 13029 if (ret) 13030 return ret; 13031 13032 if (i915.fastboot && 13033 intel_pipe_config_compare(state->dev, 13034 to_intel_crtc_state(crtc->state), 13035 pipe_config, true)) { 13036 crtc_state->mode_changed = false; 13037 } 13038 13039 if (needs_modeset(crtc_state)) { 13040 any_ms = true; 13041 13042 ret = drm_atomic_add_affected_planes(state, crtc); 13043 if (ret) 13044 return ret; 13045 } 13046 13047 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13048 needs_modeset(crtc_state) ? 13049 "[modeset]" : "[fastset]"); 13050 } 13051 13052 if (any_ms) { 13053 ret = intel_modeset_checks(state); 13054 13055 if (ret) 13056 return ret; 13057 } else 13058 to_intel_atomic_state(state)->cdclk = 13059 to_i915(state->dev)->cdclk_freq; 13060 13061 return drm_atomic_helper_check_planes(state->dev, state); 13062 } 13063 13064 /** 13065 * intel_atomic_commit - commit validated state object 13066 * @dev: DRM device 13067 * @state: the top-level driver state object 13068 * @async: asynchronous commit 13069 * 13070 * This function commits a top-level state object that has been validated 13071 * with drm_atomic_helper_check(). 13072 * 13073 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment 13074 * we can only handle plane-related operations and do not yet support 13075 * asynchronous commit. 13076 * 13077 * RETURNS 13078 * Zero for success or -errno. 13079 */ 13080 static int intel_atomic_commit(struct drm_device *dev, 13081 struct drm_atomic_state *state, 13082 bool async) 13083 { 13084 struct drm_i915_private *dev_priv = dev->dev_private; 13085 struct drm_crtc *crtc; 13086 struct drm_crtc_state *crtc_state; 13087 int ret = 0; 13088 int i; 13089 bool any_ms = false; 13090 13091 if (async) { 13092 DRM_DEBUG_KMS("i915 does not yet support async commit\n"); 13093 return -EINVAL; 13094 } 13095 13096 ret = drm_atomic_helper_prepare_planes(dev, state); 13097 if (ret) 13098 return ret; 13099 13100 drm_atomic_helper_swap_state(dev, state); 13101 13102 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13103 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13104 13105 if (!needs_modeset(crtc->state)) 13106 continue; 13107 13108 any_ms = true; 13109 intel_pre_plane_update(intel_crtc); 13110 13111 if (crtc_state->active) { 13112 intel_crtc_disable_planes(crtc, crtc_state->plane_mask); 13113 dev_priv->display.crtc_disable(crtc); 13114 intel_crtc->active = false; 13115 intel_disable_shared_dpll(intel_crtc); 13116 } 13117 } 13118 13119 /* Only after disabling all output pipelines that will be changed can we 13120 * update the the output configuration. */ 13121 intel_modeset_update_crtc_state(state); 13122 13123 if (any_ms) { 13124 intel_shared_dpll_commit(state); 13125 13126 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 13127 modeset_update_crtc_power_domains(state); 13128 } 13129 13130 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 13131 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13132 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13133 bool modeset = needs_modeset(crtc->state); 13134 13135 if (modeset && crtc->state->active) { 13136 update_scanline_offset(to_intel_crtc(crtc)); 13137 dev_priv->display.crtc_enable(crtc); 13138 } 13139 13140 if (!modeset) 13141 intel_pre_plane_update(intel_crtc); 13142 13143 drm_atomic_helper_commit_planes_on_crtc(crtc_state); 13144 intel_post_plane_update(intel_crtc); 13145 } 13146 13147 /* FIXME: add subpixel order */ 13148 13149 drm_atomic_helper_wait_for_vblanks(dev, state); 13150 drm_atomic_helper_cleanup_planes(dev, state); 13151 13152 if (any_ms) 13153 intel_modeset_check_state(dev, state); 13154 13155 drm_atomic_state_free(state); 13156 13157 return 0; 13158 } 13159 13160 void intel_crtc_restore_mode(struct drm_crtc *crtc) 13161 { 13162 struct drm_device *dev = crtc->dev; 13163 struct drm_atomic_state *state; 13164 struct drm_crtc_state *crtc_state; 13165 int ret; 13166 13167 state = drm_atomic_state_alloc(dev); 13168 if (!state) { 13169 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", 13170 crtc->base.id); 13171 return; 13172 } 13173 13174 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc); 13175 13176 retry: 13177 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13178 ret = PTR_ERR_OR_ZERO(crtc_state); 13179 if (!ret) { 13180 if (!crtc_state->active) 13181 goto out; 13182 13183 crtc_state->mode_changed = true; 13184 ret = drm_atomic_commit(state); 13185 } 13186 13187 if (ret == -EDEADLK) { 13188 drm_atomic_state_clear(state); 13189 drm_modeset_backoff(state->acquire_ctx); 13190 goto retry; 13191 } 13192 13193 if (ret) 13194 out: 13195 drm_atomic_state_free(state); 13196 } 13197 13198 #undef for_each_intel_crtc_masked 13199 13200 static const struct drm_crtc_funcs intel_crtc_funcs = { 13201 .gamma_set = intel_crtc_gamma_set, 13202 .set_config = drm_atomic_helper_set_config, 13203 .destroy = intel_crtc_destroy, 13204 .page_flip = intel_crtc_page_flip, 13205 .atomic_duplicate_state = intel_crtc_duplicate_state, 13206 .atomic_destroy_state = intel_crtc_destroy_state, 13207 }; 13208 13209 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 13210 struct intel_shared_dpll *pll, 13211 struct intel_dpll_hw_state *hw_state) 13212 { 13213 uint32_t val; 13214 13215 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 13216 return false; 13217 13218 val = I915_READ(PCH_DPLL(pll->id)); 13219 hw_state->dpll = val; 13220 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 13221 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 13222 13223 return val & DPLL_VCO_ENABLE; 13224 } 13225 13226 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, 13227 struct intel_shared_dpll *pll) 13228 { 13229 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); 13230 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); 13231 } 13232 13233 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 13234 struct intel_shared_dpll *pll) 13235 { 13236 /* PCH refclock must be enabled first */ 13237 ibx_assert_pch_refclk_enabled(dev_priv); 13238 13239 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 13240 13241 /* Wait for the clocks to stabilize. */ 13242 POSTING_READ(PCH_DPLL(pll->id)); 13243 udelay(150); 13244 13245 /* The pixel multiplier can only be updated once the 13246 * DPLL is enabled and the clocks are stable. 13247 * 13248 * So write it again. 13249 */ 13250 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 13251 POSTING_READ(PCH_DPLL(pll->id)); 13252 udelay(200); 13253 } 13254 13255 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 13256 struct intel_shared_dpll *pll) 13257 { 13258 struct drm_device *dev = dev_priv->dev; 13259 struct intel_crtc *crtc; 13260 13261 /* Make sure no transcoder isn't still depending on us. */ 13262 for_each_intel_crtc(dev, crtc) { 13263 if (intel_crtc_to_shared_dpll(crtc) == pll) 13264 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 13265 } 13266 13267 I915_WRITE(PCH_DPLL(pll->id), 0); 13268 POSTING_READ(PCH_DPLL(pll->id)); 13269 udelay(200); 13270 } 13271 13272 static char *ibx_pch_dpll_names[] = { 13273 "PCH DPLL A", 13274 "PCH DPLL B", 13275 }; 13276 13277 static void ibx_pch_dpll_init(struct drm_device *dev) 13278 { 13279 struct drm_i915_private *dev_priv = dev->dev_private; 13280 int i; 13281 13282 dev_priv->num_shared_dpll = 2; 13283 13284 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 13285 dev_priv->shared_dplls[i].id = i; 13286 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; 13287 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; 13288 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; 13289 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; 13290 dev_priv->shared_dplls[i].get_hw_state = 13291 ibx_pch_dpll_get_hw_state; 13292 } 13293 } 13294 13295 static void intel_shared_dpll_init(struct drm_device *dev) 13296 { 13297 struct drm_i915_private *dev_priv = dev->dev_private; 13298 13299 intel_update_cdclk(dev); 13300 13301 if (HAS_DDI(dev)) 13302 intel_ddi_pll_init(dev); 13303 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 13304 ibx_pch_dpll_init(dev); 13305 else 13306 dev_priv->num_shared_dpll = 0; 13307 13308 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 13309 } 13310 13311 /** 13312 * intel_prepare_plane_fb - Prepare fb for usage on plane 13313 * @plane: drm plane to prepare for 13314 * @fb: framebuffer to prepare for presentation 13315 * 13316 * Prepares a framebuffer for usage on a display plane. Generally this 13317 * involves pinning the underlying object and updating the frontbuffer tracking 13318 * bits. Some older platforms need special physical address handling for 13319 * cursor planes. 13320 * 13321 * Returns 0 on success, negative error code on failure. 13322 */ 13323 int 13324 intel_prepare_plane_fb(struct drm_plane *plane, 13325 struct drm_framebuffer *fb, 13326 const struct drm_plane_state *new_state) 13327 { 13328 struct drm_device *dev = plane->dev; 13329 struct intel_plane *intel_plane = to_intel_plane(plane); 13330 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13331 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 13332 int ret = 0; 13333 13334 if (!obj) 13335 return 0; 13336 13337 mutex_lock(&dev->struct_mutex); 13338 13339 if (plane->type == DRM_PLANE_TYPE_CURSOR && 13340 INTEL_INFO(dev)->cursor_needs_physical) { 13341 int align = IS_I830(dev) ? 16 * 1024 : 256; 13342 ret = i915_gem_object_attach_phys(obj, align); 13343 if (ret) 13344 DRM_DEBUG_KMS("failed to attach phys object\n"); 13345 } else { 13346 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL, NULL); 13347 } 13348 13349 if (ret == 0) 13350 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); 13351 13352 mutex_unlock(&dev->struct_mutex); 13353 13354 return ret; 13355 } 13356 13357 /** 13358 * intel_cleanup_plane_fb - Cleans up an fb after plane use 13359 * @plane: drm plane to clean up for 13360 * @fb: old framebuffer that was on plane 13361 * 13362 * Cleans up a framebuffer that has just been removed from a plane. 13363 */ 13364 void 13365 intel_cleanup_plane_fb(struct drm_plane *plane, 13366 struct drm_framebuffer *fb, 13367 const struct drm_plane_state *old_state) 13368 { 13369 struct drm_device *dev = plane->dev; 13370 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13371 13372 if (WARN_ON(!obj)) 13373 return; 13374 13375 if (plane->type != DRM_PLANE_TYPE_CURSOR || 13376 !INTEL_INFO(dev)->cursor_needs_physical) { 13377 mutex_lock(&dev->struct_mutex); 13378 intel_unpin_fb_obj(fb, old_state); 13379 mutex_unlock(&dev->struct_mutex); 13380 } 13381 } 13382 13383 int 13384 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 13385 { 13386 int max_scale; 13387 struct drm_device *dev; 13388 struct drm_i915_private *dev_priv; 13389 int crtc_clock, cdclk; 13390 13391 if (!intel_crtc || !crtc_state) 13392 return DRM_PLANE_HELPER_NO_SCALING; 13393 13394 dev = intel_crtc->base.dev; 13395 dev_priv = dev->dev_private; 13396 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13397 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; 13398 13399 if (!crtc_clock || !cdclk) 13400 return DRM_PLANE_HELPER_NO_SCALING; 13401 13402 /* 13403 * skl max scale is lower of: 13404 * close to 3 but not 3, -1 is for that purpose 13405 * or 13406 * cdclk/crtc_clock 13407 */ 13408 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock)); 13409 13410 return max_scale; 13411 } 13412 13413 static int 13414 intel_check_primary_plane(struct drm_plane *plane, 13415 struct intel_crtc_state *crtc_state, 13416 struct intel_plane_state *state) 13417 { 13418 struct drm_crtc *crtc = state->base.crtc; 13419 struct drm_framebuffer *fb = state->base.fb; 13420 int min_scale = DRM_PLANE_HELPER_NO_SCALING; 13421 int max_scale = DRM_PLANE_HELPER_NO_SCALING; 13422 bool can_position = false; 13423 13424 /* use scaler when colorkey is not required */ 13425 if (INTEL_INFO(plane->dev)->gen >= 9 && 13426 state->ckey.flags == I915_SET_COLORKEY_NONE) { 13427 min_scale = 1; 13428 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); 13429 can_position = true; 13430 } 13431 13432 return drm_plane_helper_check_update(plane, crtc, fb, &state->src, 13433 &state->dst, &state->clip, 13434 min_scale, max_scale, 13435 can_position, true, 13436 &state->visible); 13437 } 13438 13439 static void 13440 intel_commit_primary_plane(struct drm_plane *plane, 13441 struct intel_plane_state *state) 13442 { 13443 struct drm_crtc *crtc = state->base.crtc; 13444 struct drm_framebuffer *fb = state->base.fb; 13445 struct drm_device *dev = plane->dev; 13446 struct drm_i915_private *dev_priv = dev->dev_private; 13447 struct intel_crtc *intel_crtc; 13448 struct drm_rect *src = &state->src; 13449 13450 crtc = crtc ? crtc : plane->crtc; 13451 intel_crtc = to_intel_crtc(crtc); 13452 13453 plane->fb = fb; 13454 crtc->x = src->x1 >> 16; 13455 crtc->y = src->y1 >> 16; 13456 13457 if (!crtc->state->active) 13458 return; 13459 13460 if (state->visible) 13461 /* FIXME: kill this fastboot hack */ 13462 intel_update_pipe_size(intel_crtc); 13463 13464 dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y); 13465 } 13466 13467 static void 13468 intel_disable_primary_plane(struct drm_plane *plane, 13469 struct drm_crtc *crtc) 13470 { 13471 struct drm_device *dev = plane->dev; 13472 struct drm_i915_private *dev_priv = dev->dev_private; 13473 13474 dev_priv->display.update_primary_plane(crtc, NULL, 0, 0); 13475 } 13476 13477 static void intel_begin_crtc_commit(struct drm_crtc *crtc, 13478 struct drm_crtc_state *old_crtc_state) 13479 { 13480 struct drm_device *dev = crtc->dev; 13481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13482 13483 if (intel_crtc->atomic.update_wm_pre) 13484 intel_update_watermarks(crtc); 13485 13486 /* Perform vblank evasion around commit operation */ 13487 if (crtc->state->active) 13488 intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count); 13489 13490 if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) 13491 skl_detach_scalers(intel_crtc); 13492 } 13493 13494 static void intel_finish_crtc_commit(struct drm_crtc *crtc, 13495 struct drm_crtc_state *old_crtc_state) 13496 { 13497 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13498 13499 if (crtc->state->active) 13500 intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count); 13501 } 13502 13503 /** 13504 * intel_plane_destroy - destroy a plane 13505 * @plane: plane to destroy 13506 * 13507 * Common destruction function for all types of planes (primary, cursor, 13508 * sprite). 13509 */ 13510 void intel_plane_destroy(struct drm_plane *plane) 13511 { 13512 struct intel_plane *intel_plane = to_intel_plane(plane); 13513 drm_plane_cleanup(plane); 13514 kfree(intel_plane); 13515 } 13516 13517 const struct drm_plane_funcs intel_plane_funcs = { 13518 .update_plane = drm_atomic_helper_update_plane, 13519 .disable_plane = drm_atomic_helper_disable_plane, 13520 .destroy = intel_plane_destroy, 13521 .set_property = drm_atomic_helper_plane_set_property, 13522 .atomic_get_property = intel_plane_atomic_get_property, 13523 .atomic_set_property = intel_plane_atomic_set_property, 13524 .atomic_duplicate_state = intel_plane_duplicate_state, 13525 .atomic_destroy_state = intel_plane_destroy_state, 13526 13527 }; 13528 13529 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, 13530 int pipe) 13531 { 13532 struct intel_plane *primary; 13533 struct intel_plane_state *state; 13534 const uint32_t *intel_primary_formats; 13535 unsigned int num_formats; 13536 13537 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13538 if (primary == NULL) 13539 return NULL; 13540 13541 state = intel_create_plane_state(&primary->base); 13542 if (!state) { 13543 kfree(primary); 13544 return NULL; 13545 } 13546 primary->base.state = &state->base; 13547 13548 primary->can_scale = false; 13549 primary->max_downscale = 1; 13550 if (INTEL_INFO(dev)->gen >= 9) { 13551 primary->can_scale = true; 13552 state->scaler_id = -1; 13553 } 13554 primary->pipe = pipe; 13555 primary->plane = pipe; 13556 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13557 primary->check_plane = intel_check_primary_plane; 13558 primary->commit_plane = intel_commit_primary_plane; 13559 primary->disable_plane = intel_disable_primary_plane; 13560 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) 13561 primary->plane = !pipe; 13562 13563 if (INTEL_INFO(dev)->gen >= 9) { 13564 intel_primary_formats = skl_primary_formats; 13565 num_formats = ARRAY_SIZE(skl_primary_formats); 13566 } else if (INTEL_INFO(dev)->gen >= 4) { 13567 intel_primary_formats = i965_primary_formats; 13568 num_formats = ARRAY_SIZE(i965_primary_formats); 13569 } else { 13570 intel_primary_formats = i8xx_primary_formats; 13571 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13572 } 13573 13574 drm_universal_plane_init(dev, &primary->base, 0, 13575 &intel_plane_funcs, 13576 intel_primary_formats, num_formats, 13577 DRM_PLANE_TYPE_PRIMARY); 13578 13579 if (INTEL_INFO(dev)->gen >= 4) 13580 intel_create_rotation_property(dev, primary); 13581 13582 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13583 13584 return &primary->base; 13585 } 13586 13587 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) 13588 { 13589 if (!dev->mode_config.rotation_property) { 13590 unsigned long flags = BIT(DRM_ROTATE_0) | 13591 BIT(DRM_ROTATE_180); 13592 13593 if (INTEL_INFO(dev)->gen >= 9) 13594 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270); 13595 13596 dev->mode_config.rotation_property = 13597 drm_mode_create_rotation_property(dev, flags); 13598 } 13599 if (dev->mode_config.rotation_property) 13600 drm_object_attach_property(&plane->base.base, 13601 dev->mode_config.rotation_property, 13602 plane->base.state->rotation); 13603 } 13604 13605 static int 13606 intel_check_cursor_plane(struct drm_plane *plane, 13607 struct intel_crtc_state *crtc_state, 13608 struct intel_plane_state *state) 13609 { 13610 struct drm_crtc *crtc = crtc_state->base.crtc; 13611 struct drm_framebuffer *fb = state->base.fb; 13612 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13613 unsigned stride; 13614 int ret; 13615 13616 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, 13617 &state->dst, &state->clip, 13618 DRM_PLANE_HELPER_NO_SCALING, 13619 DRM_PLANE_HELPER_NO_SCALING, 13620 true, true, &state->visible); 13621 if (ret) 13622 return ret; 13623 13624 /* if we want to turn off the cursor ignore width and height */ 13625 if (!obj) 13626 return 0; 13627 13628 /* Check for which cursor types we support */ 13629 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { 13630 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 13631 state->base.crtc_w, state->base.crtc_h); 13632 return -EINVAL; 13633 } 13634 13635 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 13636 if (obj->base.size < stride * state->base.crtc_h) { 13637 DRM_DEBUG_KMS("buffer is too small\n"); 13638 return -ENOMEM; 13639 } 13640 13641 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { 13642 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 13643 return -EINVAL; 13644 } 13645 13646 return 0; 13647 } 13648 13649 static void 13650 intel_disable_cursor_plane(struct drm_plane *plane, 13651 struct drm_crtc *crtc) 13652 { 13653 intel_crtc_update_cursor(crtc, false); 13654 } 13655 13656 static void 13657 intel_commit_cursor_plane(struct drm_plane *plane, 13658 struct intel_plane_state *state) 13659 { 13660 struct drm_crtc *crtc = state->base.crtc; 13661 struct drm_device *dev = plane->dev; 13662 struct intel_crtc *intel_crtc; 13663 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); 13664 uint32_t addr; 13665 13666 crtc = crtc ? crtc : plane->crtc; 13667 intel_crtc = to_intel_crtc(crtc); 13668 13669 plane->fb = state->base.fb; 13670 crtc->cursor_x = state->base.crtc_x; 13671 crtc->cursor_y = state->base.crtc_y; 13672 13673 if (intel_crtc->cursor_bo == obj) 13674 goto update; 13675 13676 if (!obj) 13677 addr = 0; 13678 else if (!INTEL_INFO(dev)->cursor_needs_physical) 13679 addr = i915_gem_obj_ggtt_offset(obj); 13680 else 13681 addr = obj->phys_handle->busaddr; 13682 13683 intel_crtc->cursor_addr = addr; 13684 intel_crtc->cursor_bo = obj; 13685 13686 update: 13687 if (crtc->state->active) 13688 intel_crtc_update_cursor(crtc, state->visible); 13689 } 13690 13691 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 13692 int pipe) 13693 { 13694 struct intel_plane *cursor; 13695 struct intel_plane_state *state; 13696 13697 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13698 if (cursor == NULL) 13699 return NULL; 13700 13701 state = intel_create_plane_state(&cursor->base); 13702 if (!state) { 13703 kfree(cursor); 13704 return NULL; 13705 } 13706 cursor->base.state = &state->base; 13707 13708 cursor->can_scale = false; 13709 cursor->max_downscale = 1; 13710 cursor->pipe = pipe; 13711 cursor->plane = pipe; 13712 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 13713 cursor->check_plane = intel_check_cursor_plane; 13714 cursor->commit_plane = intel_commit_cursor_plane; 13715 cursor->disable_plane = intel_disable_cursor_plane; 13716 13717 drm_universal_plane_init(dev, &cursor->base, 0, 13718 &intel_plane_funcs, 13719 intel_cursor_formats, 13720 ARRAY_SIZE(intel_cursor_formats), 13721 DRM_PLANE_TYPE_CURSOR); 13722 13723 if (INTEL_INFO(dev)->gen >= 4) { 13724 if (!dev->mode_config.rotation_property) 13725 dev->mode_config.rotation_property = 13726 drm_mode_create_rotation_property(dev, 13727 BIT(DRM_ROTATE_0) | 13728 BIT(DRM_ROTATE_180)); 13729 if (dev->mode_config.rotation_property) 13730 drm_object_attach_property(&cursor->base.base, 13731 dev->mode_config.rotation_property, 13732 state->base.rotation); 13733 } 13734 13735 if (INTEL_INFO(dev)->gen >=9) 13736 state->scaler_id = -1; 13737 13738 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13739 13740 return &cursor->base; 13741 } 13742 13743 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 13744 struct intel_crtc_state *crtc_state) 13745 { 13746 int i; 13747 struct intel_scaler *intel_scaler; 13748 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 13749 13750 for (i = 0; i < intel_crtc->num_scalers; i++) { 13751 intel_scaler = &scaler_state->scalers[i]; 13752 intel_scaler->in_use = 0; 13753 intel_scaler->mode = PS_SCALER_MODE_DYN; 13754 } 13755 13756 scaler_state->scaler_id = -1; 13757 } 13758 13759 static void intel_crtc_init(struct drm_device *dev, int pipe) 13760 { 13761 struct drm_i915_private *dev_priv = dev->dev_private; 13762 struct intel_crtc *intel_crtc; 13763 struct intel_crtc_state *crtc_state = NULL; 13764 struct drm_plane *primary = NULL; 13765 struct drm_plane *cursor = NULL; 13766 int i, ret; 13767 13768 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13769 if (intel_crtc == NULL) 13770 return; 13771 13772 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13773 if (!crtc_state) 13774 goto fail; 13775 intel_crtc->config = crtc_state; 13776 intel_crtc->base.state = &crtc_state->base; 13777 crtc_state->base.crtc = &intel_crtc->base; 13778 13779 /* initialize shared scalers */ 13780 if (INTEL_INFO(dev)->gen >= 9) { 13781 if (pipe == PIPE_C) 13782 intel_crtc->num_scalers = 1; 13783 else 13784 intel_crtc->num_scalers = SKL_NUM_SCALERS; 13785 13786 skl_init_scalers(dev, intel_crtc, crtc_state); 13787 } 13788 13789 primary = intel_primary_plane_create(dev, pipe); 13790 if (!primary) 13791 goto fail; 13792 13793 cursor = intel_cursor_plane_create(dev, pipe); 13794 if (!cursor) 13795 goto fail; 13796 13797 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 13798 cursor, &intel_crtc_funcs); 13799 if (ret) 13800 goto fail; 13801 13802 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 13803 for (i = 0; i < 256; i++) { 13804 intel_crtc->lut_r[i] = i; 13805 intel_crtc->lut_g[i] = i; 13806 intel_crtc->lut_b[i] = i; 13807 } 13808 13809 /* 13810 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port 13811 * is hooked to pipe B. Hence we want plane A feeding pipe B. 13812 */ 13813 intel_crtc->pipe = pipe; 13814 intel_crtc->plane = pipe; 13815 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { 13816 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 13817 intel_crtc->plane = !pipe; 13818 } 13819 13820 intel_crtc->cursor_base = ~0; 13821 intel_crtc->cursor_cntl = ~0; 13822 intel_crtc->cursor_size = ~0; 13823 13824 intel_crtc->wm.cxsr_allowed = true; 13825 13826 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 13827 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 13828 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 13829 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 13830 13831 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 13832 13833 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 13834 return; 13835 13836 fail: 13837 if (primary) 13838 drm_plane_cleanup(primary); 13839 if (cursor) 13840 drm_plane_cleanup(cursor); 13841 kfree(crtc_state); 13842 kfree(intel_crtc); 13843 } 13844 13845 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 13846 { 13847 struct drm_encoder *encoder = connector->base.encoder; 13848 struct drm_device *dev = connector->base.dev; 13849 13850 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 13851 13852 if (!encoder || WARN_ON(!encoder->crtc)) 13853 return INVALID_PIPE; 13854 13855 return to_intel_crtc(encoder->crtc)->pipe; 13856 } 13857 13858 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13859 struct drm_file *file) 13860 { 13861 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13862 struct drm_crtc *drmmode_crtc; 13863 struct intel_crtc *crtc; 13864 13865 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 13866 13867 if (!drmmode_crtc) { 13868 DRM_ERROR("no such CRTC id\n"); 13869 return -ENOENT; 13870 } 13871 13872 crtc = to_intel_crtc(drmmode_crtc); 13873 pipe_from_crtc_id->pipe = crtc->pipe; 13874 13875 return 0; 13876 } 13877 13878 static int intel_encoder_clones(struct intel_encoder *encoder) 13879 { 13880 struct drm_device *dev = encoder->base.dev; 13881 struct intel_encoder *source_encoder; 13882 int index_mask = 0; 13883 int entry = 0; 13884 13885 for_each_intel_encoder(dev, source_encoder) { 13886 if (encoders_cloneable(encoder, source_encoder)) 13887 index_mask |= (1 << entry); 13888 13889 entry++; 13890 } 13891 13892 return index_mask; 13893 } 13894 13895 static bool has_edp_a(struct drm_device *dev) 13896 { 13897 struct drm_i915_private *dev_priv = dev->dev_private; 13898 13899 if (!IS_MOBILE(dev)) 13900 return false; 13901 13902 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 13903 return false; 13904 13905 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 13906 return false; 13907 13908 return true; 13909 } 13910 13911 static bool intel_crt_present(struct drm_device *dev) 13912 { 13913 struct drm_i915_private *dev_priv = dev->dev_private; 13914 13915 if (INTEL_INFO(dev)->gen >= 9) 13916 return false; 13917 13918 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 13919 return false; 13920 13921 if (IS_CHERRYVIEW(dev)) 13922 return false; 13923 13924 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support) 13925 return false; 13926 13927 return true; 13928 } 13929 13930 static void intel_setup_outputs(struct drm_device *dev) 13931 { 13932 struct drm_i915_private *dev_priv = dev->dev_private; 13933 struct intel_encoder *encoder; 13934 bool dpd_is_edp = false; 13935 13936 intel_lvds_init(dev); 13937 13938 if (intel_crt_present(dev)) 13939 intel_crt_init(dev); 13940 13941 if (IS_BROXTON(dev)) { 13942 /* 13943 * FIXME: Broxton doesn't support port detection via the 13944 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 13945 * detect the ports. 13946 */ 13947 intel_ddi_init(dev, PORT_A); 13948 intel_ddi_init(dev, PORT_B); 13949 intel_ddi_init(dev, PORT_C); 13950 } else if (HAS_DDI(dev)) { 13951 int found; 13952 13953 /* 13954 * Haswell uses DDI functions to detect digital outputs. 13955 * On SKL pre-D0 the strap isn't connected, so we assume 13956 * it's there. 13957 */ 13958 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; 13959 /* WaIgnoreDDIAStrap: skl */ 13960 if (found || IS_SKYLAKE(dev)) 13961 intel_ddi_init(dev, PORT_A); 13962 13963 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 13964 * register */ 13965 found = I915_READ(SFUSE_STRAP); 13966 13967 if (found & SFUSE_STRAP_DDIB_DETECTED) 13968 intel_ddi_init(dev, PORT_B); 13969 if (found & SFUSE_STRAP_DDIC_DETECTED) 13970 intel_ddi_init(dev, PORT_C); 13971 if (found & SFUSE_STRAP_DDID_DETECTED) 13972 intel_ddi_init(dev, PORT_D); 13973 /* 13974 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 13975 */ 13976 if (IS_SKYLAKE(dev) && 13977 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 13978 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 13979 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 13980 intel_ddi_init(dev, PORT_E); 13981 13982 } else if (HAS_PCH_SPLIT(dev)) { 13983 int found; 13984 dpd_is_edp = intel_dp_is_edp(dev, PORT_D); 13985 13986 if (has_edp_a(dev)) 13987 intel_dp_init(dev, DP_A, PORT_A); 13988 13989 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 13990 /* PCH SDVOB multiplex with HDMIB */ 13991 found = intel_sdvo_init(dev, PCH_SDVOB, true); 13992 if (!found) 13993 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 13994 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 13995 intel_dp_init(dev, PCH_DP_B, PORT_B); 13996 } 13997 13998 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 13999 intel_hdmi_init(dev, PCH_HDMIC, PORT_C); 14000 14001 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 14002 intel_hdmi_init(dev, PCH_HDMID, PORT_D); 14003 14004 if (I915_READ(PCH_DP_C) & DP_DETECTED) 14005 intel_dp_init(dev, PCH_DP_C, PORT_C); 14006 14007 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14008 intel_dp_init(dev, PCH_DP_D, PORT_D); 14009 } else if (IS_VALLEYVIEW(dev)) { 14010 /* 14011 * The DP_DETECTED bit is the latched state of the DDC 14012 * SDA pin at boot. However since eDP doesn't require DDC 14013 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14014 * eDP ports may have been muxed to an alternate function. 14015 * Thus we can't rely on the DP_DETECTED bit alone to detect 14016 * eDP ports. Consult the VBT as well as DP_DETECTED to 14017 * detect eDP ports. 14018 */ 14019 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED && 14020 !intel_dp_is_edp(dev, PORT_B)) 14021 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 14022 PORT_B); 14023 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED || 14024 intel_dp_is_edp(dev, PORT_B)) 14025 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 14026 14027 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED && 14028 !intel_dp_is_edp(dev, PORT_C)) 14029 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 14030 PORT_C); 14031 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED || 14032 intel_dp_is_edp(dev, PORT_C)) 14033 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 14034 14035 if (IS_CHERRYVIEW(dev)) { 14036 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) 14037 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, 14038 PORT_D); 14039 /* eDP not supported on port D, so don't check VBT */ 14040 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 14041 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 14042 } 14043 14044 intel_dsi_init(dev); 14045 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) { 14046 bool found = false; 14047 14048 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14049 DRM_DEBUG_KMS("probing SDVOB\n"); 14050 found = intel_sdvo_init(dev, GEN3_SDVOB, true); 14051 if (!found && IS_G4X(dev)) { 14052 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14053 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 14054 } 14055 14056 if (!found && IS_G4X(dev)) 14057 intel_dp_init(dev, DP_B, PORT_B); 14058 } 14059 14060 /* Before G4X SDVOC doesn't have its own detect register */ 14061 14062 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14063 DRM_DEBUG_KMS("probing SDVOC\n"); 14064 found = intel_sdvo_init(dev, GEN3_SDVOC, false); 14065 } 14066 14067 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14068 14069 if (IS_G4X(dev)) { 14070 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14071 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); 14072 } 14073 if (IS_G4X(dev)) 14074 intel_dp_init(dev, DP_C, PORT_C); 14075 } 14076 14077 if (IS_G4X(dev) && 14078 (I915_READ(DP_D) & DP_DETECTED)) 14079 intel_dp_init(dev, DP_D, PORT_D); 14080 } else if (IS_GEN2(dev)) 14081 intel_dvo_init(dev); 14082 14083 if (SUPPORTS_TV(dev)) 14084 intel_tv_init(dev); 14085 14086 intel_psr_init(dev); 14087 14088 for_each_intel_encoder(dev, encoder) { 14089 encoder->base.possible_crtcs = encoder->crtc_mask; 14090 encoder->base.possible_clones = 14091 intel_encoder_clones(encoder); 14092 } 14093 14094 intel_init_pch_refclk(dev); 14095 14096 drm_helper_move_panel_connectors_to_head(dev); 14097 } 14098 14099 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14100 { 14101 struct drm_device *dev = fb->dev; 14102 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14103 14104 drm_framebuffer_cleanup(fb); 14105 mutex_lock(&dev->struct_mutex); 14106 WARN_ON(!intel_fb->obj->framebuffer_references--); 14107 drm_gem_object_unreference(&intel_fb->obj->base); 14108 mutex_unlock(&dev->struct_mutex); 14109 kfree(intel_fb); 14110 } 14111 14112 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 14113 struct drm_file *file, 14114 unsigned int *handle) 14115 { 14116 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14117 struct drm_i915_gem_object *obj = intel_fb->obj; 14118 14119 if (obj->userptr.mm) { 14120 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 14121 return -EINVAL; 14122 } 14123 14124 return drm_gem_handle_create(file, &obj->base, handle); 14125 } 14126 14127 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 14128 struct drm_file *file, 14129 unsigned flags, unsigned color, 14130 struct drm_clip_rect *clips, 14131 unsigned num_clips) 14132 { 14133 struct drm_device *dev = fb->dev; 14134 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14135 struct drm_i915_gem_object *obj = intel_fb->obj; 14136 14137 mutex_lock(&dev->struct_mutex); 14138 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 14139 mutex_unlock(&dev->struct_mutex); 14140 14141 return 0; 14142 } 14143 14144 static const struct drm_framebuffer_funcs intel_fb_funcs = { 14145 .destroy = intel_user_framebuffer_destroy, 14146 .create_handle = intel_user_framebuffer_create_handle, 14147 .dirty = intel_user_framebuffer_dirty, 14148 }; 14149 14150 static 14151 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, 14152 uint32_t pixel_format) 14153 { 14154 u32 gen = INTEL_INFO(dev)->gen; 14155 14156 if (gen >= 9) { 14157 /* "The stride in bytes must not exceed the of the size of 8K 14158 * pixels and 32K bytes." 14159 */ 14160 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); 14161 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) { 14162 return 32*1024; 14163 } else if (gen >= 4) { 14164 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14165 return 16*1024; 14166 else 14167 return 32*1024; 14168 } else if (gen >= 3) { 14169 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14170 return 8*1024; 14171 else 14172 return 16*1024; 14173 } else { 14174 /* XXX DSPC is limited to 4k tiled */ 14175 return 8*1024; 14176 } 14177 } 14178 14179 static int intel_framebuffer_init(struct drm_device *dev, 14180 struct intel_framebuffer *intel_fb, 14181 struct drm_mode_fb_cmd2 *mode_cmd, 14182 struct drm_i915_gem_object *obj) 14183 { 14184 unsigned int aligned_height; 14185 int ret; 14186 u32 pitch_limit, stride_alignment; 14187 14188 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 14189 14190 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 14191 /* Enforce that fb modifier and tiling mode match, but only for 14192 * X-tiled. This is needed for FBC. */ 14193 if (!!(obj->tiling_mode == I915_TILING_X) != 14194 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { 14195 DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); 14196 return -EINVAL; 14197 } 14198 } else { 14199 if (obj->tiling_mode == I915_TILING_X) 14200 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 14201 else if (obj->tiling_mode == I915_TILING_Y) { 14202 DRM_DEBUG("No Y tiling for legacy addfb\n"); 14203 return -EINVAL; 14204 } 14205 } 14206 14207 /* Passed in modifier sanity checking. */ 14208 switch (mode_cmd->modifier[0]) { 14209 case I915_FORMAT_MOD_Y_TILED: 14210 case I915_FORMAT_MOD_Yf_TILED: 14211 if (INTEL_INFO(dev)->gen < 9) { 14212 DRM_DEBUG("Unsupported tiling 0x%lx!\n", 14213 mode_cmd->modifier[0]); 14214 return -EINVAL; 14215 } 14216 case DRM_FORMAT_MOD_NONE: 14217 case I915_FORMAT_MOD_X_TILED: 14218 break; 14219 default: 14220 DRM_DEBUG("Unsupported fb modifier 0x%lx!\n", 14221 mode_cmd->modifier[0]); 14222 return -EINVAL; 14223 } 14224 14225 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], 14226 mode_cmd->pixel_format); 14227 if (mode_cmd->pitches[0] & (stride_alignment - 1)) { 14228 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", 14229 mode_cmd->pitches[0], stride_alignment); 14230 return -EINVAL; 14231 } 14232 14233 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], 14234 mode_cmd->pixel_format); 14235 if (mode_cmd->pitches[0] > pitch_limit) { 14236 DRM_DEBUG("%s pitch (%u) must be at less than %d\n", 14237 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? 14238 "tiled" : "linear", 14239 mode_cmd->pitches[0], pitch_limit); 14240 return -EINVAL; 14241 } 14242 14243 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && 14244 mode_cmd->pitches[0] != obj->stride) { 14245 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 14246 mode_cmd->pitches[0], obj->stride); 14247 return -EINVAL; 14248 } 14249 14250 /* Reject formats not supported by any plane early. */ 14251 switch (mode_cmd->pixel_format) { 14252 case DRM_FORMAT_C8: 14253 case DRM_FORMAT_RGB565: 14254 case DRM_FORMAT_XRGB8888: 14255 case DRM_FORMAT_ARGB8888: 14256 break; 14257 case DRM_FORMAT_XRGB1555: 14258 if (INTEL_INFO(dev)->gen > 3) { 14259 DRM_DEBUG("unsupported pixel format: %s\n", 14260 drm_get_format_name(mode_cmd->pixel_format)); 14261 return -EINVAL; 14262 } 14263 break; 14264 case DRM_FORMAT_ABGR8888: 14265 if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) { 14266 DRM_DEBUG("unsupported pixel format: %s\n", 14267 drm_get_format_name(mode_cmd->pixel_format)); 14268 return -EINVAL; 14269 } 14270 break; 14271 case DRM_FORMAT_XBGR8888: 14272 case DRM_FORMAT_XRGB2101010: 14273 case DRM_FORMAT_XBGR2101010: 14274 if (INTEL_INFO(dev)->gen < 4) { 14275 DRM_DEBUG("unsupported pixel format: %s\n", 14276 drm_get_format_name(mode_cmd->pixel_format)); 14277 return -EINVAL; 14278 } 14279 break; 14280 case DRM_FORMAT_ABGR2101010: 14281 if (!IS_VALLEYVIEW(dev)) { 14282 DRM_DEBUG("unsupported pixel format: %s\n", 14283 drm_get_format_name(mode_cmd->pixel_format)); 14284 return -EINVAL; 14285 } 14286 break; 14287 case DRM_FORMAT_YUYV: 14288 case DRM_FORMAT_UYVY: 14289 case DRM_FORMAT_YVYU: 14290 case DRM_FORMAT_VYUY: 14291 if (INTEL_INFO(dev)->gen < 5) { 14292 DRM_DEBUG("unsupported pixel format: %s\n", 14293 drm_get_format_name(mode_cmd->pixel_format)); 14294 return -EINVAL; 14295 } 14296 break; 14297 default: 14298 DRM_DEBUG("unsupported pixel format: %s\n", 14299 drm_get_format_name(mode_cmd->pixel_format)); 14300 return -EINVAL; 14301 } 14302 14303 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 14304 if (mode_cmd->offsets[0] != 0) 14305 return -EINVAL; 14306 14307 aligned_height = intel_fb_align_height(dev, mode_cmd->height, 14308 mode_cmd->pixel_format, 14309 mode_cmd->modifier[0]); 14310 /* FIXME drm helper for size checks (especially planar formats)? */ 14311 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 14312 return -EINVAL; 14313 14314 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 14315 intel_fb->obj = obj; 14316 intel_fb->obj->framebuffer_references++; 14317 14318 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 14319 if (ret) { 14320 DRM_ERROR("framebuffer init failed %d\n", ret); 14321 return ret; 14322 } 14323 14324 return 0; 14325 } 14326 14327 static struct drm_framebuffer * 14328 intel_user_framebuffer_create(struct drm_device *dev, 14329 struct drm_file *filp, 14330 struct drm_mode_fb_cmd2 *mode_cmd) 14331 { 14332 struct drm_i915_gem_object *obj; 14333 14334 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 14335 mode_cmd->handles[0])); 14336 if (&obj->base == NULL) 14337 return ERR_PTR(-ENOENT); 14338 14339 return intel_framebuffer_create(dev, mode_cmd, obj); 14340 } 14341 14342 #ifndef CONFIG_DRM_FBDEV_EMULATION 14343 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 14344 { 14345 } 14346 #endif 14347 14348 static const struct drm_mode_config_funcs intel_mode_funcs = { 14349 .fb_create = intel_user_framebuffer_create, 14350 .output_poll_changed = intel_fbdev_output_poll_changed, 14351 .atomic_check = intel_atomic_check, 14352 .atomic_commit = intel_atomic_commit, 14353 .atomic_state_alloc = intel_atomic_state_alloc, 14354 .atomic_state_clear = intel_atomic_state_clear, 14355 }; 14356 14357 /* Set up chip specific display functions */ 14358 static void intel_init_display(struct drm_device *dev) 14359 { 14360 struct drm_i915_private *dev_priv = dev->dev_private; 14361 14362 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) 14363 dev_priv->display.find_dpll = g4x_find_best_dpll; 14364 else if (IS_CHERRYVIEW(dev)) 14365 dev_priv->display.find_dpll = chv_find_best_dpll; 14366 else if (IS_VALLEYVIEW(dev)) 14367 dev_priv->display.find_dpll = vlv_find_best_dpll; 14368 else if (IS_PINEVIEW(dev)) 14369 dev_priv->display.find_dpll = pnv_find_best_dpll; 14370 else 14371 dev_priv->display.find_dpll = i9xx_find_best_dpll; 14372 14373 if (INTEL_INFO(dev)->gen >= 9) { 14374 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14375 dev_priv->display.get_initial_plane_config = 14376 skylake_get_initial_plane_config; 14377 dev_priv->display.crtc_compute_clock = 14378 haswell_crtc_compute_clock; 14379 dev_priv->display.crtc_enable = haswell_crtc_enable; 14380 dev_priv->display.crtc_disable = haswell_crtc_disable; 14381 dev_priv->display.update_primary_plane = 14382 skylake_update_primary_plane; 14383 } else if (HAS_DDI(dev)) { 14384 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14385 dev_priv->display.get_initial_plane_config = 14386 ironlake_get_initial_plane_config; 14387 dev_priv->display.crtc_compute_clock = 14388 haswell_crtc_compute_clock; 14389 dev_priv->display.crtc_enable = haswell_crtc_enable; 14390 dev_priv->display.crtc_disable = haswell_crtc_disable; 14391 dev_priv->display.update_primary_plane = 14392 ironlake_update_primary_plane; 14393 } else if (HAS_PCH_SPLIT(dev)) { 14394 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14395 dev_priv->display.get_initial_plane_config = 14396 ironlake_get_initial_plane_config; 14397 dev_priv->display.crtc_compute_clock = 14398 ironlake_crtc_compute_clock; 14399 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14400 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14401 dev_priv->display.update_primary_plane = 14402 ironlake_update_primary_plane; 14403 } else if (IS_VALLEYVIEW(dev)) { 14404 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14405 dev_priv->display.get_initial_plane_config = 14406 i9xx_get_initial_plane_config; 14407 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14408 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14409 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14410 dev_priv->display.update_primary_plane = 14411 i9xx_update_primary_plane; 14412 } else { 14413 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14414 dev_priv->display.get_initial_plane_config = 14415 i9xx_get_initial_plane_config; 14416 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14417 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14418 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14419 dev_priv->display.update_primary_plane = 14420 i9xx_update_primary_plane; 14421 } 14422 14423 /* Returns the core display clock speed */ 14424 if (IS_SKYLAKE(dev)) 14425 dev_priv->display.get_display_clock_speed = 14426 skylake_get_display_clock_speed; 14427 else if (IS_BROXTON(dev)) 14428 dev_priv->display.get_display_clock_speed = 14429 broxton_get_display_clock_speed; 14430 else if (IS_BROADWELL(dev)) 14431 dev_priv->display.get_display_clock_speed = 14432 broadwell_get_display_clock_speed; 14433 else if (IS_HASWELL(dev)) 14434 dev_priv->display.get_display_clock_speed = 14435 haswell_get_display_clock_speed; 14436 else if (IS_VALLEYVIEW(dev)) 14437 dev_priv->display.get_display_clock_speed = 14438 valleyview_get_display_clock_speed; 14439 else if (IS_GEN5(dev)) 14440 dev_priv->display.get_display_clock_speed = 14441 ilk_get_display_clock_speed; 14442 else if (IS_I945G(dev) || IS_BROADWATER(dev) || 14443 IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 14444 dev_priv->display.get_display_clock_speed = 14445 i945_get_display_clock_speed; 14446 else if (IS_GM45(dev)) 14447 dev_priv->display.get_display_clock_speed = 14448 gm45_get_display_clock_speed; 14449 else if (IS_CRESTLINE(dev)) 14450 dev_priv->display.get_display_clock_speed = 14451 i965gm_get_display_clock_speed; 14452 else if (IS_PINEVIEW(dev)) 14453 dev_priv->display.get_display_clock_speed = 14454 pnv_get_display_clock_speed; 14455 else if (IS_G33(dev) || IS_G4X(dev)) 14456 dev_priv->display.get_display_clock_speed = 14457 g33_get_display_clock_speed; 14458 else if (IS_I915G(dev)) 14459 dev_priv->display.get_display_clock_speed = 14460 i915_get_display_clock_speed; 14461 else if (IS_I945GM(dev) || IS_845G(dev)) 14462 dev_priv->display.get_display_clock_speed = 14463 i9xx_misc_get_display_clock_speed; 14464 else if (IS_PINEVIEW(dev)) 14465 dev_priv->display.get_display_clock_speed = 14466 pnv_get_display_clock_speed; 14467 else if (IS_I915GM(dev)) 14468 dev_priv->display.get_display_clock_speed = 14469 i915gm_get_display_clock_speed; 14470 else if (IS_I865G(dev)) 14471 dev_priv->display.get_display_clock_speed = 14472 i865_get_display_clock_speed; 14473 else if (IS_I85X(dev)) 14474 dev_priv->display.get_display_clock_speed = 14475 i85x_get_display_clock_speed; 14476 else { /* 830 */ 14477 WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n"); 14478 dev_priv->display.get_display_clock_speed = 14479 i830_get_display_clock_speed; 14480 } 14481 14482 if (IS_GEN5(dev)) { 14483 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14484 } else if (IS_GEN6(dev)) { 14485 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14486 } else if (IS_IVYBRIDGE(dev)) { 14487 /* FIXME: detect B0+ stepping and use auto training */ 14488 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14489 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 14490 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14491 if (IS_BROADWELL(dev)) { 14492 dev_priv->display.modeset_commit_cdclk = 14493 broadwell_modeset_commit_cdclk; 14494 dev_priv->display.modeset_calc_cdclk = 14495 broadwell_modeset_calc_cdclk; 14496 } 14497 } else if (IS_VALLEYVIEW(dev)) { 14498 dev_priv->display.modeset_commit_cdclk = 14499 valleyview_modeset_commit_cdclk; 14500 dev_priv->display.modeset_calc_cdclk = 14501 valleyview_modeset_calc_cdclk; 14502 } else if (IS_BROXTON(dev)) { 14503 dev_priv->display.modeset_commit_cdclk = 14504 broxton_modeset_commit_cdclk; 14505 dev_priv->display.modeset_calc_cdclk = 14506 broxton_modeset_calc_cdclk; 14507 } 14508 14509 switch (INTEL_INFO(dev)->gen) { 14510 case 2: 14511 dev_priv->display.queue_flip = intel_gen2_queue_flip; 14512 break; 14513 14514 case 3: 14515 dev_priv->display.queue_flip = intel_gen3_queue_flip; 14516 break; 14517 14518 case 4: 14519 case 5: 14520 dev_priv->display.queue_flip = intel_gen4_queue_flip; 14521 break; 14522 14523 case 6: 14524 dev_priv->display.queue_flip = intel_gen6_queue_flip; 14525 break; 14526 case 7: 14527 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 14528 dev_priv->display.queue_flip = intel_gen7_queue_flip; 14529 break; 14530 case 9: 14531 /* Drop through - unsupported since execlist only. */ 14532 default: 14533 /* Default just returns -ENODEV to indicate unsupported */ 14534 dev_priv->display.queue_flip = intel_default_queue_flip; 14535 } 14536 14537 intel_panel_init_backlight_funcs(dev); 14538 14539 lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE); 14540 } 14541 14542 /* 14543 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 14544 * resume, or other times. This quirk makes sure that's the case for 14545 * affected systems. 14546 */ 14547 static void quirk_pipea_force(struct drm_device *dev) 14548 { 14549 struct drm_i915_private *dev_priv = dev->dev_private; 14550 14551 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 14552 DRM_INFO("applying pipe a force quirk\n"); 14553 } 14554 14555 static void quirk_pipeb_force(struct drm_device *dev) 14556 { 14557 struct drm_i915_private *dev_priv = dev->dev_private; 14558 14559 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 14560 DRM_INFO("applying pipe b force quirk\n"); 14561 } 14562 14563 /* 14564 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14565 */ 14566 static void quirk_ssc_force_disable(struct drm_device *dev) 14567 { 14568 struct drm_i915_private *dev_priv = dev->dev_private; 14569 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 14570 DRM_INFO("applying lvds SSC disable quirk\n"); 14571 } 14572 14573 /* 14574 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 14575 * brightness value 14576 */ 14577 static void quirk_invert_brightness(struct drm_device *dev) 14578 { 14579 struct drm_i915_private *dev_priv = dev->dev_private; 14580 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 14581 DRM_INFO("applying inverted panel brightness quirk\n"); 14582 } 14583 14584 /* Some VBT's incorrectly indicate no backlight is present */ 14585 static void quirk_backlight_present(struct drm_device *dev) 14586 { 14587 struct drm_i915_private *dev_priv = dev->dev_private; 14588 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 14589 DRM_INFO("applying backlight present quirk\n"); 14590 } 14591 14592 struct intel_quirk { 14593 int device; 14594 int subsystem_vendor; 14595 int subsystem_device; 14596 void (*hook)(struct drm_device *dev); 14597 }; 14598 14599 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 14600 struct intel_dmi_quirk { 14601 void (*hook)(struct drm_device *dev); 14602 const struct dmi_system_id (*dmi_id_list)[]; 14603 }; 14604 14605 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 14606 { 14607 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 14608 return 1; 14609 } 14610 14611 static const struct intel_dmi_quirk intel_dmi_quirks[] = { 14612 { 14613 .dmi_id_list = &(const struct dmi_system_id[]) { 14614 { 14615 .callback = intel_dmi_reverse_brightness, 14616 .ident = "NCR Corporation", 14617 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 14618 DMI_MATCH(DMI_PRODUCT_NAME, ""), 14619 }, 14620 }, 14621 { } /* terminating entry */ 14622 }, 14623 .hook = quirk_invert_brightness, 14624 }, 14625 }; 14626 14627 static struct intel_quirk intel_quirks[] = { 14628 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 14629 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 14630 14631 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 14632 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 14633 14634 /* 830 needs to leave pipe A & dpll A up */ 14635 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 14636 14637 /* 830 needs to leave pipe B & dpll B up */ 14638 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force }, 14639 14640 /* Lenovo U160 cannot use SSC on LVDS */ 14641 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14642 14643 /* Sony Vaio Y cannot use SSC on LVDS */ 14644 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 14645 14646 /* Acer Aspire 5734Z must invert backlight brightness */ 14647 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 14648 14649 /* Acer/eMachines G725 */ 14650 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 14651 14652 /* Acer/eMachines e725 */ 14653 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 14654 14655 /* Acer/Packard Bell NCL20 */ 14656 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 14657 14658 /* Acer Aspire 4736Z */ 14659 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 14660 14661 /* Acer Aspire 5336 */ 14662 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 14663 14664 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ 14665 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 14666 14667 /* Acer C720 Chromebook (Core i3 4005U) */ 14668 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 14669 14670 /* Apple Macbook 2,1 (Core 2 T7400) */ 14671 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14672 14673 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14674 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 14675 14676 /* HP Chromebook 14 (Celeron 2955U) */ 14677 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 14678 14679 /* Dell Chromebook 11 */ 14680 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14681 }; 14682 14683 static void intel_init_quirks(struct drm_device *dev) 14684 { 14685 struct pci_dev *d = dev->pdev; 14686 int i; 14687 14688 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 14689 struct intel_quirk *q = &intel_quirks[i]; 14690 14691 if (d->device == q->device && 14692 (d->subsystem_vendor == q->subsystem_vendor || 14693 q->subsystem_vendor == PCI_ANY_ID) && 14694 (d->subsystem_device == q->subsystem_device || 14695 q->subsystem_device == PCI_ANY_ID)) 14696 q->hook(dev); 14697 } 14698 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 14699 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 14700 intel_dmi_quirks[i].hook(dev); 14701 } 14702 } 14703 14704 /* Disable the VGA plane that we never use */ 14705 static void i915_disable_vga(struct drm_device *dev) 14706 { 14707 struct drm_i915_private *dev_priv = dev->dev_private; 14708 u8 sr1; 14709 u32 vga_reg = i915_vgacntrl_reg(dev); 14710 14711 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 14712 #if 0 14713 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 14714 #endif 14715 outb(VGA_SR_INDEX, SR01); 14716 sr1 = inb(VGA_SR_DATA); 14717 outb(VGA_SR_DATA, sr1 | 1 << 5); 14718 #if 0 14719 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 14720 #endif 14721 udelay(300); 14722 14723 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 14724 POSTING_READ(vga_reg); 14725 } 14726 14727 void intel_modeset_init_hw(struct drm_device *dev) 14728 { 14729 intel_update_cdclk(dev); 14730 intel_prepare_ddi(dev); 14731 intel_init_clock_gating(dev); 14732 intel_enable_gt_powersave(dev); 14733 } 14734 14735 void intel_modeset_init(struct drm_device *dev) 14736 { 14737 struct drm_i915_private *dev_priv = dev->dev_private; 14738 int sprite, ret; 14739 enum i915_pipe pipe; 14740 struct intel_crtc *crtc; 14741 14742 drm_mode_config_init(dev); 14743 14744 dev->mode_config.min_width = 0; 14745 dev->mode_config.min_height = 0; 14746 14747 dev->mode_config.preferred_depth = 24; 14748 dev->mode_config.prefer_shadow = 1; 14749 14750 dev->mode_config.allow_fb_modifiers = true; 14751 14752 dev->mode_config.funcs = &intel_mode_funcs; 14753 14754 intel_init_quirks(dev); 14755 14756 intel_init_pm(dev); 14757 14758 if (INTEL_INFO(dev)->num_pipes == 0) 14759 return; 14760 14761 /* 14762 * There may be no VBT; and if the BIOS enabled SSC we can 14763 * just keep using it to avoid unnecessary flicker. Whereas if the 14764 * BIOS isn't using it, don't assume it will work even if the VBT 14765 * indicates as much. 14766 */ 14767 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 14768 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 14769 DREF_SSC1_ENABLE); 14770 14771 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 14772 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 14773 bios_lvds_use_ssc ? "en" : "dis", 14774 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 14775 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 14776 } 14777 } 14778 14779 intel_init_display(dev); 14780 intel_init_audio(dev); 14781 14782 if (IS_GEN2(dev)) { 14783 dev->mode_config.max_width = 2048; 14784 dev->mode_config.max_height = 2048; 14785 } else if (IS_GEN3(dev)) { 14786 dev->mode_config.max_width = 4096; 14787 dev->mode_config.max_height = 4096; 14788 } else { 14789 dev->mode_config.max_width = 8192; 14790 dev->mode_config.max_height = 8192; 14791 } 14792 14793 if (IS_845G(dev) || IS_I865G(dev)) { 14794 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; 14795 dev->mode_config.cursor_height = 1023; 14796 } else if (IS_GEN2(dev)) { 14797 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 14798 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 14799 } else { 14800 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 14801 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 14802 } 14803 14804 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 14805 14806 DRM_DEBUG_KMS("%d display pipe%s available.\n", 14807 INTEL_INFO(dev)->num_pipes, 14808 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 14809 14810 for_each_pipe(dev_priv, pipe) { 14811 intel_crtc_init(dev, pipe); 14812 for_each_sprite(dev_priv, pipe, sprite) { 14813 ret = intel_plane_init(dev, pipe, sprite); 14814 if (ret) 14815 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 14816 pipe_name(pipe), sprite_name(pipe, sprite), ret); 14817 } 14818 } 14819 14820 intel_init_dpio(dev); 14821 14822 intel_shared_dpll_init(dev); 14823 14824 /* Just disable it once at startup */ 14825 i915_disable_vga(dev); 14826 intel_setup_outputs(dev); 14827 14828 /* Just in case the BIOS is doing something questionable. */ 14829 intel_fbc_disable(dev_priv); 14830 14831 drm_modeset_lock_all(dev); 14832 intel_modeset_setup_hw_state(dev); 14833 drm_modeset_unlock_all(dev); 14834 14835 for_each_intel_crtc(dev, crtc) { 14836 struct intel_initial_plane_config plane_config = {}; 14837 14838 if (!crtc->active) 14839 continue; 14840 14841 /* 14842 * Note that reserving the BIOS fb up front prevents us 14843 * from stuffing other stolen allocations like the ring 14844 * on top. This prevents some ugliness at boot time, and 14845 * can even allow for smooth boot transitions if the BIOS 14846 * fb is large enough for the active pipe configuration. 14847 */ 14848 dev_priv->display.get_initial_plane_config(crtc, 14849 &plane_config); 14850 14851 /* 14852 * If the fb is shared between multiple heads, we'll 14853 * just get the first one. 14854 */ 14855 intel_find_initial_plane_obj(crtc, &plane_config); 14856 } 14857 } 14858 14859 static void intel_enable_pipe_a(struct drm_device *dev) 14860 { 14861 struct intel_connector *connector; 14862 struct drm_connector *crt = NULL; 14863 struct intel_load_detect_pipe load_detect_temp; 14864 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 14865 14866 /* We can't just switch on the pipe A, we need to set things up with a 14867 * proper mode and output configuration. As a gross hack, enable pipe A 14868 * by enabling the load detect pipe once. */ 14869 for_each_intel_connector(dev, connector) { 14870 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 14871 crt = &connector->base; 14872 break; 14873 } 14874 } 14875 14876 if (!crt) 14877 return; 14878 14879 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) 14880 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 14881 } 14882 14883 static bool 14884 intel_check_plane_mapping(struct intel_crtc *crtc) 14885 { 14886 struct drm_device *dev = crtc->base.dev; 14887 struct drm_i915_private *dev_priv = dev->dev_private; 14888 u32 reg, val; 14889 14890 if (INTEL_INFO(dev)->num_pipes == 1) 14891 return true; 14892 14893 reg = DSPCNTR(!crtc->plane); 14894 val = I915_READ(reg); 14895 14896 if ((val & DISPLAY_PLANE_ENABLE) && 14897 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 14898 return false; 14899 14900 return true; 14901 } 14902 14903 static void intel_sanitize_crtc(struct intel_crtc *crtc) 14904 { 14905 struct drm_device *dev = crtc->base.dev; 14906 struct drm_i915_private *dev_priv = dev->dev_private; 14907 struct intel_encoder *encoder; 14908 u32 reg; 14909 bool enable; 14910 14911 /* Clear any frame start delays used for debugging left by the BIOS */ 14912 reg = PIPECONF(crtc->config->cpu_transcoder); 14913 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 14914 14915 /* restore vblank interrupts to correct state */ 14916 drm_crtc_vblank_reset(&crtc->base); 14917 if (crtc->active) { 14918 struct intel_plane *plane; 14919 14920 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 14921 update_scanline_offset(crtc); 14922 drm_crtc_vblank_on(&crtc->base); 14923 14924 /* Disable everything but the primary plane */ 14925 for_each_intel_plane_on_crtc(dev, crtc, plane) { 14926 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 14927 continue; 14928 14929 plane->disable_plane(&plane->base, &crtc->base); 14930 } 14931 } 14932 14933 /* We need to sanitize the plane -> pipe mapping first because this will 14934 * disable the crtc (and hence change the state) if it is wrong. Note 14935 * that gen4+ has a fixed plane -> pipe mapping. */ 14936 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 14937 bool plane; 14938 14939 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 14940 crtc->base.base.id); 14941 14942 /* Pipe has the wrong plane attached and the plane is active. 14943 * Temporarily change the plane mapping and disable everything 14944 * ... */ 14945 plane = crtc->plane; 14946 to_intel_plane_state(crtc->base.primary->state)->visible = true; 14947 crtc->plane = !plane; 14948 intel_crtc_disable_noatomic(&crtc->base); 14949 crtc->plane = plane; 14950 } 14951 14952 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 14953 crtc->pipe == PIPE_A && !crtc->active) { 14954 /* BIOS forgot to enable pipe A, this mostly happens after 14955 * resume. Force-enable the pipe to fix this, the update_dpms 14956 * call below we restore the pipe to the right state, but leave 14957 * the required bits on. */ 14958 intel_enable_pipe_a(dev); 14959 } 14960 14961 /* Adjust the state of the output pipe according to whether we 14962 * have active connectors/encoders. */ 14963 enable = false; 14964 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 14965 enable = true; 14966 break; 14967 } 14968 14969 if (!enable) 14970 intel_crtc_disable_noatomic(&crtc->base); 14971 14972 if (crtc->active != crtc->base.state->active) { 14973 14974 /* This can happen either due to bugs in the get_hw_state 14975 * functions or because of calls to intel_crtc_disable_noatomic, 14976 * or because the pipe is force-enabled due to the 14977 * pipe A quirk. */ 14978 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 14979 crtc->base.base.id, 14980 crtc->base.state->enable ? "enabled" : "disabled", 14981 crtc->active ? "enabled" : "disabled"); 14982 14983 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); 14984 crtc->base.state->active = crtc->active; 14985 crtc->base.enabled = crtc->active; 14986 14987 /* Because we only establish the connector -> encoder -> 14988 * crtc links if something is active, this means the 14989 * crtc is now deactivated. Break the links. connector 14990 * -> encoder links are only establish when things are 14991 * actually up, hence no need to break them. */ 14992 WARN_ON(crtc->active); 14993 14994 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 14995 encoder->base.crtc = NULL; 14996 } 14997 14998 if (crtc->active || HAS_GMCH_DISPLAY(dev)) { 14999 /* 15000 * We start out with underrun reporting disabled to avoid races. 15001 * For correct bookkeeping mark this on active crtcs. 15002 * 15003 * Also on gmch platforms we dont have any hardware bits to 15004 * disable the underrun reporting. Which means we need to start 15005 * out with underrun reporting disabled also on inactive pipes, 15006 * since otherwise we'll complain about the garbage we read when 15007 * e.g. coming up after runtime pm. 15008 * 15009 * No protection against concurrent access is required - at 15010 * worst a fifo underrun happens which also sets this to false. 15011 */ 15012 crtc->cpu_fifo_underrun_disabled = true; 15013 crtc->pch_fifo_underrun_disabled = true; 15014 } 15015 } 15016 15017 static void intel_sanitize_encoder(struct intel_encoder *encoder) 15018 { 15019 struct intel_connector *connector; 15020 struct drm_device *dev = encoder->base.dev; 15021 bool active = false; 15022 15023 /* We need to check both for a crtc link (meaning that the 15024 * encoder is active and trying to read from a pipe) and the 15025 * pipe itself being active. */ 15026 bool has_active_crtc = encoder->base.crtc && 15027 to_intel_crtc(encoder->base.crtc)->active; 15028 15029 for_each_intel_connector(dev, connector) { 15030 if (connector->base.encoder != &encoder->base) 15031 continue; 15032 15033 active = true; 15034 break; 15035 } 15036 15037 if (active && !has_active_crtc) { 15038 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 15039 encoder->base.base.id, 15040 encoder->base.name); 15041 15042 /* Connector is active, but has no active pipe. This is 15043 * fallout from our resume register restoring. Disable 15044 * the encoder manually again. */ 15045 if (encoder->base.crtc) { 15046 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15047 encoder->base.base.id, 15048 encoder->base.name); 15049 encoder->disable(encoder); 15050 if (encoder->post_disable) 15051 encoder->post_disable(encoder); 15052 } 15053 encoder->base.crtc = NULL; 15054 15055 /* Inconsistent output/port/pipe state happens presumably due to 15056 * a bug in one of the get_hw_state functions. Or someplace else 15057 * in our code, like the register restore mess on resume. Clamp 15058 * things to off as a safer default. */ 15059 for_each_intel_connector(dev, connector) { 15060 if (connector->encoder != encoder) 15061 continue; 15062 connector->base.dpms = DRM_MODE_DPMS_OFF; 15063 connector->base.encoder = NULL; 15064 } 15065 } 15066 /* Enabled encoders without active connectors will be fixed in 15067 * the crtc fixup. */ 15068 } 15069 15070 void i915_redisable_vga_power_on(struct drm_device *dev) 15071 { 15072 struct drm_i915_private *dev_priv = dev->dev_private; 15073 u32 vga_reg = i915_vgacntrl_reg(dev); 15074 15075 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15076 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15077 i915_disable_vga(dev); 15078 } 15079 } 15080 15081 void i915_redisable_vga(struct drm_device *dev) 15082 { 15083 struct drm_i915_private *dev_priv = dev->dev_private; 15084 15085 /* This function can be called both from intel_modeset_setup_hw_state or 15086 * at a very early point in our resume sequence, where the power well 15087 * structures are not yet restored. Since this function is at a very 15088 * paranoid "someone might have enabled VGA while we were not looking" 15089 * level, just check if the power well is enabled instead of trying to 15090 * follow the "don't touch the power well if we don't need it" policy 15091 * the rest of the driver uses. */ 15092 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15093 return; 15094 15095 i915_redisable_vga_power_on(dev); 15096 } 15097 15098 static bool primary_get_hw_state(struct intel_plane *plane) 15099 { 15100 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15101 15102 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 15103 } 15104 15105 /* FIXME read out full plane state for all planes */ 15106 static void readout_plane_state(struct intel_crtc *crtc) 15107 { 15108 struct drm_plane *primary = crtc->base.primary; 15109 struct intel_plane_state *plane_state = 15110 to_intel_plane_state(primary->state); 15111 15112 plane_state->visible = 15113 primary_get_hw_state(to_intel_plane(primary)); 15114 15115 if (plane_state->visible) 15116 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); 15117 } 15118 15119 static void intel_modeset_readout_hw_state(struct drm_device *dev) 15120 { 15121 struct drm_i915_private *dev_priv = dev->dev_private; 15122 enum i915_pipe pipe; 15123 struct intel_crtc *crtc; 15124 struct intel_encoder *encoder; 15125 struct intel_connector *connector; 15126 int i; 15127 15128 for_each_intel_crtc(dev, crtc) { 15129 __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state); 15130 memset(crtc->config, 0, sizeof(*crtc->config)); 15131 crtc->config->base.crtc = &crtc->base; 15132 15133 crtc->active = dev_priv->display.get_pipe_config(crtc, 15134 crtc->config); 15135 15136 crtc->base.state->active = crtc->active; 15137 crtc->base.enabled = crtc->active; 15138 15139 readout_plane_state(crtc); 15140 15141 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 15142 crtc->base.base.id, 15143 crtc->active ? "enabled" : "disabled"); 15144 } 15145 15146 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15147 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15148 15149 pll->on = pll->get_hw_state(dev_priv, pll, 15150 &pll->config.hw_state); 15151 pll->active = 0; 15152 pll->config.crtc_mask = 0; 15153 for_each_intel_crtc(dev, crtc) { 15154 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) { 15155 pll->active++; 15156 pll->config.crtc_mask |= 1 << crtc->pipe; 15157 } 15158 } 15159 15160 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 15161 pll->name, pll->config.crtc_mask, pll->on); 15162 15163 if (pll->config.crtc_mask) 15164 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 15165 } 15166 15167 for_each_intel_encoder(dev, encoder) { 15168 pipe = 0; 15169 15170 if (encoder->get_hw_state(encoder, &pipe)) { 15171 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 15172 encoder->base.crtc = &crtc->base; 15173 encoder->get_config(encoder, crtc->config); 15174 } else { 15175 encoder->base.crtc = NULL; 15176 } 15177 15178 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 15179 encoder->base.base.id, 15180 encoder->base.name, 15181 encoder->base.crtc ? "enabled" : "disabled", 15182 pipe_name(pipe)); 15183 } 15184 15185 for_each_intel_connector(dev, connector) { 15186 if (connector->get_hw_state(connector)) { 15187 connector->base.dpms = DRM_MODE_DPMS_ON; 15188 connector->base.encoder = &connector->encoder->base; 15189 } else { 15190 connector->base.dpms = DRM_MODE_DPMS_OFF; 15191 connector->base.encoder = NULL; 15192 } 15193 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15194 connector->base.base.id, 15195 connector->base.name, 15196 connector->base.encoder ? "enabled" : "disabled"); 15197 } 15198 15199 for_each_intel_crtc(dev, crtc) { 15200 crtc->base.hwmode = crtc->config->base.adjusted_mode; 15201 15202 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15203 if (crtc->base.state->active) { 15204 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); 15205 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); 15206 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15207 15208 /* 15209 * The initial mode needs to be set in order to keep 15210 * the atomic core happy. It wants a valid mode if the 15211 * crtc's enabled, so we do the above call. 15212 * 15213 * At this point some state updated by the connectors 15214 * in their ->detect() callback has not run yet, so 15215 * no recalculation can be done yet. 15216 * 15217 * Even if we could do a recalculation and modeset 15218 * right now it would cause a double modeset if 15219 * fbdev or userspace chooses a different initial mode. 15220 * 15221 * If that happens, someone indicated they wanted a 15222 * mode change, which means it's safe to do a full 15223 * recalculation. 15224 */ 15225 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; 15226 } 15227 } 15228 } 15229 15230 /* Scan out the current hw modeset state, 15231 * and sanitizes it to the current state 15232 */ 15233 static void 15234 intel_modeset_setup_hw_state(struct drm_device *dev) 15235 { 15236 struct drm_i915_private *dev_priv = dev->dev_private; 15237 enum i915_pipe pipe; 15238 struct intel_crtc *crtc; 15239 struct intel_encoder *encoder; 15240 int i; 15241 15242 intel_modeset_readout_hw_state(dev); 15243 15244 /* HW state is read out, now we need to sanitize this mess. */ 15245 for_each_intel_encoder(dev, encoder) { 15246 intel_sanitize_encoder(encoder); 15247 } 15248 15249 for_each_pipe(dev_priv, pipe) { 15250 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 15251 intel_sanitize_crtc(crtc); 15252 intel_dump_pipe_config(crtc, crtc->config, 15253 "[setup_hw_state]"); 15254 } 15255 15256 intel_modeset_update_connector_atomic_state(dev); 15257 15258 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15259 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15260 15261 if (!pll->on || pll->active) 15262 continue; 15263 15264 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 15265 15266 pll->disable(dev_priv, pll); 15267 pll->on = false; 15268 } 15269 15270 if (IS_VALLEYVIEW(dev)) 15271 vlv_wm_get_hw_state(dev); 15272 else if (IS_GEN9(dev)) 15273 skl_wm_get_hw_state(dev); 15274 else if (HAS_PCH_SPLIT(dev)) 15275 ilk_wm_get_hw_state(dev); 15276 15277 for_each_intel_crtc(dev, crtc) { 15278 unsigned long put_domains; 15279 15280 put_domains = modeset_get_crtc_power_domains(&crtc->base); 15281 if (WARN_ON(put_domains)) 15282 modeset_put_power_domains(dev_priv, put_domains); 15283 } 15284 intel_display_set_init_power(dev_priv, false); 15285 } 15286 15287 void intel_display_resume(struct drm_device *dev) 15288 { 15289 struct drm_atomic_state *state = drm_atomic_state_alloc(dev); 15290 struct intel_connector *conn; 15291 struct intel_plane *plane; 15292 struct drm_crtc *crtc; 15293 int ret; 15294 15295 if (!state) 15296 return; 15297 15298 state->acquire_ctx = dev->mode_config.acquire_ctx; 15299 15300 /* preserve complete old state, including dpll */ 15301 intel_atomic_get_shared_dpll_state(state); 15302 15303 for_each_crtc(dev, crtc) { 15304 struct drm_crtc_state *crtc_state = 15305 drm_atomic_get_crtc_state(state, crtc); 15306 15307 ret = PTR_ERR_OR_ZERO(crtc_state); 15308 if (ret) 15309 goto err; 15310 15311 /* force a restore */ 15312 crtc_state->mode_changed = true; 15313 } 15314 15315 for_each_intel_plane(dev, plane) { 15316 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base)); 15317 if (ret) 15318 goto err; 15319 } 15320 15321 for_each_intel_connector(dev, conn) { 15322 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base)); 15323 if (ret) 15324 goto err; 15325 } 15326 15327 intel_modeset_setup_hw_state(dev); 15328 15329 i915_redisable_vga(dev); 15330 ret = drm_atomic_commit(state); 15331 if (!ret) 15332 return; 15333 15334 err: 15335 DRM_ERROR("Restoring old state failed with %i\n", ret); 15336 drm_atomic_state_free(state); 15337 } 15338 15339 void intel_modeset_gem_init(struct drm_device *dev) 15340 { 15341 struct drm_crtc *c; 15342 struct drm_i915_gem_object *obj; 15343 int ret; 15344 15345 mutex_lock(&dev->struct_mutex); 15346 intel_init_gt_powersave(dev); 15347 mutex_unlock(&dev->struct_mutex); 15348 15349 intel_modeset_init_hw(dev); 15350 15351 intel_setup_overlay(dev); 15352 15353 /* 15354 * Make sure any fbs we allocated at startup are properly 15355 * pinned & fenced. When we do the allocation it's too early 15356 * for this. 15357 */ 15358 for_each_crtc(dev, c) { 15359 obj = intel_fb_obj(c->primary->fb); 15360 if (obj == NULL) 15361 continue; 15362 15363 mutex_lock(&dev->struct_mutex); 15364 ret = intel_pin_and_fence_fb_obj(c->primary, 15365 c->primary->fb, 15366 c->primary->state, 15367 NULL, NULL); 15368 mutex_unlock(&dev->struct_mutex); 15369 if (ret) { 15370 DRM_ERROR("failed to pin boot fb on pipe %d\n", 15371 to_intel_crtc(c)->pipe); 15372 drm_framebuffer_unreference(c->primary->fb); 15373 c->primary->fb = NULL; 15374 c->primary->crtc = c->primary->state->crtc = NULL; 15375 update_state_fb(c->primary); 15376 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); 15377 } 15378 } 15379 15380 intel_backlight_register(dev); 15381 } 15382 15383 void intel_connector_unregister(struct intel_connector *intel_connector) 15384 { 15385 struct drm_connector *connector = &intel_connector->base; 15386 15387 intel_panel_destroy_backlight(connector); 15388 drm_connector_unregister(connector); 15389 } 15390 15391 void intel_modeset_cleanup(struct drm_device *dev) 15392 { 15393 struct drm_i915_private *dev_priv = dev->dev_private; 15394 struct drm_connector *connector; 15395 15396 intel_disable_gt_powersave(dev); 15397 15398 intel_backlight_unregister(dev); 15399 15400 /* 15401 * Interrupts and polling as the first thing to avoid creating havoc. 15402 * Too much stuff here (turning of connectors, ...) would 15403 * experience fancy races otherwise. 15404 */ 15405 intel_irq_uninstall(dev_priv); 15406 15407 /* 15408 * Due to the hpd irq storm handling the hotplug work can re-arm the 15409 * poll handlers. Hence disable polling after hpd handling is shut down. 15410 */ 15411 drm_kms_helper_poll_fini(dev); 15412 15413 intel_unregister_dsm_handler(); 15414 15415 intel_fbc_disable(dev_priv); 15416 15417 /* flush any delayed tasks or pending work */ 15418 flush_scheduled_work(); 15419 15420 /* destroy the backlight and sysfs files before encoders/connectors */ 15421 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 15422 struct intel_connector *intel_connector; 15423 15424 intel_connector = to_intel_connector(connector); 15425 intel_connector->unregister(intel_connector); 15426 } 15427 15428 drm_mode_config_cleanup(dev); 15429 15430 intel_cleanup_overlay(dev); 15431 15432 mutex_lock(&dev->struct_mutex); 15433 intel_cleanup_gt_powersave(dev); 15434 mutex_unlock(&dev->struct_mutex); 15435 } 15436 15437 /* 15438 * Return which encoder is currently attached for connector. 15439 */ 15440 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 15441 { 15442 return &intel_attached_encoder(connector)->base; 15443 } 15444 15445 void intel_connector_attach_encoder(struct intel_connector *connector, 15446 struct intel_encoder *encoder) 15447 { 15448 connector->encoder = encoder; 15449 drm_mode_connector_attach_encoder(&connector->base, 15450 &encoder->base); 15451 } 15452 15453 /* 15454 * set vga decode state - true == enable VGA decode 15455 */ 15456 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 15457 { 15458 struct drm_i915_private *dev_priv = dev->dev_private; 15459 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15460 u16 gmch_ctrl; 15461 15462 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 15463 DRM_ERROR("failed to read control word\n"); 15464 return -EIO; 15465 } 15466 15467 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 15468 return 0; 15469 15470 if (state) 15471 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 15472 else 15473 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 15474 15475 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 15476 DRM_ERROR("failed to write control word\n"); 15477 return -EIO; 15478 } 15479 15480 return 0; 15481 } 15482 15483 #if 0 15484 struct intel_display_error_state { 15485 15486 u32 power_well_driver; 15487 15488 int num_transcoders; 15489 15490 struct intel_cursor_error_state { 15491 u32 control; 15492 u32 position; 15493 u32 base; 15494 u32 size; 15495 } cursor[I915_MAX_PIPES]; 15496 15497 struct intel_pipe_error_state { 15498 bool power_domain_on; 15499 u32 source; 15500 u32 stat; 15501 } pipe[I915_MAX_PIPES]; 15502 15503 struct intel_plane_error_state { 15504 u32 control; 15505 u32 stride; 15506 u32 size; 15507 u32 pos; 15508 u32 addr; 15509 u32 surface; 15510 u32 tile_offset; 15511 } plane[I915_MAX_PIPES]; 15512 15513 struct intel_transcoder_error_state { 15514 bool power_domain_on; 15515 enum transcoder cpu_transcoder; 15516 15517 u32 conf; 15518 15519 u32 htotal; 15520 u32 hblank; 15521 u32 hsync; 15522 u32 vtotal; 15523 u32 vblank; 15524 u32 vsync; 15525 } transcoder[4]; 15526 }; 15527 15528 struct intel_display_error_state * 15529 intel_display_capture_error_state(struct drm_device *dev) 15530 { 15531 struct drm_i915_private *dev_priv = dev->dev_private; 15532 struct intel_display_error_state *error; 15533 int transcoders[] = { 15534 TRANSCODER_A, 15535 TRANSCODER_B, 15536 TRANSCODER_C, 15537 TRANSCODER_EDP, 15538 }; 15539 int i; 15540 15541 if (INTEL_INFO(dev)->num_pipes == 0) 15542 return NULL; 15543 15544 error = kzalloc(sizeof(*error), GFP_ATOMIC); 15545 if (error == NULL) 15546 return NULL; 15547 15548 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 15549 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 15550 15551 for_each_pipe(dev_priv, i) { 15552 error->pipe[i].power_domain_on = 15553 __intel_display_power_is_enabled(dev_priv, 15554 POWER_DOMAIN_PIPE(i)); 15555 if (!error->pipe[i].power_domain_on) 15556 continue; 15557 15558 error->cursor[i].control = I915_READ(CURCNTR(i)); 15559 error->cursor[i].position = I915_READ(CURPOS(i)); 15560 error->cursor[i].base = I915_READ(CURBASE(i)); 15561 15562 error->plane[i].control = I915_READ(DSPCNTR(i)); 15563 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15564 if (INTEL_INFO(dev)->gen <= 3) { 15565 error->plane[i].size = I915_READ(DSPSIZE(i)); 15566 error->plane[i].pos = I915_READ(DSPPOS(i)); 15567 } 15568 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 15569 error->plane[i].addr = I915_READ(DSPADDR(i)); 15570 if (INTEL_INFO(dev)->gen >= 4) { 15571 error->plane[i].surface = I915_READ(DSPSURF(i)); 15572 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15573 } 15574 15575 error->pipe[i].source = I915_READ(PIPESRC(i)); 15576 15577 if (HAS_GMCH_DISPLAY(dev)) 15578 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15579 } 15580 15581 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 15582 if (HAS_DDI(dev_priv->dev)) 15583 error->num_transcoders++; /* Account for eDP. */ 15584 15585 for (i = 0; i < error->num_transcoders; i++) { 15586 enum transcoder cpu_transcoder = transcoders[i]; 15587 15588 error->transcoder[i].power_domain_on = 15589 __intel_display_power_is_enabled(dev_priv, 15590 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15591 if (!error->transcoder[i].power_domain_on) 15592 continue; 15593 15594 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15595 15596 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15597 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15598 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15599 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15600 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15601 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15602 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15603 } 15604 15605 return error; 15606 } 15607 15608 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15609 15610 void 15611 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15612 struct drm_device *dev, 15613 struct intel_display_error_state *error) 15614 { 15615 struct drm_i915_private *dev_priv = dev->dev_private; 15616 int i; 15617 15618 if (!error) 15619 return; 15620 15621 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 15622 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 15623 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15624 error->power_well_driver); 15625 for_each_pipe(dev_priv, i) { 15626 err_printf(m, "Pipe [%d]:\n", i); 15627 err_printf(m, " Power: %s\n", 15628 error->pipe[i].power_domain_on ? "on" : "off"); 15629 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15630 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15631 15632 err_printf(m, "Plane [%d]:\n", i); 15633 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15634 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15635 if (INTEL_INFO(dev)->gen <= 3) { 15636 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15637 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15638 } 15639 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 15640 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15641 if (INTEL_INFO(dev)->gen >= 4) { 15642 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15643 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15644 } 15645 15646 err_printf(m, "Cursor [%d]:\n", i); 15647 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15648 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15649 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15650 } 15651 15652 for (i = 0; i < error->num_transcoders; i++) { 15653 err_printf(m, "CPU transcoder: %c\n", 15654 transcoder_name(error->transcoder[i].cpu_transcoder)); 15655 err_printf(m, " Power: %s\n", 15656 error->transcoder[i].power_domain_on ? "on" : "off"); 15657 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15658 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15659 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15660 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15661 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15662 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15663 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15664 } 15665 } 15666 #endif 15667 15668 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) 15669 { 15670 struct intel_crtc *crtc; 15671 15672 for_each_intel_crtc(dev, crtc) { 15673 struct intel_unpin_work *work; 15674 15675 spin_lock_irq(&dev->event_lock); 15676 15677 work = crtc->unpin_work; 15678 15679 if (work && work->event && 15680 work->event->base.file_priv == file) { 15681 kfree(work->event); 15682 work->event = NULL; 15683 } 15684 15685 spin_unlock_irq(&dev->event_lock); 15686 } 15687 } 15688