1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *
 * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $
 */

#include <ddb/ddb.h>
#include <sys/limits.h>

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>

/* True when any encoder on the local 'crtc' variable is eDP; only usable
 * inside functions that declare a 'struct drm_crtc *crtc'. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

/* One candidate DPLL configuration: the divider values we would program
 * plus the frequencies derived from them by intel_clock(). */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

/* Inclusive [min, max] range for a single divider or frequency. */
typedef struct {
	int min, max;
} intel_range_t;

/* Post-divider selection: targets below dot_limit use p2_slow, targets at
 * or above it use p2_fast (see intel_find_best_PLL()). */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;

/* Per-platform/per-output legal divider ranges, plus the search routine
 * used to find a configuration hitting a target dot clock. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int
			   target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

/* Current FDI link frequency: Gen5 reports it via the FDI PLL BIOS
 * register; everything else is fixed at 2.7 GHz. */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

/*
 * Legal DPLL divider ranges per chipset generation and output type.
 * .dot/.vco are in kHz; the other fields are raw divider values.  The
 * applicable table is chosen by intel_limit() below.
 */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

/* Pick the Ironlake/Sandybridge limit table for @crtc's output type.
 * LVDS dual vs. single channel is read back from the live PCH_LVDS
 * register; @refclk == 100000 selects the 100MHz-refclk LVDS tables. */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Pick the G4x limit table for @crtc's output type. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Top-level limit-table dispatch by chipset generation. */
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

/* Derive m/p/vco/dot from the given dividers; register values for
 * n/m1/m2 are offset by 2 from the actual divider values (except on
 * Pineview, which has its own formula above). */
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 > m2 is required everywhere except Pineview (m1 is fixed 0). */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/* Exhaustive divider search minimizing |dot - target|; returns true if any
 * valid configuration beat the initial error bound of 'target' itself. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/* G4x/ILK search: prefers small n and large m1/m2 (hardware requirement),
 * accepting any configuration within ~0.585% of the target dot clock. */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* lock in this n; only smaller
						 * error at same/smaller n wins */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/* Fixed divider sets for the two DP link rates on Ironlake. */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (_intel_wait_for(dev,
	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
	    50, 1, "915vbl"))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (_intel_wait_for(dev,
		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
		    1, "915pip"))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			DELAY(5000);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

/* Human-readable form of an enable bit for the assertion messages below. */
static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
		    ("transcoder %d PLL not enabled\n", pipe));

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PCH PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	if (cur_state != state)
		kprintf("FDI TX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	if (cur_state != state)
		kprintf("FDI RX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_TX_PLL_ENABLE))
		kprintf("FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_RX_PLL_ENABLE))
		kprintf("FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Registers are writable when the panel is off or explicitly
	 * unlocked. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	if (panel_pipe == pipe && locked)
		kprintf("panel assertion failure, pipe %c regs locked\n",
		    pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	if (cur_state != state)
		kprintf("pipe %c assertion failure (expected %s, current %s)\n",
		    pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	if (cur_state != state)
		kprintf("plane %c assertion failure, (expected %s, current %s)\n",
		    plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		if ((val & DISPLAY_PLANE_ENABLE) != 0)
			kprintf("plane %c assertion failure, should be disabled but not\n",
			    plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
			kprintf("plane %c assertion failure, should be off on pipe %c but is still active\n",
			    plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	if (!enabled)
		kprintf("PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum i915_pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	if (enabled)
		kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n",
		    pipe_name(pipe));
}

/* Is the HDMI port in @val routed to and enabled on @pipe? */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

/* Is the LVDS port in @val routed to and enabled on @pipe? */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

/* Is the VGA DAC in @val routed to and enabled on @pipe? */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) !=
PORT_TRANS_SEL_CPT(pipe)) 1067 return false; 1068 } else { 1069 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1070 return false; 1071 } 1072 return true; 1073 } 1074 1075 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1076 enum i915_pipe pipe, u32 port_sel, u32 val) 1077 { 1078 if ((val & DP_PORT_EN) == 0) 1079 return false; 1080 1081 if (HAS_PCH_CPT(dev_priv->dev)) { 1082 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1083 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1084 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1085 return false; 1086 } else { 1087 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1088 return false; 1089 } 1090 return true; 1091 } 1092 1093 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1094 enum i915_pipe pipe, int reg, u32 port_sel) 1095 { 1096 u32 val = I915_READ(reg); 1097 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) 1098 kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1099 reg, pipe_name(pipe)); 1100 } 1101 1102 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1103 enum i915_pipe pipe, int reg) 1104 { 1105 u32 val = I915_READ(reg); 1106 if (hdmi_pipe_enabled(dev_priv, val, pipe)) 1107 kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1108 reg, pipe_name(pipe)); 1109 } 1110 1111 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1112 enum i915_pipe pipe) 1113 { 1114 int reg; 1115 u32 val; 1116 1117 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1118 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1119 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1120 1121 reg = PCH_ADPA; 1122 val = I915_READ(reg); 1123 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1124 kprintf("PCH VGA enabled on transcoder %c, should be disabled\n", 1125 pipe_name(pipe)); 1126 1127 reg = PCH_LVDS; 1128 val = I915_READ(reg); 1129 if 
(lvds_pipe_enabled(dev_priv, val, pipe)) 1130 kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n", 1131 pipe_name(pipe)); 1132 1133 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1134 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1135 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1136 } 1137 1138 /** 1139 * intel_enable_pll - enable a PLL 1140 * @dev_priv: i915 private structure 1141 * @pipe: pipe PLL to enable 1142 * 1143 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to 1144 * make sure the PLL reg is writable first though, since the panel write 1145 * protect mechanism may be enabled. 1146 * 1147 * Note! This is for pre-ILK only. 1148 */ 1149 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1150 { 1151 int reg; 1152 u32 val; 1153 1154 /* No really, not for ILK+ */ 1155 KASSERT(dev_priv->info->gen < 5, ("Wrong device gen")); 1156 1157 /* PLL is protected by panel, make sure we can write it */ 1158 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1159 assert_panel_unlocked(dev_priv, pipe); 1160 1161 reg = DPLL(pipe); 1162 val = I915_READ(reg); 1163 val |= DPLL_VCO_ENABLE; 1164 1165 /* We do this three times for luck */ 1166 I915_WRITE(reg, val); 1167 POSTING_READ(reg); 1168 DELAY(150); /* wait for warmup */ 1169 I915_WRITE(reg, val); 1170 POSTING_READ(reg); 1171 DELAY(150); /* wait for warmup */ 1172 I915_WRITE(reg, val); 1173 POSTING_READ(reg); 1174 DELAY(150); /* wait for warmup */ 1175 } 1176 1177 /** 1178 * intel_disable_pll - disable a PLL 1179 * @dev_priv: i915 private structure 1180 * @pipe: pipe PLL to disable 1181 * 1182 * Disable the PLL for @pipe, making sure the pipe is off first. 1183 * 1184 * Note! This is for pre-ILK only. 
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Only PLLs A and B exist */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}

/*
 * Disable the PCH PLL for @pipe, unless PCH_DPLL_SEL shows transcoder C
 * is still being driven from it.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
	    pll_sel = TRANSC_DPLL_ENABLE;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;

	/* Transcoder C still uses this PLL: leave it running */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}

/*
 * Enable the PCH transcoder feeding @pipe.  The PCH PLL and both FDI
 * sides must already be running.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	/* Mirror the pipe's interlace mode into the transcoder */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
	    100, 1, "915trc"))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports must be
 * off first, since they depend on the transcoder clock.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (_intel_wait_for(dev_priv->dev,
	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
	    1, "915trd"))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
    bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* already enabled, nothing to do */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
    enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* already disabled, nothing to do */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
    enum plane plane)
{
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
1440 */ 1441 static void intel_enable_plane(struct drm_i915_private *dev_priv, 1442 enum plane plane, enum i915_pipe pipe) 1443 { 1444 int reg; 1445 u32 val; 1446 1447 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 1448 assert_pipe_enabled(dev_priv, pipe); 1449 1450 reg = DSPCNTR(plane); 1451 val = I915_READ(reg); 1452 if (val & DISPLAY_PLANE_ENABLE) 1453 return; 1454 1455 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 1456 intel_flush_display_plane(dev_priv, plane); 1457 intel_wait_for_vblank(dev_priv->dev, pipe); 1458 } 1459 1460 /** 1461 * intel_disable_plane - disable a display plane 1462 * @dev_priv: i915 private structure 1463 * @plane: plane to disable 1464 * @pipe: pipe consuming the data 1465 * 1466 * Disable @plane; should be an independent operation. 1467 */ 1468 static void intel_disable_plane(struct drm_i915_private *dev_priv, 1469 enum plane plane, enum i915_pipe pipe) 1470 { 1471 int reg; 1472 u32 val; 1473 1474 reg = DSPCNTR(plane); 1475 val = I915_READ(reg); 1476 if ((val & DISPLAY_PLANE_ENABLE) == 0) 1477 return; 1478 1479 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); 1480 intel_flush_display_plane(dev_priv, plane); 1481 intel_wait_for_vblank(dev_priv->dev, pipe); 1482 } 1483 1484 static void disable_pch_dp(struct drm_i915_private *dev_priv, 1485 enum i915_pipe pipe, int reg, u32 port_sel) 1486 { 1487 u32 val = I915_READ(reg); 1488 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { 1489 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); 1490 I915_WRITE(reg, val & ~DP_PORT_EN); 1491 } 1492 } 1493 1494 static void disable_pch_hdmi(struct drm_i915_private *dev_priv, 1495 enum i915_pipe pipe, int reg) 1496 { 1497 u32 val = I915_READ(reg); 1498 if (hdmi_pipe_enabled(dev_priv, val, pipe)) { 1499 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", 1500 reg, pipe); 1501 I915_WRITE(reg, val & ~PORT_ENABLE); 1502 } 1503 } 1504 1505 /* Disable any ports connected to this transcoder */ 1506 static void 
intel_disable_pch_ports(struct drm_i915_private *dev_priv, 1507 enum i915_pipe pipe) 1508 { 1509 u32 reg, val; 1510 1511 val = I915_READ(PCH_PP_CONTROL); 1512 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); 1513 1514 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1515 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1516 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1517 1518 reg = PCH_ADPA; 1519 val = I915_READ(reg); 1520 if (adpa_pipe_enabled(dev_priv, val, pipe)) 1521 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); 1522 1523 reg = PCH_LVDS; 1524 val = I915_READ(reg); 1525 if (lvds_pipe_enabled(dev_priv, val, pipe)) { 1526 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); 1527 I915_WRITE(reg, val & ~LVDS_PORT_EN); 1528 POSTING_READ(reg); 1529 DELAY(100); 1530 } 1531 1532 disable_pch_hdmi(dev_priv, pipe, HDMIB); 1533 disable_pch_hdmi(dev_priv, pipe, HDMIC); 1534 disable_pch_hdmi(dev_priv, pipe, HDMID); 1535 } 1536 1537 int 1538 intel_pin_and_fence_fb_obj(struct drm_device *dev, 1539 struct drm_i915_gem_object *obj, 1540 struct intel_ring_buffer *pipelined) 1541 { 1542 struct drm_i915_private *dev_priv = dev->dev_private; 1543 u32 alignment; 1544 int ret; 1545 1546 alignment = 0; /* shut gcc */ 1547 switch (obj->tiling_mode) { 1548 case I915_TILING_NONE: 1549 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1550 alignment = 128 * 1024; 1551 else if (INTEL_INFO(dev)->gen >= 4) 1552 alignment = 4 * 1024; 1553 else 1554 alignment = 64 * 1024; 1555 break; 1556 case I915_TILING_X: 1557 /* pin() will align the object as required by fence */ 1558 alignment = 0; 1559 break; 1560 case I915_TILING_Y: 1561 /* FIXME: Is this true? 
 */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		KASSERT(0, ("Wrong tiling for fb obj"));
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}

/* Drop the scanout fence and GTT pin taken by intel_pin_and_fence_fb_obj(). */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}

/* Program the gen2/3/4 primary plane registers to scan out @fb at (@x, @y). */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_BGRX555;
		else
			dspcntr |= DISPPLANE_BGRX565;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
	    Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ uses a surface base plus a tiled x/y offset */
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return (0);
}

/* Program the ILK+ primary plane registers to scan out @fb at (@x, @y). */
static int ironlake_update_plane(struct drm_crtc *crtc,
    struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &=
	    ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth != 16) {
			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
			return -EINVAL;
		}

		dspcntr |= DISPPLANE_BGRX565;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_BGRX888;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_BGRX101010;
		else {
			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
			    fb->depth);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
	    Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	return dev_priv->display.update_plane(crtc, fb, x, y);
}

/*
 * Wait for any pending page flips and outstanding GPU rendering on
 * @old_fb's backing object to complete before it is unpinned.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* XXX */ lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	while (!atomic_read(&dev_priv->mm.wedged) &&
	    atomic_read(&obj->pending_flip) != 0) {
		lksleep(&obj->pending_flip, &dev->event_lock,
		    0, "915flp", 0);
	}
	/* XXX */ lockmgr(&dev->event_lock, LK_RELEASE);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;
	return ret;
}

/*
 * Pin the crtc's new fb, switch scanout to it at (@x, @y), then wait for
 * vblank and unpin @old_fb.  Also mirrors the new offsets into the SAREA.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
#if 0
	struct drm_i915_master_private *master_priv;
#else
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		/* plane C only exists on IVB */
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	DRM_LOCK(dev);
	ret = intel_pin_and_fence_fb_obj(dev,
	    to_intel_framebuffer(crtc->fb)->obj,
	    NULL);
	if (ret != 0) {
		DRM_UNLOCK(dev);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb)
		intel_finish_fb(old_fb);

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
	    LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		DRM_UNLOCK(dev);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	DRM_UNLOCK(dev);

#if 0
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}
#else

	if (!dev_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		dev_priv->sarea_priv->planeB_x = x;
		dev_priv->sarea_priv->planeB_y = y;
	} else {
		dev_priv->sarea_priv->planeA_x = x;
		dev_priv->sarea_priv->planeA_y = y;
	}
#endif

	return 0;
}

/* Program the eDP PLL frequency on DP_A for the given pixel @clock (kHz). */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		 */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	DELAY(500);
}

/* Switch both FDI link sides from a training pattern to the normal pattern. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	DELAY(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
		    FDI_FE_ERRC_ENABLE);
}

/* Unlock and enable the CPT FDI phase sync override for @pipe. */
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags |= FDI_PHASE_SYNC_OVR(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
	flags |= FDI_PHASE_SYNC_EN(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
	POSTING_READ(SOUTH_CHICKEN1);
}

/* The FDI link training functions for ILK/Ibexpeak.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	DELAY(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		    FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* poll for bit lock (training pattern 1 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* poll for symbol lock (training pattern 2 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* FDI TX voltage-swing/pre-emphasis settings tried during SNB/IVB training */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	DELAY(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* step through the voltage/emphasis table until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* step through the voltage/emphasis table until symbol lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg =
FDI_TX_CTL(pipe); 2237 temp = I915_READ(reg); 2238 temp &= ~(7 << 19); 2239 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2240 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 2241 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 2242 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2243 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2244 temp |= FDI_COMPOSITE_SYNC; 2245 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2246 2247 reg = FDI_RX_CTL(pipe); 2248 temp = I915_READ(reg); 2249 temp &= ~FDI_LINK_TRAIN_AUTO; 2250 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2251 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2252 temp |= FDI_COMPOSITE_SYNC; 2253 I915_WRITE(reg, temp | FDI_RX_ENABLE); 2254 2255 POSTING_READ(reg); 2256 DELAY(150); 2257 2258 for (i = 0; i < 4; i++) { 2259 reg = FDI_TX_CTL(pipe); 2260 temp = I915_READ(reg); 2261 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2262 temp |= snb_b_fdi_train_param[i]; 2263 I915_WRITE(reg, temp); 2264 2265 POSTING_READ(reg); 2266 DELAY(500); 2267 2268 reg = FDI_RX_IIR(pipe); 2269 temp = I915_READ(reg); 2270 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2271 2272 if (temp & FDI_RX_BIT_LOCK || 2273 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2274 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2275 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2276 break; 2277 } 2278 } 2279 if (i == 4) 2280 DRM_ERROR("FDI train 1 fail!\n"); 2281 2282 /* Train 2 */ 2283 reg = FDI_TX_CTL(pipe); 2284 temp = I915_READ(reg); 2285 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 2286 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 2287 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2288 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2289 I915_WRITE(reg, temp); 2290 2291 reg = FDI_RX_CTL(pipe); 2292 temp = I915_READ(reg); 2293 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2294 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 2295 I915_WRITE(reg, temp); 2296 2297 POSTING_READ(reg); 2298 DELAY(150); 2299 2300 for (i = 0; i < 4; i++ ) { 2301 reg = FDI_TX_CTL(pipe); 2302 temp = I915_READ(reg); 2303 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 2304 temp |= snb_b_fdi_train_param[i]; 2305 
I915_WRITE(reg, temp); 2306 2307 POSTING_READ(reg); 2308 DELAY(500); 2309 2310 reg = FDI_RX_IIR(pipe); 2311 temp = I915_READ(reg); 2312 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2313 2314 if (temp & FDI_RX_SYMBOL_LOCK) { 2315 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2316 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2317 break; 2318 } 2319 } 2320 if (i == 4) 2321 DRM_ERROR("FDI train 2 fail!\n"); 2322 2323 DRM_DEBUG_KMS("FDI train done.\n"); 2324 } 2325 2326 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) 2327 { 2328 struct drm_device *dev = crtc->dev; 2329 struct drm_i915_private *dev_priv = dev->dev_private; 2330 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2331 int pipe = intel_crtc->pipe; 2332 u32 reg, temp; 2333 2334 /* Write the TU size bits so error detection works */ 2335 I915_WRITE(FDI_RX_TUSIZE1(pipe), 2336 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 2337 2338 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2339 reg = FDI_RX_CTL(pipe); 2340 temp = I915_READ(reg); 2341 temp &= ~((0x7 << 19) | (0x7 << 16)); 2342 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2343 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2344 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2345 2346 POSTING_READ(reg); 2347 DELAY(200); 2348 2349 /* Switch from Rawclk to PCDclk */ 2350 temp = I915_READ(reg); 2351 I915_WRITE(reg, temp | FDI_PCDCLK); 2352 2353 POSTING_READ(reg); 2354 DELAY(200); 2355 2356 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2357 reg = FDI_TX_CTL(pipe); 2358 temp = I915_READ(reg); 2359 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2360 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2361 2362 POSTING_READ(reg); 2363 DELAY(100); 2364 } 2365 } 2366 2367 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) 2368 { 2369 struct drm_i915_private *dev_priv = dev->dev_private; 2370 u32 flags = I915_READ(SOUTH_CHICKEN1); 2371 2372 flags &= ~(FDI_PHASE_SYNC_EN(pipe)); 2373 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... 
*/ 2374 flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); 2375 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ 2376 POSTING_READ(SOUTH_CHICKEN1); 2377 } 2378 2379 static void ironlake_fdi_disable(struct drm_crtc *crtc) 2380 { 2381 struct drm_device *dev = crtc->dev; 2382 struct drm_i915_private *dev_priv = dev->dev_private; 2383 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2384 int pipe = intel_crtc->pipe; 2385 u32 reg, temp; 2386 2387 /* disable CPU FDI tx and PCH FDI rx */ 2388 reg = FDI_TX_CTL(pipe); 2389 temp = I915_READ(reg); 2390 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 2391 POSTING_READ(reg); 2392 2393 reg = FDI_RX_CTL(pipe); 2394 temp = I915_READ(reg); 2395 temp &= ~(0x7 << 16); 2396 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2397 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2398 2399 POSTING_READ(reg); 2400 DELAY(100); 2401 2402 /* Ironlake workaround, disable clock pointer after downing FDI */ 2403 if (HAS_PCH_IBX(dev)) { 2404 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2405 I915_WRITE(FDI_RX_CHICKEN(pipe), 2406 I915_READ(FDI_RX_CHICKEN(pipe) & 2407 ~FDI_RX_PHASE_SYNC_POINTER_EN)); 2408 } else if (HAS_PCH_CPT(dev)) { 2409 cpt_phase_pointer_disable(dev, pipe); 2410 } 2411 2412 /* still set train pattern 1 */ 2413 reg = FDI_TX_CTL(pipe); 2414 temp = I915_READ(reg); 2415 temp &= ~FDI_LINK_TRAIN_NONE; 2416 temp |= FDI_LINK_TRAIN_PATTERN_1; 2417 I915_WRITE(reg, temp); 2418 2419 reg = FDI_RX_CTL(pipe); 2420 temp = I915_READ(reg); 2421 if (HAS_PCH_CPT(dev)) { 2422 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 2423 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 2424 } else { 2425 temp &= ~FDI_LINK_TRAIN_NONE; 2426 temp |= FDI_LINK_TRAIN_PATTERN_1; 2427 } 2428 /* BPC in FDI rx is consistent with that in PIPECONF */ 2429 temp &= ~(0x07 << 16); 2430 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2431 I915_WRITE(reg, temp); 2432 2433 POSTING_READ(reg); 2434 DELAY(100); 2435 } 2436 2437 /* 2438 * When we disable a pipe, we need to 
clear any pending scanline wait events 2439 * to avoid hanging the ring, which we assume we are waiting on. 2440 */ 2441 static void intel_clear_scanline_wait(struct drm_device *dev) 2442 { 2443 struct drm_i915_private *dev_priv = dev->dev_private; 2444 struct intel_ring_buffer *ring; 2445 u32 tmp; 2446 2447 if (IS_GEN2(dev)) 2448 /* Can't break the hang on i8xx */ 2449 return; 2450 2451 ring = LP_RING(dev_priv); 2452 tmp = I915_READ_CTL(ring); 2453 if (tmp & RING_WAIT) 2454 I915_WRITE_CTL(ring, tmp); 2455 } 2456 2457 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2458 { 2459 struct drm_i915_gem_object *obj; 2460 struct drm_i915_private *dev_priv; 2461 struct drm_device *dev; 2462 2463 if (crtc->fb == NULL) 2464 return; 2465 2466 obj = to_intel_framebuffer(crtc->fb)->obj; 2467 dev = crtc->dev; 2468 dev_priv = dev->dev_private; 2469 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 2470 while (atomic_read(&obj->pending_flip) != 0) 2471 lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0); 2472 lockmgr(&dev->event_lock, LK_RELEASE); 2473 } 2474 2475 static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2476 { 2477 struct drm_device *dev = crtc->dev; 2478 struct drm_mode_config *mode_config = &dev->mode_config; 2479 struct intel_encoder *encoder; 2480 2481 /* 2482 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2483 * must be driven by its own crtc; no sharing is possible. 
2484 */ 2485 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 2486 if (encoder->base.crtc != crtc) 2487 continue; 2488 2489 switch (encoder->type) { 2490 case INTEL_OUTPUT_EDP: 2491 if (!intel_encoder_is_pch_edp(&encoder->base)) 2492 return false; 2493 continue; 2494 } 2495 } 2496 2497 return true; 2498 } 2499 2500 /* 2501 * Enable PCH resources required for PCH ports: 2502 * - PCH PLLs 2503 * - FDI training & RX/TX 2504 * - update transcoder timings 2505 * - DP transcoding bits 2506 * - transcoder 2507 */ 2508 static void ironlake_pch_enable(struct drm_crtc *crtc) 2509 { 2510 struct drm_device *dev = crtc->dev; 2511 struct drm_i915_private *dev_priv = dev->dev_private; 2512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2513 int pipe = intel_crtc->pipe; 2514 u32 reg, temp, transc_sel; 2515 2516 /* For PCH output, training FDI link */ 2517 dev_priv->display.fdi_link_train(crtc); 2518 2519 intel_enable_pch_pll(dev_priv, pipe); 2520 2521 if (HAS_PCH_CPT(dev)) { 2522 transc_sel = intel_crtc->use_pll_a ? 
TRANSC_DPLLA_SEL : 2523 TRANSC_DPLLB_SEL; 2524 2525 /* Be sure PCH DPLL SEL is set */ 2526 temp = I915_READ(PCH_DPLL_SEL); 2527 if (pipe == 0) { 2528 temp &= ~(TRANSA_DPLLB_SEL); 2529 temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); 2530 } else if (pipe == 1) { 2531 temp &= ~(TRANSB_DPLLB_SEL); 2532 temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2533 } else if (pipe == 2) { 2534 temp &= ~(TRANSC_DPLLB_SEL); 2535 temp |= (TRANSC_DPLL_ENABLE | transc_sel); 2536 } 2537 I915_WRITE(PCH_DPLL_SEL, temp); 2538 } 2539 2540 /* set transcoder timing, panel must allow it */ 2541 assert_panel_unlocked(dev_priv, pipe); 2542 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); 2543 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); 2544 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); 2545 2546 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); 2547 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2548 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2549 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 2550 2551 intel_fdi_normal_train(crtc); 2552 2553 /* For PCH DP, enable TRANS_DP_CTL */ 2554 if (HAS_PCH_CPT(dev) && 2555 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 2556 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2557 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2558 reg = TRANS_DP_CTL(pipe); 2559 temp = I915_READ(reg); 2560 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2561 TRANS_DP_SYNC_MASK | 2562 TRANS_DP_BPC_MASK); 2563 temp |= (TRANS_DP_OUTPUT_ENABLE | 2564 TRANS_DP_ENH_FRAMING); 2565 temp |= bpc << 9; /* same format but at 11:9 */ 2566 2567 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2568 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2569 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) 2570 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 2571 2572 switch (intel_trans_dp_port_sel(crtc)) { 2573 case PCH_DP_B: 2574 temp |= TRANS_DP_PORT_SEL_B; 2575 break; 2576 case PCH_DP_C: 2577 temp |= TRANS_DP_PORT_SEL_C; 2578 break; 2579 
case PCH_DP_D: 2580 temp |= TRANS_DP_PORT_SEL_D; 2581 break; 2582 default: 2583 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); 2584 temp |= TRANS_DP_PORT_SEL_B; 2585 break; 2586 } 2587 2588 I915_WRITE(reg, temp); 2589 } 2590 2591 intel_enable_transcoder(dev_priv, pipe); 2592 } 2593 2594 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 2595 { 2596 struct drm_i915_private *dev_priv = dev->dev_private; 2597 int dslreg = PIPEDSL(pipe); 2598 u32 temp; 2599 2600 temp = I915_READ(dslreg); 2601 udelay(500); 2602 if (wait_for(I915_READ(dslreg) != temp, 5)) { 2603 if (wait_for(I915_READ(dslreg) != temp, 5)) 2604 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); 2605 } 2606 } 2607 2608 static void ironlake_crtc_enable(struct drm_crtc *crtc) 2609 { 2610 struct drm_device *dev = crtc->dev; 2611 struct drm_i915_private *dev_priv = dev->dev_private; 2612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2613 int pipe = intel_crtc->pipe; 2614 int plane = intel_crtc->plane; 2615 u32 temp; 2616 bool is_pch_port; 2617 2618 if (intel_crtc->active) 2619 return; 2620 2621 intel_crtc->active = true; 2622 intel_update_watermarks(dev); 2623 2624 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2625 temp = I915_READ(PCH_LVDS); 2626 if ((temp & LVDS_PORT_EN) == 0) 2627 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2628 } 2629 2630 is_pch_port = intel_crtc_driving_pch(crtc); 2631 2632 if (is_pch_port) 2633 ironlake_fdi_pll_enable(crtc); 2634 else 2635 ironlake_fdi_disable(crtc); 2636 2637 /* Enable panel fitting for LVDS */ 2638 if (dev_priv->pch_pf_size && 2639 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { 2640 /* Force use of hard-coded filter coefficients 2641 * as some pre-programmed values are broken, 2642 * e.g. x201. 
2643 */ 2644 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 2645 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 2646 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 2647 } 2648 2649 intel_enable_pipe(dev_priv, pipe, is_pch_port); 2650 intel_enable_plane(dev_priv, plane, pipe); 2651 2652 if (is_pch_port) 2653 ironlake_pch_enable(crtc); 2654 2655 intel_crtc_load_lut(crtc); 2656 2657 DRM_LOCK(dev); 2658 intel_update_fbc(dev); 2659 DRM_UNLOCK(dev); 2660 2661 intel_crtc_update_cursor(crtc, true); 2662 } 2663 2664 static void ironlake_crtc_disable(struct drm_crtc *crtc) 2665 { 2666 struct drm_device *dev = crtc->dev; 2667 struct drm_i915_private *dev_priv = dev->dev_private; 2668 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2669 int pipe = intel_crtc->pipe; 2670 int plane = intel_crtc->plane; 2671 u32 reg, temp; 2672 2673 if (!intel_crtc->active) 2674 return; 2675 2676 intel_crtc_wait_for_pending_flips(crtc); 2677 drm_vblank_off(dev, pipe); 2678 intel_crtc_update_cursor(crtc, false); 2679 2680 intel_disable_plane(dev_priv, plane, pipe); 2681 2682 if (dev_priv->cfb_plane == plane) 2683 intel_disable_fbc(dev); 2684 2685 intel_disable_pipe(dev_priv, pipe); 2686 2687 /* Disable PF */ 2688 I915_WRITE(PF_CTL(pipe), 0); 2689 I915_WRITE(PF_WIN_SZ(pipe), 0); 2690 2691 ironlake_fdi_disable(crtc); 2692 2693 /* This is a horrible layering violation; we should be doing this in 2694 * the connector/encoder ->prepare instead, but we don't always have 2695 * enough information there about the config to know whether it will 2696 * actually be necessary or just cause undesired flicker. 
2697 */ 2698 intel_disable_pch_ports(dev_priv, pipe); 2699 2700 intel_disable_transcoder(dev_priv, pipe); 2701 2702 if (HAS_PCH_CPT(dev)) { 2703 /* disable TRANS_DP_CTL */ 2704 reg = TRANS_DP_CTL(pipe); 2705 temp = I915_READ(reg); 2706 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); 2707 temp |= TRANS_DP_PORT_SEL_NONE; 2708 I915_WRITE(reg, temp); 2709 2710 /* disable DPLL_SEL */ 2711 temp = I915_READ(PCH_DPLL_SEL); 2712 switch (pipe) { 2713 case 0: 2714 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); 2715 break; 2716 case 1: 2717 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); 2718 break; 2719 case 2: 2720 /* C shares PLL A or B */ 2721 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); 2722 break; 2723 default: 2724 KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */ 2725 } 2726 I915_WRITE(PCH_DPLL_SEL, temp); 2727 } 2728 2729 /* disable PCH DPLL */ 2730 if (!intel_crtc->no_pll) 2731 intel_disable_pch_pll(dev_priv, pipe); 2732 2733 /* Switch from PCDclk to Rawclk */ 2734 reg = FDI_RX_CTL(pipe); 2735 temp = I915_READ(reg); 2736 I915_WRITE(reg, temp & ~FDI_PCDCLK); 2737 2738 /* Disable CPU FDI TX PLL */ 2739 reg = FDI_TX_CTL(pipe); 2740 temp = I915_READ(reg); 2741 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 2742 2743 POSTING_READ(reg); 2744 DELAY(100); 2745 2746 reg = FDI_RX_CTL(pipe); 2747 temp = I915_READ(reg); 2748 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 2749 2750 /* Wait for the clocks to turn off. 
*/ 2751 POSTING_READ(reg); 2752 DELAY(100); 2753 2754 intel_crtc->active = false; 2755 intel_update_watermarks(dev); 2756 2757 DRM_LOCK(dev); 2758 intel_update_fbc(dev); 2759 intel_clear_scanline_wait(dev); 2760 DRM_UNLOCK(dev); 2761 } 2762 2763 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) 2764 { 2765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2766 int pipe = intel_crtc->pipe; 2767 int plane = intel_crtc->plane; 2768 2769 /* XXX: When our outputs are all unaware of DPMS modes other than off 2770 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 2771 */ 2772 switch (mode) { 2773 case DRM_MODE_DPMS_ON: 2774 case DRM_MODE_DPMS_STANDBY: 2775 case DRM_MODE_DPMS_SUSPEND: 2776 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); 2777 ironlake_crtc_enable(crtc); 2778 break; 2779 2780 case DRM_MODE_DPMS_OFF: 2781 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); 2782 ironlake_crtc_disable(crtc); 2783 break; 2784 } 2785 } 2786 2787 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 2788 { 2789 if (!enable && intel_crtc->overlay) { 2790 struct drm_device *dev = intel_crtc->base.dev; 2791 struct drm_i915_private *dev_priv = dev->dev_private; 2792 2793 DRM_LOCK(dev); 2794 dev_priv->mm.interruptible = false; 2795 (void) intel_overlay_switch_off(intel_crtc->overlay); 2796 dev_priv->mm.interruptible = true; 2797 DRM_UNLOCK(dev); 2798 } 2799 2800 /* Let userspace switch the overlay on again. In most cases userspace 2801 * has to recompute where to put it anyway. 
2802 */ 2803 } 2804 2805 static void i9xx_crtc_enable(struct drm_crtc *crtc) 2806 { 2807 struct drm_device *dev = crtc->dev; 2808 struct drm_i915_private *dev_priv = dev->dev_private; 2809 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2810 int pipe = intel_crtc->pipe; 2811 int plane = intel_crtc->plane; 2812 2813 if (intel_crtc->active) 2814 return; 2815 2816 intel_crtc->active = true; 2817 intel_update_watermarks(dev); 2818 2819 intel_enable_pll(dev_priv, pipe); 2820 intel_enable_pipe(dev_priv, pipe, false); 2821 intel_enable_plane(dev_priv, plane, pipe); 2822 2823 intel_crtc_load_lut(crtc); 2824 intel_update_fbc(dev); 2825 2826 /* Give the overlay scaler a chance to enable if it's on this pipe */ 2827 intel_crtc_dpms_overlay(intel_crtc, true); 2828 intel_crtc_update_cursor(crtc, true); 2829 } 2830 2831 static void i9xx_crtc_disable(struct drm_crtc *crtc) 2832 { 2833 struct drm_device *dev = crtc->dev; 2834 struct drm_i915_private *dev_priv = dev->dev_private; 2835 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2836 int pipe = intel_crtc->pipe; 2837 int plane = intel_crtc->plane; 2838 2839 if (!intel_crtc->active) 2840 return; 2841 2842 /* Give the overlay scaler a chance to disable if it's on this pipe */ 2843 intel_crtc_wait_for_pending_flips(crtc); 2844 drm_vblank_off(dev, pipe); 2845 intel_crtc_dpms_overlay(intel_crtc, false); 2846 intel_crtc_update_cursor(crtc, false); 2847 2848 if (dev_priv->cfb_plane == plane) 2849 intel_disable_fbc(dev); 2850 2851 intel_disable_plane(dev_priv, plane, pipe); 2852 intel_disable_pipe(dev_priv, pipe); 2853 intel_disable_pll(dev_priv, pipe); 2854 2855 intel_crtc->active = false; 2856 intel_update_fbc(dev); 2857 intel_update_watermarks(dev); 2858 intel_clear_scanline_wait(dev); 2859 } 2860 2861 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 2862 { 2863 /* XXX: When our outputs are all unaware of DPMS modes other than off 2864 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
2865 */ 2866 switch (mode) { 2867 case DRM_MODE_DPMS_ON: 2868 case DRM_MODE_DPMS_STANDBY: 2869 case DRM_MODE_DPMS_SUSPEND: 2870 i9xx_crtc_enable(crtc); 2871 break; 2872 case DRM_MODE_DPMS_OFF: 2873 i9xx_crtc_disable(crtc); 2874 break; 2875 } 2876 } 2877 2878 /** 2879 * Sets the power management mode of the pipe and plane. 2880 */ 2881 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 2882 { 2883 struct drm_device *dev = crtc->dev; 2884 struct drm_i915_private *dev_priv = dev->dev_private; 2885 #if 0 2886 struct drm_i915_master_private *master_priv; 2887 #endif 2888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2889 int pipe = intel_crtc->pipe; 2890 bool enabled; 2891 2892 if (intel_crtc->dpms_mode == mode) 2893 return; 2894 2895 intel_crtc->dpms_mode = mode; 2896 2897 dev_priv->display.dpms(crtc, mode); 2898 2899 #if 0 2900 if (!dev->primary->master) 2901 return; 2902 2903 master_priv = dev->primary->master->driver_priv; 2904 if (!master_priv->sarea_priv) 2905 return; 2906 #else 2907 if (!dev_priv->sarea_priv) 2908 return; 2909 #endif 2910 2911 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; 2912 2913 switch (pipe) { 2914 case 0: 2915 #if 0 2916 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; 2917 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; 2918 #else 2919 dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; 2920 dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; 2921 #endif 2922 break; 2923 case 1: 2924 #if 0 2925 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; 2926 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; 2927 #else 2928 dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; 2929 dev_priv->sarea_priv->planeB_h = enabled ? 
crtc->mode.vdisplay : 0; 2930 #endif 2931 break; 2932 default: 2933 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); 2934 break; 2935 } 2936 } 2937 2938 static void intel_crtc_disable(struct drm_crtc *crtc) 2939 { 2940 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 2941 struct drm_device *dev = crtc->dev; 2942 2943 /* Flush any pending WAITs before we disable the pipe. Note that 2944 * we need to drop the struct_mutex in order to acquire it again 2945 * during the lowlevel dpms routines around a couple of the 2946 * operations. It does not look trivial nor desirable to move 2947 * that locking higher. So instead we leave a window for the 2948 * submission of further commands on the fb before we can actually 2949 * disable it. This race with userspace exists anyway, and we can 2950 * only rely on the pipe being disabled by userspace after it 2951 * receives the hotplug notification and has flushed any pending 2952 * batches. 2953 */ 2954 if (crtc->fb) { 2955 DRM_LOCK(dev); 2956 intel_finish_fb(crtc->fb); 2957 DRM_UNLOCK(dev); 2958 } 2959 2960 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 2961 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); 2962 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); 2963 2964 if (crtc->fb) { 2965 DRM_LOCK(dev); 2966 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 2967 DRM_UNLOCK(dev); 2968 } 2969 } 2970 2971 /* Prepare for a mode set. 2972 * 2973 * Note we could be a lot smarter here. We need to figure out which outputs 2974 * will be enabled, which disabled (in short, how the config will changes) 2975 * and perform the minimum necessary steps to accomplish that, e.g. updating 2976 * watermarks, FBC configuration, making sure PLLs are programmed correctly, 2977 * panel fitting is in the proper state, etc. 
 */
/* crtc-helper ->prepare hook for i9xx: just turn the crtc off. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}

/* crtc-helper ->commit hook for i9xx: turn the crtc back on. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}

/* crtc-helper ->prepare hook for Ironlake. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}

/* crtc-helper ->commit hook for Ironlake. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}

/* Generic encoder ->prepare: power the encoder down before a mode set. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Generic encoder ->commit: power the encoder up after a mode set. */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	/* on CougarPoint, double-check the pipe actually started scanning */
	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}

/* Tear down and free an intel_encoder (the drm_encoder is embedded in it). */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	drm_free(intel_encoder, DRM_MEM_KMS);
}

/*
 * crtc-helper ->mode_fixup: reject modes the FDI link can't carry and make
 * sure crtc timing fields are populated (unless LVDS already set them).
 */
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
			return false;
	}

	/* All interlaced capable intel hw wants timings in frames. Note though
	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
	 * timings, so we need to be careful not to clobber these.*/
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}

/*
 * Per-chipset core display clock speeds, in kHz.  Used to derive dot-clock
 * limits and watermarks for the respective platforms.
 */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* i915GM: the clock depends on the GCFGC config register. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	gcfgc = pci_read_config(dev->dev, GCFGC, 2);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

/*
 * i855: clock is selected by HPLLCC.  NOTE(review): hpllcc is initialized
 * to 0 and never read from hardware in this port, so this always hits the
 * GC_CLOCK_* case matching 0 — presumably the high-speed default; confirm
 * against the upstream driver, which reads the register from config space.
 */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}

/* FDI link M/N divider values (TU = transfer unit size). */
struct fdi_m_n {
	u32 tu;
	u32 gmch_m;
	u32 gmch_n;
	u32 link_m;
	u32 link_n;
};

/* Halve both terms of a ratio until each fits in 24 bits (<= 0xffffff). */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

/*
 * Compute the FDI data (gmch) and link M/N ratios for the given pixel
 * format, lane count and clocks; both ratios are reduced to fit the
 * 24-bit register fields.
 */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

/*
 * Whether spread-spectrum clocking should be used for the LVDS panel:
 * the i915_panel_use_ssc tunable overrides the VBT default, and SSC is
 * suppressed entirely for quirked machines.
 */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_panel_use_ssc >= 0)
		return i915_panel_use_ssc != 0;
	return dev_priv->lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.
Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *    DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	/* display_bpc: tightest per-output clamp found so far */
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* panel power sequencing state implies the panel depth */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/* DP link bandwidth may force a 6bpc fallback for large modes */
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	/* bpc: minimum pipe depth needed to express the fb's color range */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      bpc, display_bpc);

	/* pipe bpp is expressed in bits per pixel (3 components) */
	*pipe_bpp = display_bpc * 3;

	/* true when the pipe must dither down to the display's depth */
	return display_bpc != bpc;
}

/*
 * Select the PLL reference clock for an i9xx crtc: the SSC clock for a
 * solo LVDS panel when SSC is enabled, else 96 MHz (48 MHz on gen2).
 */
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		/* lvds_ssc_freq is in MHz; refclk is kept in kHz */
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock)
{
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting.
	 */
	if (adjusted_mode->clock >= 100000
	    && adjusted_mode->clock < 140500) {
		clock->p1 = 2;
		clock->p2 = 10;
		clock->n = 3;
		clock->m1 = 16;
		clock->m2 = 8;
	} else if (adjusted_mode->clock >= 140500
		   && adjusted_mode->clock <= 200000) {
		clock->p1 = 1;
		clock->p2 = 10;
		clock->n = 6;
		clock->m1 = 12;
		clock->m2 = 8;
	}
}

/*
 * i9xx_update_pll_dividers - program the FP0/FP1 divisor registers.
 *
 * Encodes clock (and, if given, reduced_clock) into the hardware's
 * N/M1/M2 field layout and writes FP0 with the full-speed divisors.
 * FP1 gets the downclocked divisors only for an LVDS pipe with
 * powersave enabled (and lowfreq_avail is set so later code can use
 * CxSR downclocking); otherwise FP1 mirrors FP0.
 */
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a power of two (1 << n). */
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}

/*
 * i9xx_crtc_mode_set - full mode-set for gen2/3/4 (non-PCH) pipes.
 *
 * Computes PLL divisors for the adjusted mode, builds the DPLL and
 * PIPECONF/DSPCNTR values, powers the LVDS pins if needed, programs the
 * pipe timing registers, and finally enables pipe and plane.
 *
 * @crtc: CRTC being configured
 * @mode: user-requested mode (pipe source size comes from here)
 * @adjusted_mode: encoder-adjusted mode (timings/clock come from here;
 *                 crtc_vtotal/crtc_vblank_end are modified for interlace)
 * @x, @y: framebuffer pan offset, passed through to intel_pipe_set_base()
 * @old_fb: previous framebuffer, passed through to intel_pipe_set_base()
 *
 * Returns 0 on success or a negative errno (e.g. -EINVAL when no PLL
 * configuration fits the requested clock).
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify every encoder attached to this CRTC; the output mix
	 * decides refclk choice and DPLL programming below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				/* Only these chipsets take the multiplier in
				 * the DPLL register itself. */
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		/* gen2 has a different P1/P2 encoding. */
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	/* Select the DPLL reference input. */
	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B.  Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* First DPLL write with VCO still disabled; enabled below after the
	 * LVDS pins are powered. */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	/* Now enable the VCO. */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ takes the pixel multiplier in DPLL_MD. */
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Timing registers pack (start - 1) in the low half and
	 * (end/total - 1) in the high half. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * Initialize reference clocks when the driver loads
 *
 * ironlake_init_pch_refclk scans all encoders to learn which panel
 * outputs exist (LVDS, PCH/CPU eDP), then programs PCH_DREF_CONTROL
 * accordingly: nonspread source selection, SSC enable for panels that
 * want spread spectrum, and CPU source output routing for CPU eDP.
 * Each register write is flushed with a POSTING_READ and settles for
 * 200us, matching the documented sequence.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX, an external CK505 clock chip (from VBT) is
		 * required for SSC. */
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc =
		    has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	}
}

/*
 * ironlake_get_refclk - pick the PCH DPLL reference clock for a CRTC.
 *
 * Returns the panel SSC frequency (kHz) when a lone LVDS output wants
 * spread spectrum, otherwise the fixed 120 MHz PCH reference.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *edp_encoder = NULL;
	int num_connectors = 0;
	bool is_lvds = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			edp_encoder = encoder;
			break;
		}
		num_connectors++;
	}

	/* SSC only when LVDS is the sole consumer of this CRTC. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      dev_priv->lvds_ssc_freq);
		return dev_priv->lvds_ssc_freq * 1000;
	}

	return 120000;
}

/*
 * ironlake_crtc_mode_set - full mode-set for PCH (Ironlake+) pipes.
 *
 * Like i9xx_crtc_mode_set() but additionally derives the FDI link
 * configuration (lane count and data/link M/N values), chooses the pipe
 * color depth via intel_choose_pipe_bpp_dither(), and programs the PCH
 * DPLL/FP registers (skipped for CPU-attached eDP, which has no FDI).
 *
 * Returns 0 on success or a negative errno.
 */
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct intel_encoder *has_edp_encoder = NULL;
	struct
	    drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	struct fdi_m_n m_n = {0};
	u32 temp;
	u32 lvds_sync = 0;
	int target_clock, pixel_multiplier, lane, link_bw, factor;
	unsigned int pipe_bpp;
	bool dither;

	/* Classify the encoders attached to this CRTC. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_edp_encoder = encoder;
			break;
		}

		num_connectors++;
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (is_sdvo && is_tv) {
		if (adjusted_mode->clock >= 100000
		    && adjusted_mode->clock < 140500) {
			clock.p1 = 2;
			clock.p2 = 10;
			clock.n = 3;
			clock.m1 = 16;
			clock.m2 = 8;
		} else if (adjusted_mode->clock >= 140500
			   && adjusted_mode->clock <= 200000) {
			clock.p1 = 1;
			clock.p2 = 10;
			clock.n = 6;
			clock.m1 = 12;
			clock.m2 = 8;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		target_clock = mode->clock;
		intel_edp_link_config(has_edp_encoder,
				      &lane, &link_bw);
	} else {
		/* [e]DP over FDI requires target mode clock
		   instead of link clock */
		/* NOTE(review): has_edp_encoder may be NULL in this branch;
		 * the short-circuit relies on is_dp being true or on
		 * intel_encoder_is_pch_edp tolerating &NULL->base —
		 * confirm against the helper's definition. */
		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
			target_clock = mode->clock;
		else
			target_clock = adjusted_mode->clock;

		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* determine panel color depth */
	temp = I915_READ(PIPECONF(pipe));
	temp &= ~PIPE_BPC_MASK;
	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
	switch (pipe_bpp) {
	case 18:
		temp |= PIPE_6BPC;
		break;
	case 24:
		temp |= PIPE_8BPC;
		break;
	case 30:
		temp |= PIPE_10BPC;
		break;
	case 36:
		temp |= PIPE_12BPC;
		break;
	default:
		kprintf("intel_choose_pipe_bpp returned invalid value %d\n",
			pipe_bpp);
		temp |= PIPE_8BPC;
		pipe_bpp = 24;
		break;
	}

	intel_crtc->bpp = pipe_bpp;
	I915_WRITE(PIPECONF(pipe), temp);

	if (!lane) {
		/*
		 * Account for spread spectrum to avoid
		 * oversubscribing the link. Max center spread
		 * is 2.5%; use 5% for safety's sake.
		 */
		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
		lane = bps / (link_bw * 8) + 1;
	}

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
			     &m_n);

	/* Pack PLL feedback divisors into the FP register layout. */
	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	if (clock.m < factor * clock.n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	/* NOTE(review): same potential NULL has_edp_encoder concern as
	 * above — verify intel_encoder_is_pch_edp handles it. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Select the DPLL reference input. */
	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* PCH eDP needs FDI, but CPU eDP does not */
	if (!intel_crtc->no_pll) {
		if (!has_edp_encoder ||
		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
			I915_WRITE(_PCH_FP0(pipe), fp);
			I915_WRITE(_PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

			POSTING_READ(_PCH_DPLL(pipe));
			DELAY(150);
		}
	} else {
		/* This pipe has no PLL of its own: it must reuse an already
		 * programmed PLL whose DPLL/FP values match exactly. */
		if (dpll == (I915_READ(_PCH_DPLL(0)) & 0x7fffffff) &&
		    fp == I915_READ(_PCH_FP0(0))) {
			intel_crtc->use_pll_a = true;
			DRM_DEBUG_KMS("using pipe a dpll\n");
		} else if (dpll == (I915_READ(_PCH_DPLL(1)) & 0x7fffffff) &&
			   fp == I915_READ(_PCH_FP0(1))) {
			intel_crtc->use_pll_a = false;
			DRM_DEBUG_KMS("using pipe b dpll\n");
		} else {
			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
			return -EINVAL;
		}
	}

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(PCH_LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (HAS_PCH_CPT(dev)) {
			temp &= ~PORT_TRANS_SEL_MASK;
			temp |= PORT_TRANS_SEL_CPT(pipe);
		} else {
			if (pipe == 1)
				temp |= LVDS_PIPEB_SELECT;
			else
				temp &= ~LVDS_PIPEB_SELECT;
		}

		/* set the corresponsding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(PCH_LVDS, temp);
	}

	pipeconf &= ~PIPECONF_DITHER_EN;
	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
	if ((is_lvds && dev_priv->lvds_dither) || dither) {
		pipeconf |= PIPECONF_DITHER_EN;
		pipeconf |= PIPECONF_DITHER_TYPE_SP;
	}
	/* NOTE(review): same potential NULL has_edp_encoder concern as
	 * above. */
	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	} else {
		/* For non-DP output, clear any trans DP clock recovery setting.*/
		I915_WRITE(TRANSDATA_M1(pipe), 0);
		I915_WRITE(TRANSDATA_N1(pipe), 0);
		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
	}

	if (!intel_crtc->no_pll &&
	    (!has_edp_encoder ||
	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
		I915_WRITE(_PCH_DPLL(pipe), dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(_PCH_DPLL(pipe));
		DELAY(150);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(_PCH_DPLL(pipe), dpll);
	}

	intel_crtc->lowfreq_avail = false;
	if (!intel_crtc->no_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(_PCH_FP1(pipe), fp2);
			intel_crtc->lowfreq_avail = true;
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			}
		} else {
			I915_WRITE(_PCH_FP1(pipe), fp);
			if (HAS_PIPE_CXSR(dev)) {
				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
			}
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACED_ILK;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		I915_WRITE(VSYNCSHIFT(pipe),
			   adjusted_mode->crtc_hsync_start
			   - adjusted_mode->crtc_htotal/2);
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		I915_WRITE(VSYNCSHIFT(pipe), 0);
	}

	/* Timing registers pack (start - 1) low, (end/total - 1) high. */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	/* Program the FDI/DP data and link M/N values computed above. */
	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);

	if (has_edp_encoder &&
	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
	}

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}

/*
 * intel_crtc_mode_set - generic mode-set entry point.
 *
 * Brackets the platform-specific crtc_mode_set hook with the vblank
 * pre/post-modeset notifications and records the resulting DPMS state
 * (OFF on failure, ON on success).  Returns the hook's return value.
 */
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int ret;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
					      x, y, old_fb);
	drm_vblank_post_modeset(dev, pipe);

	if (ret)
		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
	else
		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;

	return ret;
}

/*
 * intel_eld_uptodate - check whether the hardware ELD buffer already
 * matches the connector's ELD.
 *
 * @reg_eldv/@bits_eldv: register and mask holding the ELD-valid flag
 * @reg_elda/@bits_elda: register and mask of the ELD access address,
 *                       cleared here to rewind the read pointer
 * @reg_edid: data register yielding successive ELD dwords on each read
 *
 * Returns true when no update is needed (ELD empty and invalid, or
 * valid and byte-for-byte identical); false when the caller must
 * (re)write the ELD.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* Empty ELD: up to date only if the valid bit is clear. */
	if (!eld[0])
		return !i;

	if (!i)
		return false;

	i =
	    I915_READ(reg_elda);
	/* Rewind the hardware ELD read address to the start. */
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD length in dwords; compare against hardware. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}

/*
 * g4x_write_eld - upload the connector's ELD to G4X-class audio hardware.
 *
 * Identifies the audio device generation to pick the right ELD-valid
 * bit, skips the write when the hardware copy is already current, then
 * invalidates, streams the ELD dwords into G4X_HDMIW_HDMIEDID, and
 * re-asserts the valid bit.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Clear valid bit and address before rewriting. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	if (!eld[0])
		return;

	/* Write no more dwords than the hardware buffer holds. */
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}

/*
 * ironlake_write_eld - upload the connector's ELD on PCH platforms.
 *
 * Selects the IBX or CPT audio register bank, offsets it by the CRTC's
 * pipe, derives the ELD-valid mask from the DIP port selection (all
 * ports when unknown), flags DisplayPort connections in the ELD and
 * audio config, and writes the ELD dwords if the hardware copy is
 * stale.
 */
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register banks are 0x100 apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_KMS("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate before rewriting. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* 84 bytes of hw ELD buffer */
	len = 21;
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Re-assert the valid bit(s). */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}

/*
 * intel_write_eld - public entry point for pushing an encoder's ELD to
 * the audio hardware (continues beyond this region).
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
4513 if (!connector) 4514 return; 4515 4516 DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 4517 connector->base.id, 4518 drm_get_connector_name(connector), 4519 connector->encoder->base.id, 4520 drm_get_encoder_name(connector->encoder)); 4521 4522 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 4523 4524 if (dev_priv->display.write_eld) 4525 dev_priv->display.write_eld(connector, crtc); 4526 } 4527 4528 /** Loads the palette/gamma unit for the CRTC with the prepared values */ 4529 void intel_crtc_load_lut(struct drm_crtc *crtc) 4530 { 4531 struct drm_device *dev = crtc->dev; 4532 struct drm_i915_private *dev_priv = dev->dev_private; 4533 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4534 int palreg = PALETTE(intel_crtc->pipe); 4535 int i; 4536 4537 /* The clocks have to be on to load the palette. */ 4538 if (!crtc->enabled || !intel_crtc->active) 4539 return; 4540 4541 /* use legacy palette for Ironlake */ 4542 if (HAS_PCH_SPLIT(dev)) 4543 palreg = LGC_PALETTE(intel_crtc->pipe); 4544 4545 for (i = 0; i < 256; i++) { 4546 I915_WRITE(palreg + 4 * i, 4547 (intel_crtc->lut_r[i] << 16) | 4548 (intel_crtc->lut_g[i] << 8) | 4549 intel_crtc->lut_b[i]); 4550 } 4551 } 4552 4553 static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 4554 { 4555 struct drm_device *dev = crtc->dev; 4556 struct drm_i915_private *dev_priv = dev->dev_private; 4557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4558 bool visible = base != 0; 4559 u32 cntl; 4560 4561 if (intel_crtc->cursor_visible == visible) 4562 return; 4563 4564 cntl = I915_READ(_CURACNTR); 4565 if (visible) { 4566 /* On these chipsets we can only modify the base whilst 4567 * the cursor is disabled. 
4568 */ 4569 I915_WRITE(_CURABASE, base); 4570 4571 cntl &= ~(CURSOR_FORMAT_MASK); 4572 /* XXX width must be 64, stride 256 => 0x00 << 28 */ 4573 cntl |= CURSOR_ENABLE | 4574 CURSOR_GAMMA_ENABLE | 4575 CURSOR_FORMAT_ARGB; 4576 } else 4577 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); 4578 I915_WRITE(_CURACNTR, cntl); 4579 4580 intel_crtc->cursor_visible = visible; 4581 } 4582 4583 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 4584 { 4585 struct drm_device *dev = crtc->dev; 4586 struct drm_i915_private *dev_priv = dev->dev_private; 4587 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4588 int pipe = intel_crtc->pipe; 4589 bool visible = base != 0; 4590 4591 if (intel_crtc->cursor_visible != visible) { 4592 uint32_t cntl = I915_READ(CURCNTR(pipe)); 4593 if (base) { 4594 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); 4595 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 4596 cntl |= pipe << 28; /* Connect to correct pipe */ 4597 } else { 4598 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 4599 cntl |= CURSOR_MODE_DISABLE; 4600 } 4601 I915_WRITE(CURCNTR(pipe), cntl); 4602 4603 intel_crtc->cursor_visible = visible; 4604 } 4605 /* and commit changes on next vblank */ 4606 I915_WRITE(CURBASE(pipe), base); 4607 } 4608 4609 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) 4610 { 4611 struct drm_device *dev = crtc->dev; 4612 struct drm_i915_private *dev_priv = dev->dev_private; 4613 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4614 int pipe = intel_crtc->pipe; 4615 bool visible = base != 0; 4616 4617 if (intel_crtc->cursor_visible != visible) { 4618 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); 4619 if (base) { 4620 cntl &= ~CURSOR_MODE; 4621 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 4622 } else { 4623 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 4624 cntl |= CURSOR_MODE_DISABLE; 4625 } 4626 I915_WRITE(CURCNTR_IVB(pipe), cntl); 4627 4628 intel_crtc->cursor_visible = visible; 4629 } 4630 /* and commit changes 
on next vblank */ 4631 I915_WRITE(CURBASE_IVB(pipe), base); 4632 } 4633 4634 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 4635 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 4636 bool on) 4637 { 4638 struct drm_device *dev = crtc->dev; 4639 struct drm_i915_private *dev_priv = dev->dev_private; 4640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4641 int pipe = intel_crtc->pipe; 4642 int x = intel_crtc->cursor_x; 4643 int y = intel_crtc->cursor_y; 4644 u32 base, pos; 4645 bool visible; 4646 4647 pos = 0; 4648 4649 if (on && crtc->enabled && crtc->fb) { 4650 base = intel_crtc->cursor_addr; 4651 if (x > (int) crtc->fb->width) 4652 base = 0; 4653 4654 if (y > (int) crtc->fb->height) 4655 base = 0; 4656 } else 4657 base = 0; 4658 4659 if (x < 0) { 4660 if (x + intel_crtc->cursor_width < 0) 4661 base = 0; 4662 4663 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 4664 x = -x; 4665 } 4666 pos |= x << CURSOR_X_SHIFT; 4667 4668 if (y < 0) { 4669 if (y + intel_crtc->cursor_height < 0) 4670 base = 0; 4671 4672 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 4673 y = -y; 4674 } 4675 pos |= y << CURSOR_Y_SHIFT; 4676 4677 visible = base != 0; 4678 if (!visible && !intel_crtc->cursor_visible) 4679 return; 4680 4681 if (IS_IVYBRIDGE(dev)) { 4682 I915_WRITE(CURPOS_IVB(pipe), pos); 4683 ivb_update_cursor(crtc, base); 4684 } else { 4685 I915_WRITE(CURPOS(pipe), pos); 4686 if (IS_845G(dev) || IS_I865G(dev)) 4687 i845_update_cursor(crtc, base); 4688 else 4689 i9xx_update_cursor(crtc, base); 4690 } 4691 4692 if (visible) 4693 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); 4694 } 4695 4696 static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4697 struct drm_file *file, 4698 uint32_t handle, 4699 uint32_t width, uint32_t height) 4700 { 4701 struct drm_device *dev = crtc->dev; 4702 struct drm_i915_private *dev_priv = dev->dev_private; 4703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4704 struct drm_i915_gem_object 
*obj; 4705 uint32_t addr; 4706 int ret; 4707 4708 DRM_DEBUG_KMS("\n"); 4709 4710 /* if we want to turn off the cursor ignore width and height */ 4711 if (!handle) { 4712 DRM_DEBUG_KMS("cursor off\n"); 4713 addr = 0; 4714 obj = NULL; 4715 DRM_LOCK(dev); 4716 goto finish; 4717 } 4718 4719 /* Currently we only support 64x64 cursors */ 4720 if (width != 64 || height != 64) { 4721 DRM_ERROR("we currently only support 64x64 cursors\n"); 4722 return -EINVAL; 4723 } 4724 4725 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 4726 if (&obj->base == NULL) 4727 return -ENOENT; 4728 4729 if (obj->base.size < width * height * 4) { 4730 DRM_ERROR("buffer is to small\n"); 4731 ret = -ENOMEM; 4732 goto fail; 4733 } 4734 4735 /* we only need to pin inside GTT if cursor is non-phy */ 4736 DRM_LOCK(dev); 4737 if (!dev_priv->info->cursor_needs_physical) { 4738 if (obj->tiling_mode) { 4739 DRM_ERROR("cursor cannot be tiled\n"); 4740 ret = -EINVAL; 4741 goto fail_locked; 4742 } 4743 4744 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); 4745 if (ret) { 4746 DRM_ERROR("failed to move cursor bo into the GTT\n"); 4747 goto fail_locked; 4748 } 4749 4750 ret = i915_gem_object_put_fence(obj); 4751 if (ret) { 4752 DRM_ERROR("failed to release fence for cursor\n"); 4753 goto fail_unpin; 4754 } 4755 4756 addr = obj->gtt_offset; 4757 } else { 4758 int align = IS_I830(dev) ? 16 * 1024 : 256; 4759 ret = i915_gem_attach_phys_object(dev, obj, 4760 (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 4761 align); 4762 if (ret) { 4763 DRM_ERROR("failed to attach phys object\n"); 4764 goto fail_locked; 4765 } 4766 addr = obj->phys_obj->handle->busaddr; 4767 } 4768 4769 if (IS_GEN2(dev)) 4770 I915_WRITE(CURSIZE, (height << 12) | width); 4771 4772 finish: 4773 if (intel_crtc->cursor_bo) { 4774 if (dev_priv->info->cursor_needs_physical) { 4775 if (intel_crtc->cursor_bo != obj) 4776 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 4777 } else 4778 i915_gem_object_unpin(intel_crtc->cursor_bo); 4779 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 4780 } 4781 4782 DRM_UNLOCK(dev); 4783 4784 intel_crtc->cursor_addr = addr; 4785 intel_crtc->cursor_bo = obj; 4786 intel_crtc->cursor_width = width; 4787 intel_crtc->cursor_height = height; 4788 4789 intel_crtc_update_cursor(crtc, true); 4790 4791 return 0; 4792 fail_unpin: 4793 i915_gem_object_unpin(obj); 4794 fail_locked: 4795 DRM_UNLOCK(dev); 4796 fail: 4797 drm_gem_object_unreference_unlocked(&obj->base); 4798 return ret; 4799 } 4800 4801 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 4802 { 4803 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4804 4805 intel_crtc->cursor_x = x; 4806 intel_crtc->cursor_y = y; 4807 4808 intel_crtc_update_cursor(crtc, true); 4809 4810 return 0; 4811 } 4812 4813 /** Sets the color ramps on behalf of RandR */ 4814 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 4815 u16 blue, int regno) 4816 { 4817 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4818 4819 intel_crtc->lut_r[regno] = red >> 8; 4820 intel_crtc->lut_g[regno] = green >> 8; 4821 intel_crtc->lut_b[regno] = blue >> 8; 4822 } 4823 4824 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 4825 u16 *blue, int regno) 4826 { 4827 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4828 4829 *red = intel_crtc->lut_r[regno] << 8; 4830 *green = intel_crtc->lut_g[regno] << 8; 4831 *blue = 
intel_crtc->lut_b[regno] << 8; 4832 } 4833 4834 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 4835 u16 *blue, uint32_t start, uint32_t size) 4836 { 4837 int end = (start + size > 256) ? 256 : start + size, i; 4838 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4839 4840 for (i = start; i < end; i++) { 4841 intel_crtc->lut_r[i] = red[i] >> 8; 4842 intel_crtc->lut_g[i] = green[i] >> 8; 4843 intel_crtc->lut_b[i] = blue[i] >> 8; 4844 } 4845 4846 intel_crtc_load_lut(crtc); 4847 } 4848 4849 /** 4850 * Get a pipe with a simple mode set on it for doing load-based monitor 4851 * detection. 4852 * 4853 * It will be up to the load-detect code to adjust the pipe as appropriate for 4854 * its requirements. The pipe will be connected to no other encoders. 4855 * 4856 * Currently this code will only succeed if there is a pipe with no encoders 4857 * configured for it. In the future, it could choose to temporarily disable 4858 * some outputs to free up a pipe for its use. 4859 * 4860 * \return crtc, or NULL if no pipes are available. 
4861 */ 4862 4863 /* VESA 640x480x72Hz mode to set on the pipe */ 4864 static struct drm_display_mode load_detect_mode = { 4865 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 4866 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 4867 }; 4868 4869 static int 4870 intel_framebuffer_create(struct drm_device *dev, 4871 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj, 4872 struct drm_framebuffer **res) 4873 { 4874 struct intel_framebuffer *intel_fb; 4875 int ret; 4876 4877 intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO); 4878 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 4879 if (ret) { 4880 drm_gem_object_unreference_unlocked(&obj->base); 4881 drm_free(intel_fb, DRM_MEM_KMS); 4882 return (ret); 4883 } 4884 4885 *res = &intel_fb->base; 4886 return (0); 4887 } 4888 4889 static u32 4890 intel_framebuffer_pitch_for_width(int width, int bpp) 4891 { 4892 u32 pitch = howmany(width * bpp, 8); 4893 return roundup2(pitch, 64); 4894 } 4895 4896 static u32 4897 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 4898 { 4899 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 4900 return roundup2(pitch * mode->vdisplay, PAGE_SIZE); 4901 } 4902 4903 static int 4904 intel_framebuffer_create_for_mode(struct drm_device *dev, 4905 struct drm_display_mode *mode, int depth, int bpp, 4906 struct drm_framebuffer **res) 4907 { 4908 struct drm_i915_gem_object *obj; 4909 struct drm_mode_fb_cmd2 mode_cmd; 4910 4911 obj = i915_gem_alloc_object(dev, 4912 intel_framebuffer_size_for_mode(mode, bpp)); 4913 if (obj == NULL) 4914 return (-ENOMEM); 4915 4916 mode_cmd.width = mode->hdisplay; 4917 mode_cmd.height = mode->vdisplay; 4918 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 4919 bpp); 4920 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 4921 4922 return (intel_framebuffer_create(dev, &mode_cmd, obj, res)); 4923 } 4924 4925 
static int
mode_fits_in_fbdev(struct drm_device *dev,
    struct drm_display_mode *mode, struct drm_framebuffer **res)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	/*
	 * Reuse the fbdev framebuffer for load-detection if it is present
	 * and large enough (pitch and total size) for the requested mode.
	 * On any "doesn't fit" condition *res is set to NULL; the return
	 * value is always 0 here, callers test *res.
	 */
	if (dev_priv->fbdev == NULL) {
		*res = NULL;
		return (0);
	}

	obj = dev_priv->fbdev->ifb.obj;
	if (obj == NULL) {
		*res = NULL;
		return (0);
	}

	fb = &dev_priv->fbdev->ifb.base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
	    fb->bits_per_pixel)) {
		/* fbdev scanout is too narrow for this mode */
		*res = NULL;
		return (0);
	}

	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
		/* backing object too small to hold all scanlines */
		*res = NULL;
		return (0);
	}

	*res = fb;
	return (0);
}

/*
 * Acquire a pipe and program a simple mode on it for load-based monitor
 * detection.  On success fills *old with the state needed by
 * intel_release_load_detect_pipe() to undo the change.  Returns true if a
 * pipe was obtained (either the connector's existing CRTC, forced on, or a
 * previously-unused CRTC with a temporary framebuffer).
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
    struct drm_connector *connector,
    struct drm_display_mode *mode,
    struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1, r;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
	    connector->base.id, drm_get_connector_name(connector),
	    encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* skip CRTCs this encoder cannot drive */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		/* temporary fb is ours to destroy later; remember it */
		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
		    &crtc->fb);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (r != 0) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}

/*
 * Undo intel_get_load_detect_pipe(): release a temporarily-acquired pipe
 * (and temporary framebuffer, if one was created) or restore the previous
 * DPMS state on a borrowed, already-assigned CRTC.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
    struct drm_connector *connector,
    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
	    connector->base.id, drm_get_connector_name(connector),
	    encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* the pipe was grabbed only for detection; shut it back off */
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* pick the FP register actually selected by the DPLL */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* decode M1/N/M2 divisors; Pineview encodes N and M2 differently */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot in the DPLL; ffs() recovers the divisor */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* gen2: LVDS is hardwired to pipe B when enabled */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}

/** Returns the currently programmed mode of the given pipe.
*/ 5196 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 5197 struct drm_crtc *crtc) 5198 { 5199 struct drm_i915_private *dev_priv = dev->dev_private; 5200 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5201 int pipe = intel_crtc->pipe; 5202 struct drm_display_mode *mode; 5203 int htot = I915_READ(HTOTAL(pipe)); 5204 int hsync = I915_READ(HSYNC(pipe)); 5205 int vtot = I915_READ(VTOTAL(pipe)); 5206 int vsync = I915_READ(VSYNC(pipe)); 5207 5208 mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO); 5209 5210 mode->clock = intel_crtc_clock_get(dev, crtc); 5211 mode->hdisplay = (htot & 0xffff) + 1; 5212 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 5213 mode->hsync_start = (hsync & 0xffff) + 1; 5214 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 5215 mode->vdisplay = (vtot & 0xffff) + 1; 5216 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 5217 mode->vsync_start = (vsync & 0xffff) + 1; 5218 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 5219 5220 drm_mode_set_name(mode); 5221 drm_mode_set_crtcinfo(mode, 0); 5222 5223 return mode; 5224 } 5225 5226 #define GPU_IDLE_TIMEOUT (500 /* ms */ * 1000 / hz) 5227 5228 /* When this timer fires, we've been idle for awhile */ 5229 static void intel_gpu_idle_timer(void *arg) 5230 { 5231 struct drm_device *dev = arg; 5232 drm_i915_private_t *dev_priv = dev->dev_private; 5233 5234 if (!list_empty(&dev_priv->mm.active_list)) { 5235 /* Still processing requests, so just re-arm the timer. 
*/ 5236 callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT, 5237 i915_hangcheck_elapsed, dev); 5238 return; 5239 } 5240 5241 dev_priv->busy = false; 5242 taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); 5243 } 5244 5245 #define CRTC_IDLE_TIMEOUT (1000 /* ms */ * 1000 / hz) 5246 5247 static void intel_crtc_idle_timer(void *arg) 5248 { 5249 struct intel_crtc *intel_crtc = arg; 5250 struct drm_crtc *crtc = &intel_crtc->base; 5251 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 5252 struct intel_framebuffer *intel_fb; 5253 5254 intel_fb = to_intel_framebuffer(crtc->fb); 5255 if (intel_fb && intel_fb->obj->active) { 5256 /* The framebuffer is still being accessed by the GPU. */ 5257 callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, 5258 i915_hangcheck_elapsed, crtc->dev); 5259 return; 5260 } 5261 5262 intel_crtc->busy = false; 5263 taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task); 5264 } 5265 5266 static void intel_increase_pllclock(struct drm_crtc *crtc) 5267 { 5268 struct drm_device *dev = crtc->dev; 5269 drm_i915_private_t *dev_priv = dev->dev_private; 5270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5271 int pipe = intel_crtc->pipe; 5272 int dpll_reg = DPLL(pipe); 5273 int dpll; 5274 5275 if (HAS_PCH_SPLIT(dev)) 5276 return; 5277 5278 if (!dev_priv->lvds_downclock_avail) 5279 return; 5280 5281 dpll = I915_READ(dpll_reg); 5282 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 5283 DRM_DEBUG_DRIVER("upclocking LVDS\n"); 5284 5285 assert_panel_unlocked(dev_priv, pipe); 5286 5287 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 5288 I915_WRITE(dpll_reg, dpll); 5289 intel_wait_for_vblank(dev, pipe); 5290 5291 dpll = I915_READ(dpll_reg); 5292 if (dpll & DISPLAY_RATE_SELECT_FPA1) 5293 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 5294 } 5295 5296 /* Schedule downclock */ 5297 callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT, 5298 intel_crtc_idle_timer, intel_crtc); 5299 } 5300 5301 static void 
intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* no manual LVDS downclocking on PCH-split hardware */
	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* panel regs must be unlocked before touching the DPLL */
		assert_panel_unlocked(dev_priv, pipe);

		/* select the FPA1 (downclocked) divisor and verify it stuck */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}

/**
 * intel_idle_update - adjust clocks for idleness
 * @arg: drm_i915_private_t pointer (taskqueue callback argument)
 * @pending: taskqueue pending count (unused)
 *
 * Either the GPU or display (or both) went idle. Check the busy status
 * here and adjust the CRTC and GPU clocks as necessary.
 */
static void intel_idle_update(void *arg, int pending)
{
	drm_i915_private_t *dev_priv = arg;
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	if (!i915_powersave)
		return;

	DRM_LOCK(dev);

	i915_update_gfx_val(dev_priv);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		if (!intel_crtc->busy)
			intel_decrease_pllclock(crtc);
	}

	DRM_UNLOCK(dev);
}

/**
 * intel_mark_busy - mark the GPU and possibly the display busy
 * @dev: drm device
 * @obj: object we're operating on
 *
 * Callers can use this function to indicate that the GPU is busy processing
 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
 * buffer), we'll also mark the display as busy, so we know to increase its
 * clock frequency.
5379 */ 5380 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) 5381 { 5382 drm_i915_private_t *dev_priv = dev->dev_private; 5383 struct drm_crtc *crtc = NULL; 5384 struct intel_framebuffer *intel_fb; 5385 struct intel_crtc *intel_crtc; 5386 5387 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 5388 return; 5389 5390 if (!dev_priv->busy) 5391 dev_priv->busy = true; 5392 else 5393 callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT, 5394 intel_gpu_idle_timer, dev); 5395 5396 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5397 if (!crtc->fb) 5398 continue; 5399 5400 intel_crtc = to_intel_crtc(crtc); 5401 intel_fb = to_intel_framebuffer(crtc->fb); 5402 if (intel_fb->obj == obj) { 5403 if (!intel_crtc->busy) { 5404 /* Non-busy -> busy, upclock */ 5405 intel_increase_pllclock(crtc); 5406 intel_crtc->busy = true; 5407 } else { 5408 /* Busy -> busy, put off timer */ 5409 callout_reset(&intel_crtc->idle_callout, 5410 CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer, 5411 intel_crtc); 5412 } 5413 } 5414 } 5415 } 5416 5417 static void intel_crtc_destroy(struct drm_crtc *crtc) 5418 { 5419 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5420 struct drm_device *dev = crtc->dev; 5421 struct drm_i915_private *dev_priv = dev->dev_private; 5422 struct intel_unpin_work *work; 5423 5424 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5425 work = intel_crtc->unpin_work; 5426 intel_crtc->unpin_work = NULL; 5427 lockmgr(&dev->event_lock, LK_RELEASE); 5428 5429 if (work) { 5430 taskqueue_cancel(dev_priv->tq, &work->task, NULL); 5431 taskqueue_drain(dev_priv->tq, &work->task); 5432 drm_free(work, DRM_MEM_KMS); 5433 } 5434 5435 drm_crtc_cleanup(crtc); 5436 5437 drm_free(intel_crtc, DRM_MEM_KMS); 5438 } 5439 5440 static void intel_unpin_work_fn(void *arg, int pending) 5441 { 5442 struct intel_unpin_work *work = arg; 5443 struct drm_device *dev; 5444 5445 dev = work->dev; 5446 DRM_LOCK(dev); 5447 intel_unpin_fb_obj(work->old_fb_obj); 5448 
drm_gem_object_unreference(&work->pending_flip_obj->base); 5449 drm_gem_object_unreference(&work->old_fb_obj->base); 5450 5451 intel_update_fbc(work->dev); 5452 DRM_UNLOCK(dev); 5453 drm_free(work, DRM_MEM_KMS); 5454 } 5455 5456 static void do_intel_finish_page_flip(struct drm_device *dev, 5457 struct drm_crtc *crtc) 5458 { 5459 drm_i915_private_t *dev_priv = dev->dev_private; 5460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5461 struct intel_unpin_work *work; 5462 struct drm_i915_gem_object *obj; 5463 5464 /* Ignore early vblank irqs */ 5465 if (intel_crtc == NULL) 5466 return; 5467 5468 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5469 work = intel_crtc->unpin_work; 5470 if (work == NULL || !atomic_read(&work->pending)) { 5471 lockmgr(&dev->event_lock, LK_RELEASE); 5472 return; 5473 } 5474 5475 intel_crtc->unpin_work = NULL; 5476 5477 if (work->event) 5478 drm_send_vblank_event(dev, intel_crtc->pipe, work->event); 5479 5480 drm_vblank_put(dev, intel_crtc->pipe); 5481 5482 lockmgr(&dev->event_lock, LK_RELEASE); 5483 5484 obj = work->old_fb_obj; 5485 5486 atomic_clear_mask(1 << intel_crtc->plane, 5487 &obj->pending_flip.counter); 5488 wakeup(&obj->pending_flip); 5489 5490 taskqueue_enqueue(dev_priv->tq, &work->task); 5491 } 5492 5493 void intel_finish_page_flip(struct drm_device *dev, int pipe) 5494 { 5495 drm_i915_private_t *dev_priv = dev->dev_private; 5496 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 5497 5498 do_intel_finish_page_flip(dev, crtc); 5499 } 5500 5501 void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 5502 { 5503 drm_i915_private_t *dev_priv = dev->dev_private; 5504 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 5505 5506 do_intel_finish_page_flip(dev, crtc); 5507 } 5508 5509 void intel_prepare_page_flip(struct drm_device *dev, int plane) 5510 { 5511 drm_i915_private_t *dev_priv = dev->dev_private; 5512 struct intel_crtc *intel_crtc = 5513 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 
5514 5515 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5516 if (intel_crtc->unpin_work) 5517 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 5518 lockmgr(&dev->event_lock, LK_RELEASE); 5519 } 5520 5521 static int intel_gen2_queue_flip(struct drm_device *dev, 5522 struct drm_crtc *crtc, 5523 struct drm_framebuffer *fb, 5524 struct drm_i915_gem_object *obj) 5525 { 5526 struct drm_i915_private *dev_priv = dev->dev_private; 5527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5528 unsigned long offset; 5529 u32 flip_mask; 5530 int ret; 5531 5532 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5533 if (ret) 5534 goto out; 5535 5536 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5537 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5538 5539 ret = BEGIN_LP_RING(6); 5540 if (ret) 5541 goto out; 5542 5543 /* Can't queue multiple flips, so wait for the previous 5544 * one to finish before executing the next. 5545 */ 5546 if (intel_crtc->plane) 5547 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5548 else 5549 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5550 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5551 OUT_RING(MI_NOOP); 5552 OUT_RING(MI_DISPLAY_FLIP | 5553 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5554 OUT_RING(fb->pitches[0]); 5555 OUT_RING(obj->gtt_offset + offset); 5556 OUT_RING(0); /* aux display base address, unused */ 5557 ADVANCE_LP_RING(); 5558 out: 5559 return ret; 5560 } 5561 5562 static int intel_gen3_queue_flip(struct drm_device *dev, 5563 struct drm_crtc *crtc, 5564 struct drm_framebuffer *fb, 5565 struct drm_i915_gem_object *obj) 5566 { 5567 struct drm_i915_private *dev_priv = dev->dev_private; 5568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5569 unsigned long offset; 5570 u32 flip_mask; 5571 int ret; 5572 5573 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5574 if (ret) 5575 goto out; 5576 5577 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5578 offset = 
crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5579 5580 ret = BEGIN_LP_RING(6); 5581 if (ret) 5582 goto out; 5583 5584 if (intel_crtc->plane) 5585 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5586 else 5587 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 5588 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); 5589 OUT_RING(MI_NOOP); 5590 OUT_RING(MI_DISPLAY_FLIP_I915 | 5591 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5592 OUT_RING(fb->pitches[0]); 5593 OUT_RING(obj->gtt_offset + offset); 5594 OUT_RING(MI_NOOP); 5595 5596 ADVANCE_LP_RING(); 5597 out: 5598 return ret; 5599 } 5600 5601 static int intel_gen4_queue_flip(struct drm_device *dev, 5602 struct drm_crtc *crtc, 5603 struct drm_framebuffer *fb, 5604 struct drm_i915_gem_object *obj) 5605 { 5606 struct drm_i915_private *dev_priv = dev->dev_private; 5607 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5608 uint32_t pf, pipesrc; 5609 int ret; 5610 5611 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5612 if (ret) 5613 goto out; 5614 5615 ret = BEGIN_LP_RING(4); 5616 if (ret) 5617 goto out; 5618 5619 /* i965+ uses the linear or tiled offsets from the 5620 * Display Registers (which do not change across a page-flip) 5621 * so we need only reprogram the base address. 5622 */ 5623 OUT_RING(MI_DISPLAY_FLIP | 5624 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5625 OUT_RING(fb->pitches[0]); 5626 OUT_RING(obj->gtt_offset | obj->tiling_mode); 5627 5628 /* XXX Enabling the panel-fitter across page-flip is so far 5629 * untested on non-native modes, so ignore it for now. 5630 * pf = I915_READ(pipe == 0 ? 
PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 5631 */ 5632 pf = 0; 5633 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5634 OUT_RING(pf | pipesrc); 5635 ADVANCE_LP_RING(); 5636 out: 5637 return ret; 5638 } 5639 5640 static int intel_gen6_queue_flip(struct drm_device *dev, 5641 struct drm_crtc *crtc, 5642 struct drm_framebuffer *fb, 5643 struct drm_i915_gem_object *obj) 5644 { 5645 struct drm_i915_private *dev_priv = dev->dev_private; 5646 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5647 uint32_t pf, pipesrc; 5648 int ret; 5649 5650 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5651 if (ret) 5652 goto out; 5653 5654 ret = BEGIN_LP_RING(4); 5655 if (ret) 5656 goto out; 5657 5658 OUT_RING(MI_DISPLAY_FLIP | 5659 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5660 OUT_RING(fb->pitches[0] | obj->tiling_mode); 5661 OUT_RING(obj->gtt_offset); 5662 5663 /* Contrary to the suggestions in the documentation, 5664 * "Enable Panel Fitter" does not seem to be required when page 5665 * flipping with a non-native mode, and worse causes a normal 5666 * modeset to fail. 5667 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 5668 */ 5669 pf = 0; 5670 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5671 OUT_RING(pf | pipesrc); 5672 ADVANCE_LP_RING(); 5673 out: 5674 return ret; 5675 } 5676 5677 /* 5678 * On gen7 we currently use the blit ring because (in early silicon at least) 5679 * the render ring doesn't give us interrpts for page flip completion, which 5680 * means clients will hang after the first flip is queued. Fortunately the 5681 * blit ring generates interrupts properly, so use it instead. 
5682 */ 5683 static int intel_gen7_queue_flip(struct drm_device *dev, 5684 struct drm_crtc *crtc, 5685 struct drm_framebuffer *fb, 5686 struct drm_i915_gem_object *obj) 5687 { 5688 struct drm_i915_private *dev_priv = dev->dev_private; 5689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5690 struct intel_ring_buffer *ring = &dev_priv->rings[BCS]; 5691 int ret; 5692 5693 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 5694 if (ret) 5695 goto out; 5696 5697 ret = intel_ring_begin(ring, 4); 5698 if (ret) 5699 goto out; 5700 5701 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 5702 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 5703 intel_ring_emit(ring, (obj->gtt_offset)); 5704 intel_ring_emit(ring, (MI_NOOP)); 5705 intel_ring_advance(ring); 5706 out: 5707 return ret; 5708 } 5709 5710 static int intel_default_queue_flip(struct drm_device *dev, 5711 struct drm_crtc *crtc, 5712 struct drm_framebuffer *fb, 5713 struct drm_i915_gem_object *obj) 5714 { 5715 return -ENODEV; 5716 } 5717 5718 static int intel_crtc_page_flip(struct drm_crtc *crtc, 5719 struct drm_framebuffer *fb, 5720 struct drm_pending_vblank_event *event) 5721 { 5722 struct drm_device *dev = crtc->dev; 5723 struct drm_i915_private *dev_priv = dev->dev_private; 5724 struct intel_framebuffer *intel_fb; 5725 struct drm_i915_gem_object *obj; 5726 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5727 struct intel_unpin_work *work; 5728 int ret; 5729 5730 work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO); 5731 5732 work->event = event; 5733 work->dev = crtc->dev; 5734 intel_fb = to_intel_framebuffer(crtc->fb); 5735 work->old_fb_obj = intel_fb->obj; 5736 TASK_INIT(&work->task, 0, intel_unpin_work_fn, work); 5737 5738 ret = drm_vblank_get(dev, intel_crtc->pipe); 5739 if (ret) 5740 goto free_work; 5741 5742 /* We borrow the event spin lock for protecting unpin_work */ 5743 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5744 if (intel_crtc->unpin_work) { 5745 
lockmgr(&dev->event_lock, LK_RELEASE); 5746 drm_free(work, DRM_MEM_KMS); 5747 drm_vblank_put(dev, intel_crtc->pipe); 5748 5749 DRM_DEBUG("flip queue: crtc already busy\n"); 5750 return -EBUSY; 5751 } 5752 intel_crtc->unpin_work = work; 5753 lockmgr(&dev->event_lock, LK_RELEASE); 5754 5755 intel_fb = to_intel_framebuffer(fb); 5756 obj = intel_fb->obj; 5757 5758 DRM_LOCK(dev); 5759 5760 /* Reference the objects for the scheduled work. */ 5761 drm_gem_object_reference(&work->old_fb_obj->base); 5762 drm_gem_object_reference(&obj->base); 5763 5764 crtc->fb = fb; 5765 5766 work->pending_flip_obj = obj; 5767 5768 work->enable_stall_check = true; 5769 5770 /* Block clients from rendering to the new back buffer until 5771 * the flip occurs and the object is no longer visible. 5772 */ 5773 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 5774 5775 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 5776 if (ret) 5777 goto cleanup_pending; 5778 intel_disable_fbc(dev); 5779 DRM_UNLOCK(dev); 5780 5781 return 0; 5782 5783 cleanup_pending: 5784 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 5785 drm_gem_object_unreference(&work->old_fb_obj->base); 5786 drm_gem_object_unreference(&obj->base); 5787 DRM_UNLOCK(dev); 5788 5789 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 5790 intel_crtc->unpin_work = NULL; 5791 lockmgr(&dev->event_lock, LK_RELEASE); 5792 5793 drm_vblank_put(dev, intel_crtc->pipe); 5794 free_work: 5795 drm_free(work, DRM_MEM_KMS); 5796 5797 return ret; 5798 } 5799 5800 static void intel_sanitize_modesetting(struct drm_device *dev, 5801 int pipe, int plane) 5802 { 5803 struct drm_i915_private *dev_priv = dev->dev_private; 5804 u32 reg, val; 5805 5806 /* Clear any frame start delays used for debugging left by the BIOS */ 5807 for_each_pipe(pipe) { 5808 reg = PIPECONF(pipe); 5809 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 5810 } 5811 5812 if (HAS_PCH_SPLIT(dev)) 5813 return; 5814 5815 /* Who knows what 
state these registers were left in by the BIOS or 5816 * grub? 5817 * 5818 * If we leave the registers in a conflicting state (e.g. with the 5819 * display plane reading from the other pipe than the one we intend 5820 * to use) then when we attempt to teardown the active mode, we will 5821 * not disable the pipes and planes in the correct order -- leaving 5822 * a plane reading from a disabled pipe and possibly leading to 5823 * undefined behaviour. 5824 */ 5825 5826 reg = DSPCNTR(plane); 5827 val = I915_READ(reg); 5828 5829 if ((val & DISPLAY_PLANE_ENABLE) == 0) 5830 return; 5831 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) 5832 return; 5833 5834 /* This display plane is active and attached to the other CPU pipe. */ 5835 pipe = !pipe; 5836 5837 /* Disable the plane and wait for it to stop reading from the pipe. */ 5838 intel_disable_plane(dev_priv, plane, pipe); 5839 intel_disable_pipe(dev_priv, pipe); 5840 } 5841 5842 static void intel_crtc_reset(struct drm_crtc *crtc) 5843 { 5844 struct drm_device *dev = crtc->dev; 5845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5846 5847 /* Reset flags back to the 'unknown' status so that they 5848 * will be correctly set on the initial modeset. 5849 */ 5850 intel_crtc->dpms_mode = -1; 5851 5852 /* We need to fix up any BIOS configuration that conflicts with 5853 * our expectations. 
5854 */ 5855 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); 5856 } 5857 5858 static struct drm_crtc_helper_funcs intel_helper_funcs = { 5859 .dpms = intel_crtc_dpms, 5860 .mode_fixup = intel_crtc_mode_fixup, 5861 .mode_set = intel_crtc_mode_set, 5862 .mode_set_base = intel_pipe_set_base, 5863 .mode_set_base_atomic = intel_pipe_set_base_atomic, 5864 .load_lut = intel_crtc_load_lut, 5865 .disable = intel_crtc_disable, 5866 }; 5867 5868 static const struct drm_crtc_funcs intel_crtc_funcs = { 5869 .reset = intel_crtc_reset, 5870 .cursor_set = intel_crtc_cursor_set, 5871 .cursor_move = intel_crtc_cursor_move, 5872 .gamma_set = intel_crtc_gamma_set, 5873 .set_config = drm_crtc_helper_set_config, 5874 .destroy = intel_crtc_destroy, 5875 .page_flip = intel_crtc_page_flip, 5876 }; 5877 5878 static void intel_crtc_init(struct drm_device *dev, int pipe) 5879 { 5880 drm_i915_private_t *dev_priv = dev->dev_private; 5881 struct intel_crtc *intel_crtc; 5882 int i; 5883 5884 intel_crtc = kmalloc(sizeof(struct intel_crtc) + 5885 (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), 5886 DRM_MEM_KMS, M_WAITOK | M_ZERO); 5887 5888 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); 5889 5890 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); 5891 for (i = 0; i < 256; i++) { 5892 intel_crtc->lut_r[i] = i; 5893 intel_crtc->lut_g[i] = i; 5894 intel_crtc->lut_b[i] = i; 5895 } 5896 5897 /* Swap pipes & planes for FBC on pre-965 */ 5898 intel_crtc->pipe = pipe; 5899 intel_crtc->plane = pipe; 5900 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 5901 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 5902 intel_crtc->plane = !pipe; 5903 } 5904 5905 KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) && 5906 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL, 5907 ("plane_to_crtc is already initialized")); 5908 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 5909 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = 
&intel_crtc->base; 5910 5911 intel_crtc_reset(&intel_crtc->base); 5912 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 5913 intel_crtc->bpp = 24; /* default for pre-Ironlake */ 5914 5915 if (HAS_PCH_SPLIT(dev)) { 5916 if (pipe == 2 && IS_IVYBRIDGE(dev)) 5917 intel_crtc->no_pll = true; 5918 intel_helper_funcs.prepare = ironlake_crtc_prepare; 5919 intel_helper_funcs.commit = ironlake_crtc_commit; 5920 } else { 5921 intel_helper_funcs.prepare = i9xx_crtc_prepare; 5922 intel_helper_funcs.commit = i9xx_crtc_commit; 5923 } 5924 5925 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5926 5927 intel_crtc->busy = false; 5928 5929 callout_init_mp(&intel_crtc->idle_callout); 5930 } 5931 5932 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 5933 struct drm_file *file) 5934 { 5935 drm_i915_private_t *dev_priv = dev->dev_private; 5936 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 5937 struct drm_mode_object *drmmode_obj; 5938 struct intel_crtc *crtc; 5939 5940 if (!dev_priv) { 5941 DRM_ERROR("called with no initialization\n"); 5942 return -EINVAL; 5943 } 5944 5945 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, 5946 DRM_MODE_OBJECT_CRTC); 5947 5948 if (!drmmode_obj) { 5949 DRM_ERROR("no such CRTC id\n"); 5950 return -EINVAL; 5951 } 5952 5953 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); 5954 pipe_from_crtc_id->pipe = crtc->pipe; 5955 5956 return 0; 5957 } 5958 5959 static int intel_encoder_clones(struct drm_device *dev, int type_mask) 5960 { 5961 struct intel_encoder *encoder; 5962 int index_mask = 0; 5963 int entry = 0; 5964 5965 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 5966 if (type_mask & encoder->clone_mask) 5967 index_mask |= (1 << entry); 5968 entry++; 5969 } 5970 5971 return index_mask; 5972 } 5973 5974 static bool has_edp_a(struct drm_device *dev) 5975 { 5976 struct drm_i915_private *dev_priv = dev->dev_private; 5977 5978 if (!IS_MOBILE(dev)) 
5979 return false; 5980 5981 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 5982 return false; 5983 5984 if (IS_GEN5(dev) && 5985 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) 5986 return false; 5987 5988 return true; 5989 } 5990 5991 static void intel_setup_outputs(struct drm_device *dev) 5992 { 5993 struct drm_i915_private *dev_priv = dev->dev_private; 5994 struct intel_encoder *encoder; 5995 bool dpd_is_edp = false; 5996 bool has_lvds; 5997 5998 has_lvds = intel_lvds_init(dev); 5999 if (!has_lvds && !HAS_PCH_SPLIT(dev)) { 6000 /* disable the panel fitter on everything but LVDS */ 6001 I915_WRITE(PFIT_CONTROL, 0); 6002 } 6003 6004 if (HAS_PCH_SPLIT(dev)) { 6005 dpd_is_edp = intel_dpd_is_edp(dev); 6006 6007 if (has_edp_a(dev)) 6008 intel_dp_init(dev, DP_A); 6009 6010 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 6011 intel_dp_init(dev, PCH_DP_D); 6012 } 6013 6014 intel_crt_init(dev); 6015 6016 if (HAS_PCH_SPLIT(dev)) { 6017 int found; 6018 6019 DRM_DEBUG_KMS( 6020 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n", 6021 (I915_READ(HDMIB) & PORT_DETECTED) != 0, 6022 (I915_READ(PCH_DP_B) & DP_DETECTED) != 0, 6023 (I915_READ(HDMIC) & PORT_DETECTED) != 0, 6024 (I915_READ(HDMID) & PORT_DETECTED) != 0, 6025 (I915_READ(PCH_DP_C) & DP_DETECTED) != 0, 6026 (I915_READ(PCH_DP_D) & DP_DETECTED) != 0, 6027 (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0); 6028 6029 if (I915_READ(HDMIB) & PORT_DETECTED) { 6030 /* PCH SDVOB multiplex with HDMIB */ 6031 found = intel_sdvo_init(dev, PCH_SDVOB); 6032 if (!found) 6033 intel_hdmi_init(dev, HDMIB); 6034 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 6035 intel_dp_init(dev, PCH_DP_B); 6036 } 6037 6038 if (I915_READ(HDMIC) & PORT_DETECTED) 6039 intel_hdmi_init(dev, HDMIC); 6040 6041 if (I915_READ(HDMID) & PORT_DETECTED) 6042 intel_hdmi_init(dev, HDMID); 6043 6044 if (I915_READ(PCH_DP_C) & DP_DETECTED) 6045 intel_dp_init(dev, PCH_DP_C); 6046 6047 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & 
DP_DETECTED)) 6048 intel_dp_init(dev, PCH_DP_D); 6049 6050 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 6051 bool found = false; 6052 6053 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6054 DRM_DEBUG_KMS("probing SDVOB\n"); 6055 found = intel_sdvo_init(dev, SDVOB); 6056 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 6057 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 6058 intel_hdmi_init(dev, SDVOB); 6059 } 6060 6061 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 6062 DRM_DEBUG_KMS("probing DP_B\n"); 6063 intel_dp_init(dev, DP_B); 6064 } 6065 } 6066 6067 /* Before G4X SDVOC doesn't have its own detect register */ 6068 6069 if (I915_READ(SDVOB) & SDVO_DETECTED) { 6070 DRM_DEBUG_KMS("probing SDVOC\n"); 6071 found = intel_sdvo_init(dev, SDVOC); 6072 } 6073 6074 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 6075 6076 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 6077 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 6078 intel_hdmi_init(dev, SDVOC); 6079 } 6080 if (SUPPORTS_INTEGRATED_DP(dev)) { 6081 DRM_DEBUG_KMS("probing DP_C\n"); 6082 intel_dp_init(dev, DP_C); 6083 } 6084 } 6085 6086 if (SUPPORTS_INTEGRATED_DP(dev) && 6087 (I915_READ(DP_D) & DP_DETECTED)) { 6088 DRM_DEBUG_KMS("probing DP_D\n"); 6089 intel_dp_init(dev, DP_D); 6090 } 6091 } else if (IS_GEN2(dev)) { 6092 #if 1 6093 KIB_NOTYET(); 6094 #else 6095 intel_dvo_init(dev); 6096 #endif 6097 } 6098 6099 if (SUPPORTS_TV(dev)) 6100 intel_tv_init(dev); 6101 6102 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 6103 encoder->base.possible_crtcs = encoder->crtc_mask; 6104 encoder->base.possible_clones = 6105 intel_encoder_clones(dev, encoder->clone_mask); 6106 } 6107 6108 /* disable all the possible outputs/crtcs before entering KMS mode */ 6109 drm_helper_disable_unused_functions(dev); 6110 6111 if (HAS_PCH_SPLIT(dev)) 6112 ironlake_init_pch_refclk(dev); 6113 } 6114 6115 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 6116 { 6117 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 
6118 6119 drm_framebuffer_cleanup(fb); 6120 drm_gem_object_unreference_unlocked(&intel_fb->obj->base); 6121 6122 drm_free(intel_fb, DRM_MEM_KMS); 6123 } 6124 6125 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 6126 struct drm_file *file, 6127 unsigned int *handle) 6128 { 6129 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 6130 struct drm_i915_gem_object *obj = intel_fb->obj; 6131 6132 return drm_gem_handle_create(file, &obj->base, handle); 6133 } 6134 6135 static const struct drm_framebuffer_funcs intel_fb_funcs = { 6136 .destroy = intel_user_framebuffer_destroy, 6137 .create_handle = intel_user_framebuffer_create_handle, 6138 }; 6139 6140 int intel_framebuffer_init(struct drm_device *dev, 6141 struct intel_framebuffer *intel_fb, 6142 struct drm_mode_fb_cmd2 *mode_cmd, 6143 struct drm_i915_gem_object *obj) 6144 { 6145 int ret; 6146 6147 if (obj->tiling_mode == I915_TILING_Y) 6148 return -EINVAL; 6149 6150 if (mode_cmd->pitches[0] & 63) 6151 return -EINVAL; 6152 6153 switch (mode_cmd->pixel_format) { 6154 case DRM_FORMAT_RGB332: 6155 case DRM_FORMAT_RGB565: 6156 case DRM_FORMAT_XRGB8888: 6157 case DRM_FORMAT_XBGR8888: 6158 case DRM_FORMAT_ARGB8888: 6159 case DRM_FORMAT_XRGB2101010: 6160 case DRM_FORMAT_ARGB2101010: 6161 /* RGB formats are common across chipsets */ 6162 break; 6163 case DRM_FORMAT_YUYV: 6164 case DRM_FORMAT_UYVY: 6165 case DRM_FORMAT_YVYU: 6166 case DRM_FORMAT_VYUY: 6167 break; 6168 default: 6169 DRM_DEBUG_KMS("unsupported pixel format %u\n", 6170 mode_cmd->pixel_format); 6171 return -EINVAL; 6172 } 6173 6174 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 6175 if (ret) { 6176 DRM_ERROR("framebuffer init failed %d\n", ret); 6177 return ret; 6178 } 6179 6180 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 6181 intel_fb->obj = obj; 6182 return 0; 6183 } 6184 6185 static int 6186 intel_user_framebuffer_create(struct drm_device *dev, 6187 struct drm_file *filp, struct 
drm_mode_fb_cmd2 *mode_cmd, 6188 struct drm_framebuffer **res) 6189 { 6190 struct drm_i915_gem_object *obj; 6191 6192 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 6193 mode_cmd->handles[0])); 6194 if (&obj->base == NULL) 6195 return (-ENOENT); 6196 6197 return (intel_framebuffer_create(dev, mode_cmd, obj, res)); 6198 } 6199 6200 static const struct drm_mode_config_funcs intel_mode_funcs = { 6201 .fb_create = intel_user_framebuffer_create, 6202 .output_poll_changed = intel_fb_output_poll_changed, 6203 }; 6204 6205 /* Set up chip specific display functions */ 6206 static void intel_init_display(struct drm_device *dev) 6207 { 6208 struct drm_i915_private *dev_priv = dev->dev_private; 6209 6210 /* We always want a DPMS function */ 6211 if (HAS_PCH_SPLIT(dev)) { 6212 dev_priv->display.dpms = ironlake_crtc_dpms; 6213 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 6214 dev_priv->display.update_plane = ironlake_update_plane; 6215 } else { 6216 dev_priv->display.dpms = i9xx_crtc_dpms; 6217 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 6218 dev_priv->display.update_plane = i9xx_update_plane; 6219 } 6220 6221 if (I915_HAS_FBC(dev)) { 6222 if (HAS_PCH_SPLIT(dev)) { 6223 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 6224 dev_priv->display.enable_fbc = ironlake_enable_fbc; 6225 dev_priv->display.disable_fbc = ironlake_disable_fbc; 6226 } else if (IS_GM45(dev)) { 6227 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 6228 dev_priv->display.enable_fbc = g4x_enable_fbc; 6229 dev_priv->display.disable_fbc = g4x_disable_fbc; 6230 } else if (IS_CRESTLINE(dev)) { 6231 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 6232 dev_priv->display.enable_fbc = i8xx_enable_fbc; 6233 dev_priv->display.disable_fbc = i8xx_disable_fbc; 6234 } 6235 /* 855GM needs testing */ 6236 } 6237 6238 /* Returns the core display clock speed */ 6239 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 6240 dev_priv->display.get_display_clock_speed = 6241 
i945_get_display_clock_speed; 6242 else if (IS_I915G(dev)) 6243 dev_priv->display.get_display_clock_speed = 6244 i915_get_display_clock_speed; 6245 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) 6246 dev_priv->display.get_display_clock_speed = 6247 i9xx_misc_get_display_clock_speed; 6248 else if (IS_I915GM(dev)) 6249 dev_priv->display.get_display_clock_speed = 6250 i915gm_get_display_clock_speed; 6251 else if (IS_I865G(dev)) 6252 dev_priv->display.get_display_clock_speed = 6253 i865_get_display_clock_speed; 6254 else if (IS_I85X(dev)) 6255 dev_priv->display.get_display_clock_speed = 6256 i855_get_display_clock_speed; 6257 else /* 852, 830 */ 6258 dev_priv->display.get_display_clock_speed = 6259 i830_get_display_clock_speed; 6260 6261 /* For FIFO watermark updates */ 6262 if (HAS_PCH_SPLIT(dev)) { 6263 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; 6264 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; 6265 6266 /* IVB configs may use multi-threaded forcewake */ 6267 if (IS_IVYBRIDGE(dev)) { 6268 u32 ecobus; 6269 6270 /* A small trick here - if the bios hasn't configured MT forcewake, 6271 * and if the device is in RC6, then force_wake_mt_get will not wake 6272 * the device and the ECOBUS read will return zero. Which will be 6273 * (correctly) interpreted by the test below as MT forcewake being 6274 * disabled. 
6275 */ 6276 DRM_LOCK(dev); 6277 __gen6_gt_force_wake_mt_get(dev_priv); 6278 ecobus = I915_READ_NOTRACE(ECOBUS); 6279 __gen6_gt_force_wake_mt_put(dev_priv); 6280 DRM_UNLOCK(dev); 6281 6282 if (ecobus & FORCEWAKE_MT_ENABLE) { 6283 DRM_DEBUG_KMS("Using MT version of forcewake\n"); 6284 dev_priv->display.force_wake_get = 6285 __gen6_gt_force_wake_mt_get; 6286 dev_priv->display.force_wake_put = 6287 __gen6_gt_force_wake_mt_put; 6288 } 6289 } 6290 6291 if (HAS_PCH_IBX(dev)) 6292 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; 6293 else if (HAS_PCH_CPT(dev)) 6294 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; 6295 6296 if (IS_GEN5(dev)) { 6297 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 6298 dev_priv->display.update_wm = ironlake_update_wm; 6299 else { 6300 DRM_DEBUG_KMS("Failed to get proper latency. " 6301 "Disable CxSR\n"); 6302 dev_priv->display.update_wm = NULL; 6303 } 6304 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 6305 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 6306 dev_priv->display.write_eld = ironlake_write_eld; 6307 } else if (IS_GEN6(dev)) { 6308 if (SNB_READ_WM0_LATENCY()) { 6309 dev_priv->display.update_wm = sandybridge_update_wm; 6310 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6311 } else { 6312 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 6313 "Disable CxSR\n"); 6314 dev_priv->display.update_wm = NULL; 6315 } 6316 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 6317 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 6318 dev_priv->display.write_eld = ironlake_write_eld; 6319 } else if (IS_IVYBRIDGE(dev)) { 6320 /* FIXME: detect B0+ stepping and use auto training */ 6321 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 6322 if (SNB_READ_WM0_LATENCY()) { 6323 dev_priv->display.update_wm = sandybridge_update_wm; 6324 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 6325 } else { 6326 DRM_DEBUG_KMS("Failed to read display plane latency. " 6327 "Disable CxSR\n"); 6328 dev_priv->display.update_wm = NULL; 6329 } 6330 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 6331 dev_priv->display.write_eld = ironlake_write_eld; 6332 } else 6333 dev_priv->display.update_wm = NULL; 6334 } else if (IS_PINEVIEW(dev)) { 6335 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 6336 dev_priv->is_ddr3, 6337 dev_priv->fsb_freq, 6338 dev_priv->mem_freq)) { 6339 DRM_INFO("failed to find known CxSR latency " 6340 "(found ddr%s fsb freq %d, mem freq %d), " 6341 "disabling CxSR\n", 6342 (dev_priv->is_ddr3 == 1) ? 
"3" : "2", 6343 dev_priv->fsb_freq, dev_priv->mem_freq); 6344 /* Disable CxSR and never update its watermark again */ 6345 pineview_disable_cxsr(dev); 6346 dev_priv->display.update_wm = NULL; 6347 } else 6348 dev_priv->display.update_wm = pineview_update_wm; 6349 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6350 } else if (IS_G4X(dev)) { 6351 dev_priv->display.write_eld = g4x_write_eld; 6352 dev_priv->display.update_wm = g4x_update_wm; 6353 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 6354 } else if (IS_GEN4(dev)) { 6355 dev_priv->display.update_wm = i965_update_wm; 6356 if (IS_CRESTLINE(dev)) 6357 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 6358 else if (IS_BROADWATER(dev)) 6359 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 6360 } else if (IS_GEN3(dev)) { 6361 dev_priv->display.update_wm = i9xx_update_wm; 6362 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 6363 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6364 } else if (IS_I865G(dev)) { 6365 dev_priv->display.update_wm = i830_update_wm; 6366 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6367 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6368 } else if (IS_I85X(dev)) { 6369 dev_priv->display.update_wm = i9xx_update_wm; 6370 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 6371 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6372 } else { 6373 dev_priv->display.update_wm = i830_update_wm; 6374 dev_priv->display.init_clock_gating = i830_init_clock_gating; 6375 if (IS_845G(dev)) 6376 dev_priv->display.get_fifo_size = i845_get_fifo_size; 6377 else 6378 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6379 } 6380 6381 /* Default just returns -ENODEV to indicate unsupported */ 6382 dev_priv->display.queue_flip = intel_default_queue_flip; 6383 6384 switch (INTEL_INFO(dev)->gen) { 6385 case 2: 6386 dev_priv->display.queue_flip = intel_gen2_queue_flip; 6387 break; 6388 6389 
	/* Tail of intel_init_display(): per-generation page-flip dispatch.
	 * (The switch head and earlier cases are above this chunk.) */
	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_DEBUG("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}

/*
 * One quirk-table entry: PCI device id plus subsystem vendor/device to
 * match, and the hook to run on a match.  subsystem fields may be
 * PCI_ANY_ID to wildcard.
 */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

#define PCI_ANY_ID (~0u)

struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */
	/* NOTE(review): no table entry follows the X40 comment above —
	 * presumably the entry was dropped upstream and the comment kept;
	 * confirm against the Linux i915 history. */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};

/*
 * Walk the quirk table and run the hook of every entry whose PCI device
 * id and (possibly wildcarded) subsystem vendor/device match this GPU.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct intel_quirk *q;
	device_t d;
	int i;

	d = dev->dev;
	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
		q = &intel_quirks[i];
		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different offset on PCH-split
	 * (Ironlake+) parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	/* Linux arbitrates legacy VGA I/O here; not wired up in this port. */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set bit 5 of VGA sequencer register SR01 via the index/data port
	 * pair — presumably the standard "screen off" bit; confirm against
	 * VGA sequencer documentation. */
	outb(VGA_SR_INDEX, 1);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Brief settle delay before disabling the display plane. */
	DELAY(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);	/* flush the write */
}

/*
 * One-time KMS initialization: set up mode-config limits, apply quirks,
 * install per-generation display hooks, create CRTCs/planes, probe
 * outputs, and start power-management features (DRPS on Ironlake-M,
 * RPS/ring-frequency scaling on Gen6) plus the idle task/callout.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
	    &intel_mode_funcs);

	/* Quirks must be applied before generation-dependent setup below. */
	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
	    dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	/* One CRTC and one sprite plane per pipe; plane init failure is
	 * logged but not fatal. */
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	/* Idle detection runs as a task kicked by a callout. */
	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
	callout_init_mp(&dev_priv->idle_callout);
}

/* GEM-dependent modeset setup, run after GEM init: RC6 and the overlay. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}

/*
 * Tear down everything intel_modeset_init() set up.  Ordering here is
 * deliberate: stop polling, raise PLL clocks on active CRTCs, shut down
 * FBC and power management, uninstall the irq handler before mode-object
 * teardown, then cancel/drain the deferred tasks before the CRTCs are
 * freed by drm_mode_config_cleanup().
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	DRM_LOCK(dev);

#if 0
	intel_unregister_dsm_handler();
#endif

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (IS_GEN6(dev))
		gen6_disable_rps(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_rc6(dev);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	DRM_UNLOCK(dev);

	/* taskqueue_cancel() returning nonzero means the task is currently
	 * running; drain to wait for it in that case. */
	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
	if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
		taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);

	/* Shut off idle work before the crtcs get freed. */
	if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
		taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);

	drm_mode_config_cleanup(dev);
}

/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

/* Record the connector→encoder link on both the intel and drm sides. */
void intel_connector_attach_encoder(struct intel_connector *connector,
    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
	    &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 *
 * Toggles the VGA-disable bit in the GMCH control register on the host
 * bridge (read-modify-write of a 16-bit config register).  Always
 * returns 0.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv;
	device_t bridge_dev;
	u16 gmch_ctrl;

	dev_priv = dev->dev_private;
	bridge_dev = intel_gtt_get_bridge_device();
	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
	return (0);
}

/*
 * Snapshot of display-register state for the two pipes, captured at GPU
 * error time and printed by intel_display_print_error_state().
 */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;	/* NOTE(review): never captured or printed below */
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;		/* gen >= 4 only */
		u32 tile_offset;	/* gen >= 4 only */
	} plane[2];
};

/*
 * Capture cursor/plane/pipe registers for pipes 0 and 1.  Allocated
 * M_NOWAIT since this runs from error handling; returns NULL on
 * allocation failure.  Caller owns (and must free) the result.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
	if (error == NULL)
		return NULL;

	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		/* Surface base and tile offset registers exist on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}

/*
 * Pretty-print a captured display error state into the sbuf, mirroring
 * the fields filled by intel_display_capture_error_state() (the gen>=4
 * plane fields are printed only on gen>=4, matching the capture side).
 */
void
intel_display_print_error_state(struct sbuf *m,
    struct drm_device *dev,
    struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		sbuf_printf(m, "Pipe [%d]:\n", i);
		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		sbuf_printf(m, "Plane [%d]:\n", i);
		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		sbuf_printf(m, "Cursor [%d]:\n", i);
		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}