/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
"intel_dpio_phy.h" 76f005ef32Sjsg #include "intel_dpll.h" 77f005ef32Sjsg #include "intel_dpll_mgr.h" 78de97bdebSjsg #include "intel_dpt.h" 79f005ef32Sjsg #include "intel_drrs.h" 80f005ef32Sjsg #include "intel_dsi.h" 81f005ef32Sjsg #include "intel_dvo.h" 82f005ef32Sjsg #include "intel_fb.h" 83c349dbc7Sjsg #include "intel_fbc.h" 84c349dbc7Sjsg #include "intel_fbdev.h" 851bb76ff1Sjsg #include "intel_fdi.h" 86c349dbc7Sjsg #include "intel_fifo_underrun.h" 87c349dbc7Sjsg #include "intel_frontbuffer.h" 88f005ef32Sjsg #include "intel_hdmi.h" 89c349dbc7Sjsg #include "intel_hotplug.h" 90f005ef32Sjsg #include "intel_lvds.h" 91f005ef32Sjsg #include "intel_lvds_regs.h" 921bb76ff1Sjsg #include "intel_modeset_setup.h" 93f005ef32Sjsg #include "intel_modeset_verify.h" 94c349dbc7Sjsg #include "intel_overlay.h" 951bb76ff1Sjsg #include "intel_panel.h" 961bb76ff1Sjsg #include "intel_pch_display.h" 971bb76ff1Sjsg #include "intel_pch_refclk.h" 981bb76ff1Sjsg #include "intel_pcode.h" 99c349dbc7Sjsg #include "intel_pipe_crc.h" 1001bb76ff1Sjsg #include "intel_plane_initial.h" 101f005ef32Sjsg #include "intel_pmdemand.h" 1025ca02815Sjsg #include "intel_pps.h" 103c349dbc7Sjsg #include "intel_psr.h" 104f005ef32Sjsg #include "intel_sdvo.h" 105f005ef32Sjsg #include "intel_snps_phy.h" 106c349dbc7Sjsg #include "intel_tc.h" 107f005ef32Sjsg #include "intel_tv.h" 108f005ef32Sjsg #include "intel_vblank.h" 109f005ef32Sjsg #include "intel_vdsc.h" 110f005ef32Sjsg #include "intel_vdsc_regs.h" 111c349dbc7Sjsg #include "intel_vga.h" 112f005ef32Sjsg #include "intel_vrr.h" 113f005ef32Sjsg #include "intel_wm.h" 1145ca02815Sjsg #include "skl_scaler.h" 1155ca02815Sjsg #include "skl_universal_plane.h" 1161bb76ff1Sjsg #include "skl_watermark.h" 1171bb76ff1Sjsg #include "vlv_dsi.h" 1181bb76ff1Sjsg #include "vlv_dsi_pll.h" 1191bb76ff1Sjsg #include "vlv_dsi_regs.h" 1201bb76ff1Sjsg #include "vlv_sideband.h" 121c349dbc7Sjsg 1225ca02815Sjsg static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 123c349dbc7Sjsg static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 1241bb76ff1Sjsg static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 125f005ef32Sjsg static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); 126c349dbc7Sjsg 127c349dbc7Sjsg /* returns HPLL frequency in kHz */ 128c349dbc7Sjsg int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 129c349dbc7Sjsg { 130c349dbc7Sjsg int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 131c349dbc7Sjsg 132c349dbc7Sjsg /* Obtain SKU information */ 133c349dbc7Sjsg hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 134c349dbc7Sjsg CCK_FUSE_HPLL_FREQ_MASK; 135c349dbc7Sjsg 136c349dbc7Sjsg return vco_freq[hpll_freq] * 1000; 137c349dbc7Sjsg } 138c349dbc7Sjsg 139c349dbc7Sjsg int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 140c349dbc7Sjsg const char *name, u32 reg, int ref_freq) 141c349dbc7Sjsg { 142c349dbc7Sjsg u32 val; 143c349dbc7Sjsg int divider; 144c349dbc7Sjsg 145c349dbc7Sjsg val = vlv_cck_read(dev_priv, reg); 146c349dbc7Sjsg divider = val & CCK_FREQUENCY_VALUES; 147c349dbc7Sjsg 148c349dbc7Sjsg drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != 149c349dbc7Sjsg (divider << CCK_FREQUENCY_STATUS_SHIFT), 150c349dbc7Sjsg "%s change in progress\n", name); 151c349dbc7Sjsg 152c349dbc7Sjsg return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 153c349dbc7Sjsg } 154c349dbc7Sjsg 155c349dbc7Sjsg int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 156c349dbc7Sjsg const char *name, u32 reg) 157c349dbc7Sjsg 
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
	return (crtc_state->active_planes &
		~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
			     0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
			     DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
	else
		intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
}

/* Wa_1604331009:icl,jsl,ehl */
static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
		     enable ? CURSOR_GATING_DIS : 0);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}

u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_pipes)
		return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state));
	else
		return 0;
}

bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe != bigjoiner_master_pipe(crtc_state);
}

bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	return crtc_state->bigjoiner_pipes &&
		crtc->pipe == bigjoiner_master_pipe(crtc_state);
}

static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}

struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state));
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

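/*
 * Assert that the given CPU transcoder's enable state matches @state.
 * The transcoder power domain is taken (if it is enabled) so that the
 * TRANSCONF read below is safe; i830 always keeps both pipes enabled.
 */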
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = TRANSCONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 14)
		intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
	else if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	intel_de_write(dev_priv, reg, val);
	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		unsigned int plane_size;

		if (rem_info->plane[i].linear)
			plane_size = rem_info->plane[i].size;
		else
			plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;

		if (plane_size == 0)
			continue;

		if (rem_info->plane_alignment)
			size = ALIGN(size, rem_info->plane_alignment);

		size += plane_size;
	}

	return size;
}

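/*
 * A GGTT fence is only required for scanout on pre-gen4 hardware, or for
 * FBC-capable planes when the framebuffer is mapped with a normal
 * (non-rotated/remapped) GTT view.
 */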
bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->fbc &&
		 plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotation isn't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;

	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	if (!HAS_DISPLAY(dev_priv))
		return 0;

	/*
	 * We assume the primary plane for pipe A has
	 * the highest stride limits of them all,
	 * in case pipe A is disabled, use the first pipe from pipe_mask.
	 */
	crtc = intel_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state,
			     bool visible)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	plane_state->uapi.visible = visible;

	if (visible)
		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
	else
		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}

void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->enabled_planes = 0;
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->uapi.plane_mask) {
		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
	}
}

void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}

unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	return y;
}

static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}

/*
 * Finds the encoder associated with the given CRTC. This can only be
 * used when we know that the CRTC isn't feeding multiple encoders!
 */
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	const struct drm_connector_state *connector_state;
	const struct drm_connector *connector;
	struct intel_encoder *encoder = NULL;
	struct intel_crtc *master_crtc;
	int num_encoders = 0;
	int i;

	master_crtc = intel_master_crtc(crtc_state);

	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
		if (connector_state->crtc != &master_crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		num_encoders++;
	}

	drm_WARN(state->base.dev, num_encoders != 1,
		 "%d encoders for pipe %c\n",
		 num_encoders, pipe_name(master_crtc->pipe));

	return encoder;
}

static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
	if (crtc->overlay)
		(void) intel_overlay_switch_off(crtc->overlay);

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!crtc_state->nv12_planes)
		return false;

	/* WA Display #0827: Gen9:all */
	if (DISPLAY_VER(dev_priv) == 9)
		return true;

	return false;
}

static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_2006604312:icl,ehl */
	if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	/* Wa_1604331009:icl,jsl,ehl */
	if (is_hdr_mode(crtc_state) &&
	    crtc_state->active_planes & BIT(PLANE_CURSOR) &&
	    DISPLAY_VER(dev_priv) == 11)
		return true;

	return false;
}

static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N strech max must be programmed to 11b (x1)
		 * when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}

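/*
 * The VT-d async flip workaround (see intel_async_flip_vtd_wa() above) is
 * only needed when VT-d is active, and only on HSW/BDW and display
 * version 9 platforms.
 */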
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
		(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}

#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
			     const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}

static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state);
}

static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state);
}

#undef is_disabling
#undef is_enabling

static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);
}

static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->enable_flip_done(plane);
	}
}

static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
					 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 update_planes = crtc_state->update_planes;
	const struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe &&
		    update_planes & BIT(plane->id))
			plane->disable_flip_done(plane);
	}
}

static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
					     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	u8 disable_async_flip_planes = old_crtc_state->async_flip_planes &
				       ~new_crtc_state->async_flip_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	bool need_vbl_wait = false;
	int i;

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (plane->need_async_flip_disable_wa &&
		    plane->pipe == crtc->pipe &&
		    disable_async_flip_planes & BIT(plane->id)) {
			/*
			 * Apart from the async flip bit we want to
			 * preserve the old state for the plane.
			 */
			plane->async_flip(plane, old_crtc_state,
					  old_plane_state, false);
			need_vbl_wait = true;
		}
	}

	if (need_vbl_wait)
		intel_crtc_wait_for_next_vblank(crtc);
}

static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}

static void intel_crtc_disable_planes(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int update_mask = new_crtc_state->update_planes;
	const struct intel_plane_state *old_plane_state;
	struct intel_plane *plane;
	unsigned fb_bits = 0;
	int i;

	intel_crtc_dpms_overlay_disable(crtc);

	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
		if (crtc->pipe != plane->pipe ||
		    !(update_mask & BIT(plane->id)))
			continue;

		intel_plane_disable_arm(plane, new_crtc_state);

		if (old_plane_state->uapi.visible)
			fb_bits |= plane->frontbuffer_bit;
	}

	intel_frontbuffer_flip(dev_priv, fb_bits);
}

static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/*
	 * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
	 * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
	 */
	if (i915->display.dpll.mgr) {
		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (intel_crtc_needs_modeset(new_crtc_state))
				continue;

			new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
			new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
		}
	}
}

static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
					  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(state, encoder,
						crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(state, encoder,
					    crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct intel_atomic_state *state,
				  struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->enable)
			encoder->enable(state, encoder,
					crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
			encoder->disable(state, encoder,
					 old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct intel_atomic_state *state,
					struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(state, encoder,
					      old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != &crtc->base)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(state, encoder,
						  old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_update_pipe(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != &crtc->base)
			continue;

		if (encoder->update_pipe)
encoder->update_pipe(state, encoder, 1347ad8b1aafSjsg crtc_state, conn_state); 1348c349dbc7Sjsg } 1349c349dbc7Sjsg } 1350c349dbc7Sjsg 1351c349dbc7Sjsg static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) 1352c349dbc7Sjsg { 1353c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1354c349dbc7Sjsg struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1355c349dbc7Sjsg 13561bb76ff1Sjsg plane->disable_arm(plane, crtc_state); 13571bb76ff1Sjsg } 13581bb76ff1Sjsg 13591bb76ff1Sjsg static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 13601bb76ff1Sjsg { 13611bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 13621bb76ff1Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 13631bb76ff1Sjsg 13641bb76ff1Sjsg if (crtc_state->has_pch_encoder) { 13651bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 13661bb76ff1Sjsg &crtc_state->fdi_m_n); 13671bb76ff1Sjsg } else if (intel_crtc_has_dp_encoder(crtc_state)) { 13681bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 13691bb76ff1Sjsg &crtc_state->dp_m_n); 13701bb76ff1Sjsg intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 13711bb76ff1Sjsg &crtc_state->dp_m2_n2); 13721bb76ff1Sjsg } 13731bb76ff1Sjsg 13741bb76ff1Sjsg intel_set_transcoder_timings(crtc_state); 13751bb76ff1Sjsg 13761bb76ff1Sjsg ilk_set_pipeconf(crtc_state); 1377c349dbc7Sjsg } 1378c349dbc7Sjsg 1379c349dbc7Sjsg static void ilk_crtc_enable(struct intel_atomic_state *state, 1380c349dbc7Sjsg struct intel_crtc *crtc) 1381c349dbc7Sjsg { 1382c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state = 1383c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 1384c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1385c349dbc7Sjsg enum pipe pipe = crtc->pipe; 1386c349dbc7Sjsg 1387c349dbc7Sjsg if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1388c349dbc7Sjsg return; 1389c349dbc7Sjsg 1390c349dbc7Sjsg /* 1391c349dbc7Sjsg * Sometimes spurious CPU pipe underruns happen during FDI 1392c349dbc7Sjsg * training, at least with VGA+HDMI cloning. Suppress them. 1393c349dbc7Sjsg * 1394c349dbc7Sjsg * On ILK we get an occasional spurious CPU pipe underruns 1395c349dbc7Sjsg * between eDP port A enable and vdd enable. Also PCH port 1396c349dbc7Sjsg * enable seems to result in the occasional CPU pipe underrun. 1397c349dbc7Sjsg * 1398c349dbc7Sjsg * Spurious PCH underruns also occur during PCH enabling. 
1399c349dbc7Sjsg */ 1400c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1401c349dbc7Sjsg intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1402c349dbc7Sjsg 14031bb76ff1Sjsg ilk_configure_cpu_transcoder(new_crtc_state); 1404c349dbc7Sjsg 1405c349dbc7Sjsg intel_set_pipe_src_size(new_crtc_state); 1406c349dbc7Sjsg 1407c349dbc7Sjsg crtc->active = true; 1408c349dbc7Sjsg 1409c349dbc7Sjsg intel_encoders_pre_enable(state, crtc); 1410c349dbc7Sjsg 1411c349dbc7Sjsg if (new_crtc_state->has_pch_encoder) { 14121bb76ff1Sjsg ilk_pch_pre_enable(state, crtc); 1413c349dbc7Sjsg } else { 1414c349dbc7Sjsg assert_fdi_tx_disabled(dev_priv, pipe); 1415c349dbc7Sjsg assert_fdi_rx_disabled(dev_priv, pipe); 1416c349dbc7Sjsg } 1417c349dbc7Sjsg 1418c349dbc7Sjsg ilk_pfit_enable(new_crtc_state); 1419c349dbc7Sjsg 1420c349dbc7Sjsg /* 1421c349dbc7Sjsg * On ILK+ LUT must be loaded before the pipe is running but with 1422c349dbc7Sjsg * clocks enabled 1423c349dbc7Sjsg */ 1424c349dbc7Sjsg intel_color_load_luts(new_crtc_state); 14251bb76ff1Sjsg intel_color_commit_noarm(new_crtc_state); 14261bb76ff1Sjsg intel_color_commit_arm(new_crtc_state); 1427c349dbc7Sjsg /* update DSPCNTR to configure gamma for pipe bottom color */ 1428c349dbc7Sjsg intel_disable_primary_plane(new_crtc_state); 1429c349dbc7Sjsg 14301bb76ff1Sjsg intel_initial_watermarks(state, crtc); 14311bb76ff1Sjsg intel_enable_transcoder(new_crtc_state); 1432c349dbc7Sjsg 1433c349dbc7Sjsg if (new_crtc_state->has_pch_encoder) 14341bb76ff1Sjsg ilk_pch_enable(state, crtc); 1435c349dbc7Sjsg 1436c349dbc7Sjsg intel_crtc_vblank_on(new_crtc_state); 1437c349dbc7Sjsg 1438c349dbc7Sjsg intel_encoders_enable(state, crtc); 1439c349dbc7Sjsg 1440c349dbc7Sjsg if (HAS_PCH_CPT(dev_priv)) 1441f005ef32Sjsg intel_wait_for_pipe_scanline_moving(crtc); 1442c349dbc7Sjsg 1443c349dbc7Sjsg /* 1444c349dbc7Sjsg * Must wait for vblank to avoid spurious PCH FIFO underruns. 1445c349dbc7Sjsg * And a second vblank wait is needed at least on ILK with 1446c349dbc7Sjsg * some interlaced HDMI modes. Let's do the double wait always 1447c349dbc7Sjsg * in case there are more corner cases we don't know about. 
1448c349dbc7Sjsg */ 1449c349dbc7Sjsg if (new_crtc_state->has_pch_encoder) { 14501bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 14511bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 1452c349dbc7Sjsg } 1453c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1454c349dbc7Sjsg intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1455c349dbc7Sjsg } 1456c349dbc7Sjsg 1457c349dbc7Sjsg static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 1458c349dbc7Sjsg enum pipe pipe, bool apply) 1459c349dbc7Sjsg { 1460c349dbc7Sjsg u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); 1461c349dbc7Sjsg u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 1462c349dbc7Sjsg 1463c349dbc7Sjsg if (apply) 1464c349dbc7Sjsg val |= mask; 1465c349dbc7Sjsg else 1466c349dbc7Sjsg val &= ~mask; 1467c349dbc7Sjsg 1468c349dbc7Sjsg intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); 1469c349dbc7Sjsg } 1470c349dbc7Sjsg 1471c349dbc7Sjsg static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) 1472c349dbc7Sjsg { 1473c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1474c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1475c349dbc7Sjsg 1476c349dbc7Sjsg intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), 1477c349dbc7Sjsg HSW_LINETIME(crtc_state->linetime) | 1478c349dbc7Sjsg HSW_IPS_LINETIME(crtc_state->ips_linetime)); 1479c349dbc7Sjsg } 1480c349dbc7Sjsg 1481c349dbc7Sjsg static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1482c349dbc7Sjsg { 1483c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1484c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14851bb76ff1Sjsg enum transcoder transcoder = crtc_state->cpu_transcoder; 14861bb76ff1Sjsg i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? 
MTL_CHICKEN_TRANS(transcoder) : 14871bb76ff1Sjsg CHICKEN_TRANS(transcoder); 1488c349dbc7Sjsg 1489f005ef32Sjsg intel_de_rmw(dev_priv, reg, 1490f005ef32Sjsg HSW_FRAME_START_DELAY_MASK, 1491f005ef32Sjsg HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1492c349dbc7Sjsg } 1493c349dbc7Sjsg 14945ca02815Sjsg static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, 14955ca02815Sjsg const struct intel_crtc_state *crtc_state) 14965ca02815Sjsg { 14971bb76ff1Sjsg struct intel_crtc *master_crtc = intel_master_crtc(crtc_state); 14985ca02815Sjsg 14995ca02815Sjsg /* 15005ca02815Sjsg * Enable sequence steps 1-7 on bigjoiner master 15015ca02815Sjsg */ 15021bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(crtc_state)) 15031bb76ff1Sjsg intel_encoders_pre_pll_enable(state, master_crtc); 15045ca02815Sjsg 15051bb76ff1Sjsg if (crtc_state->shared_dpll) 15061bb76ff1Sjsg intel_enable_shared_dpll(crtc_state); 15071bb76ff1Sjsg 15081bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(crtc_state)) 15091bb76ff1Sjsg intel_encoders_pre_enable(state, master_crtc); 15105ca02815Sjsg } 15115ca02815Sjsg 15121bb76ff1Sjsg static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 15131bb76ff1Sjsg { 15141bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 15151bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15161bb76ff1Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 15171bb76ff1Sjsg 15181bb76ff1Sjsg if (crtc_state->has_pch_encoder) { 15191bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 15201bb76ff1Sjsg &crtc_state->fdi_m_n); 15211bb76ff1Sjsg } else if (intel_crtc_has_dp_encoder(crtc_state)) { 15221bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 15231bb76ff1Sjsg &crtc_state->dp_m_n); 15241bb76ff1Sjsg intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 15251bb76ff1Sjsg &crtc_state->dp_m2_n2); 15261bb76ff1Sjsg } 15271bb76ff1Sjsg 15281bb76ff1Sjsg intel_set_transcoder_timings(crtc_state); 1529f005ef32Sjsg if (HAS_VRR(dev_priv)) 1530f005ef32Sjsg intel_vrr_set_transcoder_timings(crtc_state); 15311bb76ff1Sjsg 15321bb76ff1Sjsg if (cpu_transcoder != TRANSCODER_EDP) 1533f005ef32Sjsg intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), 15341bb76ff1Sjsg crtc_state->pixel_multiplier - 1); 15351bb76ff1Sjsg 15361bb76ff1Sjsg hsw_set_frame_start_delay(crtc_state); 15371bb76ff1Sjsg 15381bb76ff1Sjsg hsw_set_transconf(crtc_state); 15395ca02815Sjsg } 15405ca02815Sjsg 1541c349dbc7Sjsg static void hsw_crtc_enable(struct intel_atomic_state *state, 1542c349dbc7Sjsg struct intel_crtc *crtc) 1543c349dbc7Sjsg { 1544c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state = 1545c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 1546c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1547c349dbc7Sjsg enum pipe pipe = crtc->pipe, hsw_workaround_pipe; 1548c349dbc7Sjsg enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 1549c349dbc7Sjsg bool psl_clkgate_wa; 1550c349dbc7Sjsg 1551c349dbc7Sjsg if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1552c349dbc7Sjsg return; 1553c349dbc7Sjsg 1554f005ef32Sjsg intel_dmc_enable_pipe(dev_priv, crtc->pipe); 1555f005ef32Sjsg 15561bb76ff1Sjsg if (!new_crtc_state->bigjoiner_pipes) { 1557c349dbc7Sjsg intel_encoders_pre_pll_enable(state, crtc); 1558c349dbc7Sjsg 1559c349dbc7Sjsg if (new_crtc_state->shared_dpll) 1560c349dbc7Sjsg intel_enable_shared_dpll(new_crtc_state); 1561c349dbc7Sjsg 1562c349dbc7Sjsg intel_encoders_pre_enable(state, crtc); 
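		/*
		 * Pipes that are part of a bigjoiner configuration take the
		 * "else" path below instead; see icl_ddi_bigjoiner_pre_enable().
		 */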
15635ca02815Sjsg } else { 15645ca02815Sjsg icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); 15655ca02815Sjsg } 1566c349dbc7Sjsg 15671bb76ff1Sjsg intel_dsc_enable(new_crtc_state); 15681bb76ff1Sjsg 15691bb76ff1Sjsg if (DISPLAY_VER(dev_priv) >= 13) 15701bb76ff1Sjsg intel_uncompressed_joiner_enable(new_crtc_state); 15711bb76ff1Sjsg 1572c349dbc7Sjsg intel_set_pipe_src_size(new_crtc_state); 15735ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 1574f005ef32Sjsg bdw_set_pipe_misc(new_crtc_state); 1575c349dbc7Sjsg 15761bb76ff1Sjsg if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && 15771bb76ff1Sjsg !transcoder_is_dsi(cpu_transcoder)) 15781bb76ff1Sjsg hsw_configure_cpu_transcoder(new_crtc_state); 1579c349dbc7Sjsg 1580c349dbc7Sjsg crtc->active = true; 1581c349dbc7Sjsg 15825ca02815Sjsg /* Display WA #1180: WaDisableScalarClockGating: glk */ 15835ca02815Sjsg psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 && 1584c349dbc7Sjsg new_crtc_state->pch_pfit.enabled; 1585c349dbc7Sjsg if (psl_clkgate_wa) 1586c349dbc7Sjsg glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 1587c349dbc7Sjsg 15885ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) 1589c349dbc7Sjsg skl_pfit_enable(new_crtc_state); 1590c349dbc7Sjsg else 1591c349dbc7Sjsg ilk_pfit_enable(new_crtc_state); 1592c349dbc7Sjsg 1593c349dbc7Sjsg /* 1594c349dbc7Sjsg * On ILK+ LUT must be loaded before the pipe is running but with 1595c349dbc7Sjsg * clocks enabled 1596c349dbc7Sjsg */ 1597c349dbc7Sjsg intel_color_load_luts(new_crtc_state); 15981bb76ff1Sjsg intel_color_commit_noarm(new_crtc_state); 15991bb76ff1Sjsg intel_color_commit_arm(new_crtc_state); 1600c349dbc7Sjsg /* update DSPCNTR to configure gamma/csc for pipe bottom color */ 16015ca02815Sjsg if (DISPLAY_VER(dev_priv) < 9) 1602c349dbc7Sjsg intel_disable_primary_plane(new_crtc_state); 1603c349dbc7Sjsg 1604c349dbc7Sjsg hsw_set_linetime_wm(new_crtc_state); 1605c349dbc7Sjsg 16065ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 11) 16075ca02815Sjsg icl_set_pipe_chicken(new_crtc_state); 1608c349dbc7Sjsg 16091bb76ff1Sjsg intel_initial_watermarks(state, crtc); 1610c349dbc7Sjsg 16111bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 16125ca02815Sjsg intel_crtc_vblank_on(new_crtc_state); 1613c349dbc7Sjsg 1614c349dbc7Sjsg intel_encoders_enable(state, crtc); 1615c349dbc7Sjsg 1616c349dbc7Sjsg if (psl_clkgate_wa) { 16171bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 1618c349dbc7Sjsg glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 1619c349dbc7Sjsg } 1620c349dbc7Sjsg 1621c349dbc7Sjsg /* If we change the relative order between pipe/planes enabling, we need 1622c349dbc7Sjsg * to change the workaround. 
*/ 1623c349dbc7Sjsg hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; 1624c349dbc7Sjsg if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 16251bb76ff1Sjsg struct intel_crtc *wa_crtc; 16261bb76ff1Sjsg 16271bb76ff1Sjsg wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); 16281bb76ff1Sjsg 16291bb76ff1Sjsg intel_crtc_wait_for_next_vblank(wa_crtc); 16301bb76ff1Sjsg intel_crtc_wait_for_next_vblank(wa_crtc); 1631c349dbc7Sjsg } 1632c349dbc7Sjsg } 1633c349dbc7Sjsg 1634c349dbc7Sjsg void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) 1635c349dbc7Sjsg { 1636c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1637c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1638c349dbc7Sjsg enum pipe pipe = crtc->pipe; 1639c349dbc7Sjsg 1640c349dbc7Sjsg /* To avoid upsetting the power well on haswell only disable the pfit if 1641c349dbc7Sjsg * it's in use. The hw state code will make sure we get this right. */ 1642ad8b1aafSjsg if (!old_crtc_state->pch_pfit.enabled) 1643ad8b1aafSjsg return; 1644ad8b1aafSjsg 16451bb76ff1Sjsg intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); 16461bb76ff1Sjsg intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); 16471bb76ff1Sjsg intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); 1648c349dbc7Sjsg } 1649c349dbc7Sjsg 1650c349dbc7Sjsg static void ilk_crtc_disable(struct intel_atomic_state *state, 1651c349dbc7Sjsg struct intel_crtc *crtc) 1652c349dbc7Sjsg { 1653c349dbc7Sjsg const struct intel_crtc_state *old_crtc_state = 1654c349dbc7Sjsg intel_atomic_get_old_crtc_state(state, crtc); 1655c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1656c349dbc7Sjsg enum pipe pipe = crtc->pipe; 1657c349dbc7Sjsg 1658c349dbc7Sjsg /* 1659c349dbc7Sjsg * Sometimes spurious CPU pipe underruns happen when the 1660c349dbc7Sjsg * pipe is already disabled, but FDI RX/TX is still enabled. 1661c349dbc7Sjsg * Happens at least with VGA+HDMI cloning. Suppress them. 1662c349dbc7Sjsg */ 1663c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 1664c349dbc7Sjsg intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 1665c349dbc7Sjsg 1666c349dbc7Sjsg intel_encoders_disable(state, crtc); 1667c349dbc7Sjsg 1668c349dbc7Sjsg intel_crtc_vblank_off(old_crtc_state); 1669c349dbc7Sjsg 16701bb76ff1Sjsg intel_disable_transcoder(old_crtc_state); 1671c349dbc7Sjsg 1672c349dbc7Sjsg ilk_pfit_disable(old_crtc_state); 1673c349dbc7Sjsg 1674c349dbc7Sjsg if (old_crtc_state->has_pch_encoder) 16751bb76ff1Sjsg ilk_pch_disable(state, crtc); 1676c349dbc7Sjsg 1677c349dbc7Sjsg intel_encoders_post_disable(state, crtc); 1678c349dbc7Sjsg 16791bb76ff1Sjsg if (old_crtc_state->has_pch_encoder) 16801bb76ff1Sjsg ilk_pch_post_disable(state, crtc); 1681c349dbc7Sjsg 1682c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1683c349dbc7Sjsg intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1684f005ef32Sjsg 1685f005ef32Sjsg intel_disable_shared_dpll(old_crtc_state); 1686c349dbc7Sjsg } 1687c349dbc7Sjsg 1688c349dbc7Sjsg static void hsw_crtc_disable(struct intel_atomic_state *state, 1689c349dbc7Sjsg struct intel_crtc *crtc) 1690c349dbc7Sjsg { 16911bb76ff1Sjsg const struct intel_crtc_state *old_crtc_state = 16921bb76ff1Sjsg intel_atomic_get_old_crtc_state(state, crtc); 1693f005ef32Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 16941bb76ff1Sjsg 1695c349dbc7Sjsg /* 1696c349dbc7Sjsg * FIXME collapse everything to one hook. 1697c349dbc7Sjsg * Need care with mst->ddi interactions. 
1698c349dbc7Sjsg */ 16991bb76ff1Sjsg if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { 1700c349dbc7Sjsg intel_encoders_disable(state, crtc); 1701c349dbc7Sjsg intel_encoders_post_disable(state, crtc); 1702c349dbc7Sjsg } 1703f005ef32Sjsg 1704f005ef32Sjsg intel_disable_shared_dpll(old_crtc_state); 1705f005ef32Sjsg 1706f005ef32Sjsg if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { 1707f005ef32Sjsg struct intel_crtc *slave_crtc; 1708f005ef32Sjsg 1709f005ef32Sjsg intel_encoders_post_pll_disable(state, crtc); 1710f005ef32Sjsg 1711f005ef32Sjsg intel_dmc_disable_pipe(i915, crtc->pipe); 1712f005ef32Sjsg 1713f005ef32Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 1714f005ef32Sjsg intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) 1715f005ef32Sjsg intel_dmc_disable_pipe(i915, slave_crtc->pipe); 1716f005ef32Sjsg } 17171bb76ff1Sjsg } 1718c349dbc7Sjsg 1719c349dbc7Sjsg static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) 1720c349dbc7Sjsg { 1721c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1722c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1723c349dbc7Sjsg 1724c349dbc7Sjsg if (!crtc_state->gmch_pfit.control) 1725c349dbc7Sjsg return; 1726c349dbc7Sjsg 1727c349dbc7Sjsg /* 1728c349dbc7Sjsg * The panel fitter should only be adjusted whilst the pipe is disabled, 1729c349dbc7Sjsg * according to register description and PRM. 1730c349dbc7Sjsg */ 1731c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, 1732c349dbc7Sjsg intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); 17331bb76ff1Sjsg assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); 1734c349dbc7Sjsg 1735c349dbc7Sjsg intel_de_write(dev_priv, PFIT_PGM_RATIOS, 1736c349dbc7Sjsg crtc_state->gmch_pfit.pgm_ratios); 1737c349dbc7Sjsg intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); 1738c349dbc7Sjsg 1739c349dbc7Sjsg /* Border color in case we don't scale up to the full screen. Black by 1740c349dbc7Sjsg * default, change to something else for debugging. */ 1741c349dbc7Sjsg intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); 1742c349dbc7Sjsg } 1743c349dbc7Sjsg 1744c349dbc7Sjsg bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) 1745c349dbc7Sjsg { 1746c349dbc7Sjsg if (phy == PHY_NONE) 1747c349dbc7Sjsg return false; 17485ca02815Sjsg else if (IS_ALDERLAKE_S(dev_priv)) 17495ca02815Sjsg return phy <= PHY_E; 17505ca02815Sjsg else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) 1751ad8b1aafSjsg return phy <= PHY_D; 1752f005ef32Sjsg else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) 1753c349dbc7Sjsg return phy <= PHY_C; 17541bb76ff1Sjsg else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) 1755c349dbc7Sjsg return phy <= PHY_B; 1756ad8b1aafSjsg else 17571bb76ff1Sjsg /* 17581bb76ff1Sjsg * DG2 outputs labelled as "combo PHY" in the bspec use 17591bb76ff1Sjsg * SNPS PHYs with completely different programming, 17601bb76ff1Sjsg * hence we always return false here. 
17611bb76ff1Sjsg */ 1762c349dbc7Sjsg return false; 1763c349dbc7Sjsg } 1764c349dbc7Sjsg 1765c349dbc7Sjsg bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 1766c349dbc7Sjsg { 17675ca02815Sjsg if (IS_DG2(dev_priv)) 17685ca02815Sjsg /* DG2's "TC1" output uses a SNPS PHY */ 1769ad8b1aafSjsg return false; 1770f005ef32Sjsg else if (IS_ALDERLAKE_P(dev_priv) || IS_METEORLAKE(dev_priv)) 17715ca02815Sjsg return phy >= PHY_F && phy <= PHY_I; 17725ca02815Sjsg else if (IS_TIGERLAKE(dev_priv)) 1773c349dbc7Sjsg return phy >= PHY_D && phy <= PHY_I; 17745ca02815Sjsg else if (IS_ICELAKE(dev_priv)) 1775c349dbc7Sjsg return phy >= PHY_C && phy <= PHY_F; 1776ad8b1aafSjsg else 1777c349dbc7Sjsg return false; 1778c349dbc7Sjsg } 1779c349dbc7Sjsg 17805ca02815Sjsg bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 17815ca02815Sjsg { 17825ca02815Sjsg if (phy == PHY_NONE) 17835ca02815Sjsg return false; 17845ca02815Sjsg else if (IS_DG2(dev_priv)) 17855ca02815Sjsg /* 17865ca02815Sjsg * All four "combo" ports and the TC1 port (PHY E) use 17875ca02815Sjsg * Synopsis PHYs. 17885ca02815Sjsg */ 17895ca02815Sjsg return phy <= PHY_E; 17905ca02815Sjsg 17915ca02815Sjsg return false; 17925ca02815Sjsg } 17935ca02815Sjsg 1794c349dbc7Sjsg enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) 1795c349dbc7Sjsg { 17965ca02815Sjsg if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) 17975ca02815Sjsg return PHY_D + port - PORT_D_XELPD; 17985ca02815Sjsg else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) 17995ca02815Sjsg return PHY_F + port - PORT_TC1; 18005ca02815Sjsg else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) 18015ca02815Sjsg return PHY_B + port - PORT_TC1; 18025ca02815Sjsg else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) 18035ca02815Sjsg return PHY_C + port - PORT_TC1; 1804f005ef32Sjsg else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 1805f005ef32Sjsg port == PORT_D) 1806c349dbc7Sjsg return PHY_A; 1807c349dbc7Sjsg 18085ca02815Sjsg return PHY_A + port - PORT_A; 1809c349dbc7Sjsg } 1810c349dbc7Sjsg 1811c349dbc7Sjsg enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) 1812c349dbc7Sjsg { 1813c349dbc7Sjsg if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) 18145ca02815Sjsg return TC_PORT_NONE; 1815c349dbc7Sjsg 18165ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 12) 18175ca02815Sjsg return TC_PORT_1 + port - PORT_TC1; 18185ca02815Sjsg else 18195ca02815Sjsg return TC_PORT_1 + port - PORT_C; 1820c349dbc7Sjsg } 1821c349dbc7Sjsg 1822c349dbc7Sjsg enum intel_display_power_domain 1823c349dbc7Sjsg intel_aux_power_domain(struct intel_digital_port *dig_port) 1824c349dbc7Sjsg { 18251bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1826c349dbc7Sjsg 18271bb76ff1Sjsg if (intel_tc_port_in_tbt_alt_mode(dig_port)) 18281bb76ff1Sjsg return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); 18291bb76ff1Sjsg 18301bb76ff1Sjsg return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 1831c349dbc7Sjsg } 1832c349dbc7Sjsg 18331bb76ff1Sjsg static void get_crtc_power_domains(struct intel_crtc_state *crtc_state, 18341bb76ff1Sjsg struct intel_power_domain_mask *mask) 1835c349dbc7Sjsg { 1836c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1837c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 18381bb76ff1Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1839c349dbc7Sjsg struct drm_encoder *encoder; 1840c349dbc7Sjsg enum pipe 
pipe = crtc->pipe; 18411bb76ff1Sjsg 18421bb76ff1Sjsg bitmap_zero(mask->bits, POWER_DOMAIN_NUM); 1843c349dbc7Sjsg 1844c349dbc7Sjsg if (!crtc_state->hw.active) 18451bb76ff1Sjsg return; 1846c349dbc7Sjsg 18471bb76ff1Sjsg set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits); 18481bb76ff1Sjsg set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits); 1849c349dbc7Sjsg if (crtc_state->pch_pfit.enabled || 1850c349dbc7Sjsg crtc_state->pch_pfit.force_thru) 18511bb76ff1Sjsg set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits); 1852c349dbc7Sjsg 1853c349dbc7Sjsg drm_for_each_encoder_mask(encoder, &dev_priv->drm, 1854c349dbc7Sjsg crtc_state->uapi.encoder_mask) { 1855c349dbc7Sjsg struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 1856c349dbc7Sjsg 18571bb76ff1Sjsg set_bit(intel_encoder->power_domain, mask->bits); 1858c349dbc7Sjsg } 1859c349dbc7Sjsg 1860c349dbc7Sjsg if (HAS_DDI(dev_priv) && crtc_state->has_audio) 18611bb76ff1Sjsg set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits); 1862c349dbc7Sjsg 1863c349dbc7Sjsg if (crtc_state->shared_dpll) 18641bb76ff1Sjsg set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits); 1865c349dbc7Sjsg 18665ca02815Sjsg if (crtc_state->dsc.compression_enable) 18671bb76ff1Sjsg set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits); 1868c349dbc7Sjsg } 1869c349dbc7Sjsg 18701bb76ff1Sjsg void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state, 18711bb76ff1Sjsg struct intel_power_domain_mask *old_domains) 1872c349dbc7Sjsg { 1873c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1874c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1875c349dbc7Sjsg enum intel_display_power_domain domain; 18761bb76ff1Sjsg struct intel_power_domain_mask domains, new_domains; 1877c349dbc7Sjsg 18781bb76ff1Sjsg get_crtc_power_domains(crtc_state, &domains); 1879c349dbc7Sjsg 18801bb76ff1Sjsg bitmap_andnot(new_domains.bits, 18811bb76ff1Sjsg domains.bits, 18821bb76ff1Sjsg crtc->enabled_power_domains.mask.bits, 18831bb76ff1Sjsg POWER_DOMAIN_NUM); 18841bb76ff1Sjsg bitmap_andnot(old_domains->bits, 18851bb76ff1Sjsg crtc->enabled_power_domains.mask.bits, 18861bb76ff1Sjsg domains.bits, 18871bb76ff1Sjsg POWER_DOMAIN_NUM); 1888c349dbc7Sjsg 18891bb76ff1Sjsg for_each_power_domain(domain, &new_domains) 18905ca02815Sjsg intel_display_power_get_in_set(dev_priv, 18915ca02815Sjsg &crtc->enabled_power_domains, 18925ca02815Sjsg domain); 1893c349dbc7Sjsg } 1894c349dbc7Sjsg 18951bb76ff1Sjsg void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, 18961bb76ff1Sjsg struct intel_power_domain_mask *domains) 1897c349dbc7Sjsg { 18985ca02815Sjsg intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 18995ca02815Sjsg &crtc->enabled_power_domains, 19005ca02815Sjsg domains); 1901c349dbc7Sjsg } 1902c349dbc7Sjsg 19031bb76ff1Sjsg static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) 19041bb76ff1Sjsg { 19051bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 19061bb76ff1Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 19071bb76ff1Sjsg 19081bb76ff1Sjsg if (intel_crtc_has_dp_encoder(crtc_state)) { 19091bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, 19101bb76ff1Sjsg &crtc_state->dp_m_n); 19111bb76ff1Sjsg intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 19121bb76ff1Sjsg &crtc_state->dp_m2_n2); 19131bb76ff1Sjsg } 19141bb76ff1Sjsg 19151bb76ff1Sjsg intel_set_transcoder_timings(crtc_state); 19161bb76ff1Sjsg 19171bb76ff1Sjsg i9xx_set_pipeconf(crtc_state); 
19181bb76ff1Sjsg } 19191bb76ff1Sjsg 1920c349dbc7Sjsg static void valleyview_crtc_enable(struct intel_atomic_state *state, 1921c349dbc7Sjsg struct intel_crtc *crtc) 1922c349dbc7Sjsg { 1923c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state = 1924c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 1925c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1926c349dbc7Sjsg enum pipe pipe = crtc->pipe; 1927c349dbc7Sjsg 1928c349dbc7Sjsg if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1929c349dbc7Sjsg return; 1930c349dbc7Sjsg 19311bb76ff1Sjsg i9xx_configure_cpu_transcoder(new_crtc_state); 1932c349dbc7Sjsg 1933c349dbc7Sjsg intel_set_pipe_src_size(new_crtc_state); 1934c349dbc7Sjsg 1935f005ef32Sjsg intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 1936f005ef32Sjsg 1937c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 1938c349dbc7Sjsg intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); 1939c349dbc7Sjsg intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); 1940c349dbc7Sjsg } 1941c349dbc7Sjsg 1942c349dbc7Sjsg crtc->active = true; 1943c349dbc7Sjsg 1944c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1945c349dbc7Sjsg 1946c349dbc7Sjsg intel_encoders_pre_pll_enable(state, crtc); 1947c349dbc7Sjsg 19481bb76ff1Sjsg if (IS_CHERRYVIEW(dev_priv)) 19491bb76ff1Sjsg chv_enable_pll(new_crtc_state); 19501bb76ff1Sjsg else 19511bb76ff1Sjsg vlv_enable_pll(new_crtc_state); 1952c349dbc7Sjsg 1953c349dbc7Sjsg intel_encoders_pre_enable(state, crtc); 1954c349dbc7Sjsg 1955c349dbc7Sjsg i9xx_pfit_enable(new_crtc_state); 1956c349dbc7Sjsg 1957c349dbc7Sjsg intel_color_load_luts(new_crtc_state); 19581bb76ff1Sjsg intel_color_commit_noarm(new_crtc_state); 19591bb76ff1Sjsg intel_color_commit_arm(new_crtc_state); 1960c349dbc7Sjsg /* update DSPCNTR to configure gamma for pipe bottom color */ 1961c349dbc7Sjsg intel_disable_primary_plane(new_crtc_state); 1962c349dbc7Sjsg 19631bb76ff1Sjsg intel_initial_watermarks(state, crtc); 19641bb76ff1Sjsg intel_enable_transcoder(new_crtc_state); 1965c349dbc7Sjsg 1966c349dbc7Sjsg intel_crtc_vblank_on(new_crtc_state); 1967c349dbc7Sjsg 1968c349dbc7Sjsg intel_encoders_enable(state, crtc); 1969c349dbc7Sjsg } 1970c349dbc7Sjsg 1971c349dbc7Sjsg static void i9xx_crtc_enable(struct intel_atomic_state *state, 1972c349dbc7Sjsg struct intel_crtc *crtc) 1973c349dbc7Sjsg { 1974c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state = 1975c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 1976c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1977c349dbc7Sjsg enum pipe pipe = crtc->pipe; 1978c349dbc7Sjsg 1979c349dbc7Sjsg if (drm_WARN_ON(&dev_priv->drm, crtc->active)) 1980c349dbc7Sjsg return; 1981c349dbc7Sjsg 19821bb76ff1Sjsg i9xx_configure_cpu_transcoder(new_crtc_state); 1983c349dbc7Sjsg 1984c349dbc7Sjsg intel_set_pipe_src_size(new_crtc_state); 1985c349dbc7Sjsg 1986c349dbc7Sjsg crtc->active = true; 1987c349dbc7Sjsg 19885ca02815Sjsg if (DISPLAY_VER(dev_priv) != 2) 1989c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1990c349dbc7Sjsg 1991c349dbc7Sjsg intel_encoders_pre_enable(state, crtc); 1992c349dbc7Sjsg 19931bb76ff1Sjsg i9xx_enable_pll(new_crtc_state); 1994c349dbc7Sjsg 1995c349dbc7Sjsg i9xx_pfit_enable(new_crtc_state); 1996c349dbc7Sjsg 1997c349dbc7Sjsg intel_color_load_luts(new_crtc_state); 19981bb76ff1Sjsg intel_color_commit_noarm(new_crtc_state); 19991bb76ff1Sjsg intel_color_commit_arm(new_crtc_state); 2000c349dbc7Sjsg /* update DSPCNTR to configure gamma for pipe bottom color */ 
2001c349dbc7Sjsg intel_disable_primary_plane(new_crtc_state); 2002c349dbc7Sjsg 20031bb76ff1Sjsg if (!intel_initial_watermarks(state, crtc)) 20041bb76ff1Sjsg intel_update_watermarks(dev_priv); 20051bb76ff1Sjsg intel_enable_transcoder(new_crtc_state); 2006c349dbc7Sjsg 2007c349dbc7Sjsg intel_crtc_vblank_on(new_crtc_state); 2008c349dbc7Sjsg 2009c349dbc7Sjsg intel_encoders_enable(state, crtc); 2010ad8b1aafSjsg 2011ad8b1aafSjsg /* prevents spurious underruns */ 20125ca02815Sjsg if (DISPLAY_VER(dev_priv) == 2) 20131bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 2014c349dbc7Sjsg } 2015c349dbc7Sjsg 2016c349dbc7Sjsg static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) 2017c349dbc7Sjsg { 2018c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 2019c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2020c349dbc7Sjsg 2021c349dbc7Sjsg if (!old_crtc_state->gmch_pfit.control) 2022c349dbc7Sjsg return; 2023c349dbc7Sjsg 20241bb76ff1Sjsg assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); 2025c349dbc7Sjsg 2026c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", 2027c349dbc7Sjsg intel_de_read(dev_priv, PFIT_CONTROL)); 2028c349dbc7Sjsg intel_de_write(dev_priv, PFIT_CONTROL, 0); 2029c349dbc7Sjsg } 2030c349dbc7Sjsg 2031c349dbc7Sjsg static void i9xx_crtc_disable(struct intel_atomic_state *state, 2032c349dbc7Sjsg struct intel_crtc *crtc) 2033c349dbc7Sjsg { 2034c349dbc7Sjsg struct intel_crtc_state *old_crtc_state = 2035c349dbc7Sjsg intel_atomic_get_old_crtc_state(state, crtc); 2036c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2037c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2038c349dbc7Sjsg 2039c349dbc7Sjsg /* 2040c349dbc7Sjsg * On gen2 planes are double buffered but the pipe isn't, so we must 2041c349dbc7Sjsg * wait for planes to fully turn off before disabling the pipe. 
2042c349dbc7Sjsg */ 20435ca02815Sjsg if (DISPLAY_VER(dev_priv) == 2) 20441bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 2045c349dbc7Sjsg 2046c349dbc7Sjsg intel_encoders_disable(state, crtc); 2047c349dbc7Sjsg 2048c349dbc7Sjsg intel_crtc_vblank_off(old_crtc_state); 2049c349dbc7Sjsg 20501bb76ff1Sjsg intel_disable_transcoder(old_crtc_state); 2051c349dbc7Sjsg 2052c349dbc7Sjsg i9xx_pfit_disable(old_crtc_state); 2053c349dbc7Sjsg 2054c349dbc7Sjsg intel_encoders_post_disable(state, crtc); 2055c349dbc7Sjsg 2056c349dbc7Sjsg if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { 2057c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv)) 2058c349dbc7Sjsg chv_disable_pll(dev_priv, pipe); 2059c349dbc7Sjsg else if (IS_VALLEYVIEW(dev_priv)) 2060c349dbc7Sjsg vlv_disable_pll(dev_priv, pipe); 2061c349dbc7Sjsg else 2062c349dbc7Sjsg i9xx_disable_pll(old_crtc_state); 2063c349dbc7Sjsg } 2064c349dbc7Sjsg 2065c349dbc7Sjsg intel_encoders_post_pll_disable(state, crtc); 2066c349dbc7Sjsg 20675ca02815Sjsg if (DISPLAY_VER(dev_priv) != 2) 2068c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 2069c349dbc7Sjsg 20701bb76ff1Sjsg if (!dev_priv->display.funcs.wm->initial_watermarks) 20711bb76ff1Sjsg intel_update_watermarks(dev_priv); 2072c349dbc7Sjsg 2073c349dbc7Sjsg /* clock the pipe down to 640x480@60 to potentially save power */ 2074c349dbc7Sjsg if (IS_I830(dev_priv)) 2075c349dbc7Sjsg i830_enable_pipe(dev_priv, pipe); 2076c349dbc7Sjsg } 2077c349dbc7Sjsg 2078c349dbc7Sjsg void intel_encoder_destroy(struct drm_encoder *encoder) 2079c349dbc7Sjsg { 2080c349dbc7Sjsg struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 2081c349dbc7Sjsg 2082c349dbc7Sjsg drm_encoder_cleanup(encoder); 2083c349dbc7Sjsg kfree(intel_encoder); 2084c349dbc7Sjsg } 2085c349dbc7Sjsg 2086c349dbc7Sjsg static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 2087c349dbc7Sjsg { 2088c349dbc7Sjsg const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2089c349dbc7Sjsg 2090c349dbc7Sjsg /* GDG double wide on either pipe, otherwise pipe A only */ 20915ca02815Sjsg return DISPLAY_VER(dev_priv) < 4 && 2092c349dbc7Sjsg (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 2093c349dbc7Sjsg } 2094c349dbc7Sjsg 2095ad8b1aafSjsg static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state) 2096c349dbc7Sjsg { 20975ca02815Sjsg u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock; 20985ca02815Sjsg struct drm_rect src; 2099c349dbc7Sjsg 2100c349dbc7Sjsg /* 2101c349dbc7Sjsg * We only use IF-ID interlacing. If we ever use 2102c349dbc7Sjsg * PF-ID we'll need to adjust the pixel_rate here. 
2103c349dbc7Sjsg */ 2104c349dbc7Sjsg 2105ad8b1aafSjsg if (!crtc_state->pch_pfit.enabled) 2106ad8b1aafSjsg return pixel_rate; 2107c349dbc7Sjsg 21085ca02815Sjsg drm_rect_init(&src, 0, 0, 21091bb76ff1Sjsg drm_rect_width(&crtc_state->pipe_src) << 16, 21101bb76ff1Sjsg drm_rect_height(&crtc_state->pipe_src) << 16); 2111c349dbc7Sjsg 21125ca02815Sjsg return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst, 21135ca02815Sjsg pixel_rate); 21145ca02815Sjsg } 2115ad8b1aafSjsg 21165ca02815Sjsg static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, 21175ca02815Sjsg const struct drm_display_mode *timings) 21185ca02815Sjsg { 21195ca02815Sjsg mode->hdisplay = timings->crtc_hdisplay; 21205ca02815Sjsg mode->htotal = timings->crtc_htotal; 21215ca02815Sjsg mode->hsync_start = timings->crtc_hsync_start; 21225ca02815Sjsg mode->hsync_end = timings->crtc_hsync_end; 2123c349dbc7Sjsg 21245ca02815Sjsg mode->vdisplay = timings->crtc_vdisplay; 21255ca02815Sjsg mode->vtotal = timings->crtc_vtotal; 21265ca02815Sjsg mode->vsync_start = timings->crtc_vsync_start; 21275ca02815Sjsg mode->vsync_end = timings->crtc_vsync_end; 2128c349dbc7Sjsg 21295ca02815Sjsg mode->flags = timings->flags; 21305ca02815Sjsg mode->type = DRM_MODE_TYPE_DRIVER; 21315ca02815Sjsg 21325ca02815Sjsg mode->clock = timings->crtc_clock; 21335ca02815Sjsg 21345ca02815Sjsg drm_mode_set_name(mode); 2135c349dbc7Sjsg } 2136c349dbc7Sjsg 2137c349dbc7Sjsg static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 2138c349dbc7Sjsg { 2139c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2140c349dbc7Sjsg 2141c349dbc7Sjsg if (HAS_GMCH(dev_priv)) 2142c349dbc7Sjsg /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 2143c349dbc7Sjsg crtc_state->pixel_rate = 21445ca02815Sjsg crtc_state->hw.pipe_mode.crtc_clock; 2145c349dbc7Sjsg else 2146c349dbc7Sjsg crtc_state->pixel_rate = 2147c349dbc7Sjsg ilk_pipe_pixel_rate(crtc_state); 2148c349dbc7Sjsg } 2149c349dbc7Sjsg 21501bb76ff1Sjsg static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state, 21511bb76ff1Sjsg struct drm_display_mode *mode) 21525ca02815Sjsg { 21531bb76ff1Sjsg int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 21545ca02815Sjsg 21551bb76ff1Sjsg if (num_pipes < 2) 21561bb76ff1Sjsg return; 21575ca02815Sjsg 21581bb76ff1Sjsg mode->crtc_clock /= num_pipes; 21591bb76ff1Sjsg mode->crtc_hdisplay /= num_pipes; 21601bb76ff1Sjsg mode->crtc_hblank_start /= num_pipes; 21611bb76ff1Sjsg mode->crtc_hblank_end /= num_pipes; 21621bb76ff1Sjsg mode->crtc_hsync_start /= num_pipes; 21631bb76ff1Sjsg mode->crtc_hsync_end /= num_pipes; 21641bb76ff1Sjsg mode->crtc_htotal /= num_pipes; 21655ca02815Sjsg } 21665ca02815Sjsg 21671bb76ff1Sjsg static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state, 21681bb76ff1Sjsg struct drm_display_mode *mode) 21691bb76ff1Sjsg { 21705ca02815Sjsg int overlap = crtc_state->splitter.pixel_overlap; 21711bb76ff1Sjsg int n = crtc_state->splitter.link_count; 21721bb76ff1Sjsg 21731bb76ff1Sjsg if (!crtc_state->splitter.enable) 21741bb76ff1Sjsg return; 21755ca02815Sjsg 21765ca02815Sjsg /* 21775ca02815Sjsg * eDP MSO uses segment timings from EDID for transcoder 21785ca02815Sjsg * timings, but full mode for everything else. 
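	 *
	 * A purely illustrative example of the relation below: two MSO links
	 * with an 8 pixel overlap and 1928 pixel wide segments expand to
	 * (1928 - 8) * 2 = 3840 pixels of full mode width.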
21795ca02815Sjsg * 21805ca02815Sjsg * h_full = (h_segment - pixel_overlap) * link_count 21815ca02815Sjsg */ 21821bb76ff1Sjsg mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n; 21831bb76ff1Sjsg mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n; 21841bb76ff1Sjsg mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n; 21851bb76ff1Sjsg mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n; 21861bb76ff1Sjsg mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n; 21871bb76ff1Sjsg mode->crtc_htotal = (mode->crtc_htotal - overlap) * n; 21881bb76ff1Sjsg mode->crtc_clock *= n; 21895ca02815Sjsg } 21905ca02815Sjsg 21911bb76ff1Sjsg static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state) 21921bb76ff1Sjsg { 21931bb76ff1Sjsg struct drm_display_mode *mode = &crtc_state->hw.mode; 21941bb76ff1Sjsg struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 21951bb76ff1Sjsg struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 21961bb76ff1Sjsg 21971bb76ff1Sjsg /* 21981bb76ff1Sjsg * Start with the adjusted_mode crtc timings, which 21991bb76ff1Sjsg * have been filled with the transcoder timings. 22001bb76ff1Sjsg */ 22011bb76ff1Sjsg drm_mode_copy(pipe_mode, adjusted_mode); 22021bb76ff1Sjsg 22031bb76ff1Sjsg /* Expand MSO per-segment transcoder timings to full */ 22041bb76ff1Sjsg intel_splitter_adjust_timings(crtc_state, pipe_mode); 22051bb76ff1Sjsg 22061bb76ff1Sjsg /* 22071bb76ff1Sjsg * We want the full numbers in adjusted_mode normal timings, 22081bb76ff1Sjsg * adjusted_mode crtc timings are left with the raw transcoder 22091bb76ff1Sjsg * timings. 22101bb76ff1Sjsg */ 22111bb76ff1Sjsg intel_mode_from_crtc_timings(adjusted_mode, pipe_mode); 22121bb76ff1Sjsg 22131bb76ff1Sjsg /* Populate the "user" mode with full numbers */ 22141bb76ff1Sjsg drm_mode_copy(mode, pipe_mode); 22151bb76ff1Sjsg intel_mode_from_crtc_timings(mode, mode); 22161bb76ff1Sjsg mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) * 22171bb76ff1Sjsg (intel_bigjoiner_num_pipes(crtc_state) ?: 1); 22181bb76ff1Sjsg mode->vdisplay = drm_rect_height(&crtc_state->pipe_src); 22191bb76ff1Sjsg 22201bb76ff1Sjsg /* Derive per-pipe timings in case bigjoiner is used */ 22211bb76ff1Sjsg intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 22221bb76ff1Sjsg intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 22231bb76ff1Sjsg 22245ca02815Sjsg intel_crtc_compute_pixel_rate(crtc_state); 22255ca02815Sjsg } 22265ca02815Sjsg 22271bb76ff1Sjsg void intel_encoder_get_config(struct intel_encoder *encoder, 22285ca02815Sjsg struct intel_crtc_state *crtc_state) 22295ca02815Sjsg { 22305ca02815Sjsg encoder->get_config(encoder, crtc_state); 22315ca02815Sjsg 22325ca02815Sjsg intel_crtc_readout_derived_state(crtc_state); 22335ca02815Sjsg } 22345ca02815Sjsg 22351bb76ff1Sjsg static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state) 2236c349dbc7Sjsg { 22371bb76ff1Sjsg int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 22381bb76ff1Sjsg int width, height; 2239c349dbc7Sjsg 22401bb76ff1Sjsg if (num_pipes < 2) 22411bb76ff1Sjsg return; 22425ca02815Sjsg 22431bb76ff1Sjsg width = drm_rect_width(&crtc_state->pipe_src); 22441bb76ff1Sjsg height = drm_rect_height(&crtc_state->pipe_src); 22451bb76ff1Sjsg 22461bb76ff1Sjsg drm_rect_init(&crtc_state->pipe_src, 0, 0, 22471bb76ff1Sjsg width / num_pipes, height); 22485ca02815Sjsg } 22495ca02815Sjsg 22501bb76ff1Sjsg static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state) 22511bb76ff1Sjsg { 22521bb76ff1Sjsg struct 
intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 22531bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 22545ca02815Sjsg 22551bb76ff1Sjsg intel_bigjoiner_compute_pipe_src(crtc_state); 2256c349dbc7Sjsg 2257c349dbc7Sjsg /* 2258c349dbc7Sjsg * Pipe horizontal size must be even in: 2259c349dbc7Sjsg * - DVO ganged mode 2260c349dbc7Sjsg * - LVDS dual channel mode 2261c349dbc7Sjsg * - Double wide pipe 2262c349dbc7Sjsg */ 22631bb76ff1Sjsg if (drm_rect_width(&crtc_state->pipe_src) & 1) { 22641bb76ff1Sjsg if (crtc_state->double_wide) { 22651bb76ff1Sjsg drm_dbg_kms(&i915->drm, 22661bb76ff1Sjsg "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n", 22671bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 2268c349dbc7Sjsg return -EINVAL; 2269c349dbc7Sjsg } 2270c349dbc7Sjsg 22711bb76ff1Sjsg if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 22721bb76ff1Sjsg intel_is_dual_link_lvds(i915)) { 22731bb76ff1Sjsg drm_dbg_kms(&i915->drm, 22741bb76ff1Sjsg "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n", 22751bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 2276c349dbc7Sjsg return -EINVAL; 2277c349dbc7Sjsg } 2278c349dbc7Sjsg } 2279c349dbc7Sjsg 22801bb76ff1Sjsg return 0; 22811bb76ff1Sjsg } 22821bb76ff1Sjsg 22831bb76ff1Sjsg static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) 22841bb76ff1Sjsg { 22851bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 22861bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 22871bb76ff1Sjsg struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 22881bb76ff1Sjsg struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; 22891bb76ff1Sjsg int clock_limit = i915->max_dotclk_freq; 22901bb76ff1Sjsg 22911bb76ff1Sjsg /* 22921bb76ff1Sjsg * Start with the adjusted_mode crtc timings, which 22931bb76ff1Sjsg * have been filled with the transcoder timings. 2294c349dbc7Sjsg */ 22951bb76ff1Sjsg drm_mode_copy(pipe_mode, adjusted_mode); 22961bb76ff1Sjsg 22971bb76ff1Sjsg /* Expand MSO per-segment transcoder timings to full */ 22981bb76ff1Sjsg intel_splitter_adjust_timings(crtc_state, pipe_mode); 22991bb76ff1Sjsg 23001bb76ff1Sjsg /* Derive per-pipe timings in case bigjoiner is used */ 23011bb76ff1Sjsg intel_bigjoiner_adjust_timings(crtc_state, pipe_mode); 23021bb76ff1Sjsg intel_mode_from_crtc_timings(pipe_mode, pipe_mode); 23031bb76ff1Sjsg 23041bb76ff1Sjsg if (DISPLAY_VER(i915) < 4) { 23051bb76ff1Sjsg clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; 23061bb76ff1Sjsg 23071bb76ff1Sjsg /* 23081bb76ff1Sjsg * Enable double wide mode when the dot clock 23091bb76ff1Sjsg * is > 90% of the (display) core speed. 
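		 * (e.g. with a hypothetical 200 MHz max cdclk the single wide
		 * dot clock limit computed above works out to 180 MHz, beyond
		 * which double wide is used when the crtc supports it)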
23101bb76ff1Sjsg */ 23111bb76ff1Sjsg if (intel_crtc_supports_double_wide(crtc) && 23121bb76ff1Sjsg pipe_mode->crtc_clock > clock_limit) { 23131bb76ff1Sjsg clock_limit = i915->max_dotclk_freq; 23141bb76ff1Sjsg crtc_state->double_wide = true; 23151bb76ff1Sjsg } 23161bb76ff1Sjsg } 23171bb76ff1Sjsg 23181bb76ff1Sjsg if (pipe_mode->crtc_clock > clock_limit) { 23191bb76ff1Sjsg drm_dbg_kms(&i915->drm, 23201bb76ff1Sjsg "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 23211bb76ff1Sjsg crtc->base.base.id, crtc->base.name, 23221bb76ff1Sjsg pipe_mode->crtc_clock, clock_limit, 23231bb76ff1Sjsg str_yes_no(crtc_state->double_wide)); 2324c349dbc7Sjsg return -EINVAL; 23251bb76ff1Sjsg } 2326c349dbc7Sjsg 23271bb76ff1Sjsg return 0; 23281bb76ff1Sjsg } 2329c349dbc7Sjsg 23301bb76ff1Sjsg static int intel_crtc_compute_config(struct intel_atomic_state *state, 23311bb76ff1Sjsg struct intel_crtc *crtc) 23321bb76ff1Sjsg { 23331bb76ff1Sjsg struct intel_crtc_state *crtc_state = 23341bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 23351bb76ff1Sjsg int ret; 23361bb76ff1Sjsg 23371bb76ff1Sjsg ret = intel_dpll_crtc_compute_clock(state, crtc); 23381bb76ff1Sjsg if (ret) 23391bb76ff1Sjsg return ret; 23401bb76ff1Sjsg 23411bb76ff1Sjsg ret = intel_crtc_compute_pipe_src(crtc_state); 23421bb76ff1Sjsg if (ret) 23431bb76ff1Sjsg return ret; 23441bb76ff1Sjsg 23451bb76ff1Sjsg ret = intel_crtc_compute_pipe_mode(crtc_state); 23461bb76ff1Sjsg if (ret) 23471bb76ff1Sjsg return ret; 23481bb76ff1Sjsg 23491bb76ff1Sjsg intel_crtc_compute_pixel_rate(crtc_state); 23501bb76ff1Sjsg 23511bb76ff1Sjsg if (crtc_state->has_pch_encoder) 23521bb76ff1Sjsg return ilk_fdi_compute_config(crtc, crtc_state); 2353c349dbc7Sjsg 2354c349dbc7Sjsg return 0; 2355c349dbc7Sjsg } 2356c349dbc7Sjsg 2357c349dbc7Sjsg static void 2358c349dbc7Sjsg intel_reduce_m_n_ratio(u32 *num, u32 *den) 2359c349dbc7Sjsg { 2360c349dbc7Sjsg while (*num > DATA_LINK_M_N_MASK || 2361c349dbc7Sjsg *den > DATA_LINK_M_N_MASK) { 2362c349dbc7Sjsg *num >>= 1; 2363c349dbc7Sjsg *den >>= 1; 2364c349dbc7Sjsg } 2365c349dbc7Sjsg } 2366c349dbc7Sjsg 23671bb76ff1Sjsg static void compute_m_n(u32 *ret_m, u32 *ret_n, 23681bb76ff1Sjsg u32 m, u32 n, u32 constant_n) 2369c349dbc7Sjsg { 2370c349dbc7Sjsg if (constant_n) 23711bb76ff1Sjsg *ret_n = constant_n; 2372c349dbc7Sjsg else 2373c349dbc7Sjsg *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 2374c349dbc7Sjsg 2375c349dbc7Sjsg *ret_m = div_u64(mul_u32_u32(m, *ret_n), n); 2376c349dbc7Sjsg intel_reduce_m_n_ratio(ret_m, ret_n); 2377c349dbc7Sjsg } 2378c349dbc7Sjsg 2379c349dbc7Sjsg void 2380c349dbc7Sjsg intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 2381c349dbc7Sjsg int pixel_clock, int link_clock, 2382c349dbc7Sjsg struct intel_link_m_n *m_n, 23831bb76ff1Sjsg bool fec_enable) 2384c349dbc7Sjsg { 2385c349dbc7Sjsg u32 data_clock = bits_per_pixel * pixel_clock; 2386c349dbc7Sjsg 2387c349dbc7Sjsg if (fec_enable) 2388c349dbc7Sjsg data_clock = intel_dp_mode_to_fec_clock(data_clock); 2389c349dbc7Sjsg 23901bb76ff1Sjsg /* 23911bb76ff1Sjsg * Windows/BIOS uses fixed M/N values always. Follow suit. 23921bb76ff1Sjsg * 23931bb76ff1Sjsg * Also several DP dongles in particular seem to be fussy 23941bb76ff1Sjsg * about too large link M/N values. Presumably the 20bit 23951bb76ff1Sjsg * value used by Windows/BIOS is acceptable to everyone. 
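	 *
	 * Purely as an illustration (ignoring FEC overhead): a 148500 kHz,
	 * 24bpp mode over four HBR (link_clock 270000) lanes gives a data
	 * M/N ratio of 3564000 / 8640000 and a link M/N ratio of
	 * 148500 / 270000, each encoded against the fixed N values passed
	 * to compute_m_n() below.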
23961bb76ff1Sjsg */ 2397c349dbc7Sjsg m_n->tu = 64; 23981bb76ff1Sjsg compute_m_n(&m_n->data_m, &m_n->data_n, 23991bb76ff1Sjsg data_clock, link_clock * nlanes * 8, 24001bb76ff1Sjsg 0x8000000); 2401c349dbc7Sjsg 24021bb76ff1Sjsg compute_m_n(&m_n->link_m, &m_n->link_n, 24031bb76ff1Sjsg pixel_clock, link_clock, 24041bb76ff1Sjsg 0x80000); 2405c349dbc7Sjsg } 2406c349dbc7Sjsg 2407f005ef32Sjsg void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) 2408c349dbc7Sjsg { 2409c349dbc7Sjsg /* 2410c349dbc7Sjsg * There may be no VBT; and if the BIOS enabled SSC we can 2411c349dbc7Sjsg * just keep using it to avoid unnecessary flicker. Whereas if the 2412c349dbc7Sjsg * BIOS isn't using it, don't assume it will work even if the VBT 2413c349dbc7Sjsg * indicates as much. 2414c349dbc7Sjsg */ 2415c349dbc7Sjsg if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 2416c349dbc7Sjsg bool bios_lvds_use_ssc = intel_de_read(dev_priv, 2417c349dbc7Sjsg PCH_DREF_CONTROL) & 2418c349dbc7Sjsg DREF_SSC1_ENABLE; 2419c349dbc7Sjsg 24201bb76ff1Sjsg if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { 2421c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 2422c349dbc7Sjsg "SSC %s by BIOS, overriding VBT which says %s\n", 24231bb76ff1Sjsg str_enabled_disabled(bios_lvds_use_ssc), 24241bb76ff1Sjsg str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); 24251bb76ff1Sjsg dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; 2426c349dbc7Sjsg } 2427c349dbc7Sjsg } 2428c349dbc7Sjsg } 2429c349dbc7Sjsg 24301bb76ff1Sjsg void intel_zero_m_n(struct intel_link_m_n *m_n) 2431c349dbc7Sjsg { 24321bb76ff1Sjsg /* corresponds to 0 register value */ 24331bb76ff1Sjsg memset(m_n, 0, sizeof(*m_n)); 24341bb76ff1Sjsg m_n->tu = 1; 2435c349dbc7Sjsg } 2436c349dbc7Sjsg 24371bb76ff1Sjsg void intel_set_m_n(struct drm_i915_private *i915, 24381bb76ff1Sjsg const struct intel_link_m_n *m_n, 24391bb76ff1Sjsg i915_reg_t data_m_reg, i915_reg_t data_n_reg, 24401bb76ff1Sjsg i915_reg_t link_m_reg, i915_reg_t link_n_reg) 24411bb76ff1Sjsg { 24421bb76ff1Sjsg intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); 24431bb76ff1Sjsg intel_de_write(i915, data_n_reg, m_n->data_n); 24441bb76ff1Sjsg intel_de_write(i915, link_m_reg, m_n->link_m); 24451bb76ff1Sjsg /* 24461bb76ff1Sjsg * On BDW+ writing LINK_N arms the double buffered update 24471bb76ff1Sjsg * of all the M/N registers, so it must be written last. 
24481bb76ff1Sjsg */ 24491bb76ff1Sjsg intel_de_write(i915, link_n_reg, m_n->link_n); 24501bb76ff1Sjsg } 24511bb76ff1Sjsg 24521bb76ff1Sjsg bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, 2453c349dbc7Sjsg enum transcoder transcoder) 2454c349dbc7Sjsg { 2455c349dbc7Sjsg if (IS_HASWELL(dev_priv)) 2456c349dbc7Sjsg return transcoder == TRANSCODER_EDP; 2457c349dbc7Sjsg 24581bb76ff1Sjsg return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); 2459c349dbc7Sjsg } 2460c349dbc7Sjsg 24611bb76ff1Sjsg void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, 24621bb76ff1Sjsg enum transcoder transcoder, 24631bb76ff1Sjsg const struct intel_link_m_n *m_n) 2464c349dbc7Sjsg { 2465c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2466c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2467c349dbc7Sjsg 24681bb76ff1Sjsg if (DISPLAY_VER(dev_priv) >= 5) 24691bb76ff1Sjsg intel_set_m_n(dev_priv, m_n, 24701bb76ff1Sjsg PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 24711bb76ff1Sjsg PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 2472c349dbc7Sjsg else 24731bb76ff1Sjsg intel_set_m_n(dev_priv, m_n, 24741bb76ff1Sjsg PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 24751bb76ff1Sjsg PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 24761bb76ff1Sjsg } 24771bb76ff1Sjsg 24781bb76ff1Sjsg void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, 24791bb76ff1Sjsg enum transcoder transcoder, 24801bb76ff1Sjsg const struct intel_link_m_n *m_n) 24811bb76ff1Sjsg { 24821bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 24831bb76ff1Sjsg 24841bb76ff1Sjsg if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 24851bb76ff1Sjsg return; 24861bb76ff1Sjsg 24871bb76ff1Sjsg intel_set_m_n(dev_priv, m_n, 24881bb76ff1Sjsg PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 24891bb76ff1Sjsg PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 2490c349dbc7Sjsg } 2491c349dbc7Sjsg 24925ca02815Sjsg static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) 2493c349dbc7Sjsg { 2494c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2495c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2496c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2497c349dbc7Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2498c349dbc7Sjsg const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2499f005ef32Sjsg u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2500c349dbc7Sjsg int vsyncshift = 0; 2501c349dbc7Sjsg 2502c349dbc7Sjsg /* We need to be careful not to changed the adjusted mode, for otherwise 2503c349dbc7Sjsg * the hw state checker will get angry at the mismatch. 
*/ 2504f005ef32Sjsg crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2505c349dbc7Sjsg crtc_vtotal = adjusted_mode->crtc_vtotal; 2506f005ef32Sjsg crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2507c349dbc7Sjsg crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2508c349dbc7Sjsg 2509c349dbc7Sjsg if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 2510c349dbc7Sjsg /* the chip adds 2 halflines automatically */ 2511c349dbc7Sjsg crtc_vtotal -= 1; 2512c349dbc7Sjsg crtc_vblank_end -= 1; 2513c349dbc7Sjsg 2514c349dbc7Sjsg if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2515c349dbc7Sjsg vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 2516c349dbc7Sjsg else 2517c349dbc7Sjsg vsyncshift = adjusted_mode->crtc_hsync_start - 2518c349dbc7Sjsg adjusted_mode->crtc_htotal / 2; 2519c349dbc7Sjsg if (vsyncshift < 0) 2520c349dbc7Sjsg vsyncshift += adjusted_mode->crtc_htotal; 2521c349dbc7Sjsg } 2522c349dbc7Sjsg 2523f005ef32Sjsg /* 2524f005ef32Sjsg * VBLANK_START no longer works on ADL+, instead we must use 2525f005ef32Sjsg * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2526f005ef32Sjsg */ 2527f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 13) { 2528f005ef32Sjsg intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), 2529f005ef32Sjsg crtc_vblank_start - crtc_vdisplay); 2530f005ef32Sjsg 2531f005ef32Sjsg /* 2532f005ef32Sjsg * VBLANK_START not used by hw, just clear it 2533f005ef32Sjsg * to make it stand out in register dumps. 2534f005ef32Sjsg */ 2535f005ef32Sjsg crtc_vblank_start = 1; 2536f005ef32Sjsg } 2537f005ef32Sjsg 25385ca02815Sjsg if (DISPLAY_VER(dev_priv) > 3) 2539f005ef32Sjsg intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), 2540c349dbc7Sjsg vsyncshift); 2541c349dbc7Sjsg 2542f005ef32Sjsg intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), 2543f005ef32Sjsg HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2544f005ef32Sjsg HTOTAL(adjusted_mode->crtc_htotal - 1)); 2545f005ef32Sjsg intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), 2546f005ef32Sjsg HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2547f005ef32Sjsg HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2548f005ef32Sjsg intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), 2549f005ef32Sjsg HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2550f005ef32Sjsg HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2551c349dbc7Sjsg 2552f005ef32Sjsg intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 2553f005ef32Sjsg VACTIVE(crtc_vdisplay - 1) | 2554f005ef32Sjsg VTOTAL(crtc_vtotal - 1)); 2555f005ef32Sjsg intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 2556f005ef32Sjsg VBLANK_START(crtc_vblank_start - 1) | 2557f005ef32Sjsg VBLANK_END(crtc_vblank_end - 1)); 2558f005ef32Sjsg intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), 2559f005ef32Sjsg VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2560f005ef32Sjsg VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2561c349dbc7Sjsg 2562c349dbc7Sjsg /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2563c349dbc7Sjsg * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 2564c349dbc7Sjsg * documented on the DDI_FUNC_CTL register description, EDP Input Select 2565c349dbc7Sjsg * bits. 
*/ 2566c349dbc7Sjsg if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2567c349dbc7Sjsg (pipe == PIPE_B || pipe == PIPE_C)) 2568f005ef32Sjsg intel_de_write(dev_priv, TRANS_VTOTAL(pipe), 2569f005ef32Sjsg VACTIVE(crtc_vdisplay - 1) | 2570f005ef32Sjsg VTOTAL(crtc_vtotal - 1)); 2571c349dbc7Sjsg } 2572c349dbc7Sjsg 2573c349dbc7Sjsg static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) 2574c349dbc7Sjsg { 2575c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2576c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 25771bb76ff1Sjsg int width = drm_rect_width(&crtc_state->pipe_src); 25781bb76ff1Sjsg int height = drm_rect_height(&crtc_state->pipe_src); 2579c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2580c349dbc7Sjsg 2581c349dbc7Sjsg /* pipesrc controls the size that is scaled from, which should 2582c349dbc7Sjsg * always be the user's requested size. 2583c349dbc7Sjsg */ 2584c349dbc7Sjsg intel_de_write(dev_priv, PIPESRC(pipe), 25851bb76ff1Sjsg PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1)); 2586c349dbc7Sjsg } 2587c349dbc7Sjsg 2588c349dbc7Sjsg static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) 2589c349dbc7Sjsg { 2590c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2591c349dbc7Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2592c349dbc7Sjsg 25935ca02815Sjsg if (DISPLAY_VER(dev_priv) == 2) 2594c349dbc7Sjsg return false; 2595c349dbc7Sjsg 25965ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 || 2597c349dbc7Sjsg IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2598f005ef32Sjsg return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2599c349dbc7Sjsg else 2600f005ef32Sjsg return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 2601c349dbc7Sjsg } 2602c349dbc7Sjsg 26035ca02815Sjsg static void intel_get_transcoder_timings(struct intel_crtc *crtc, 2604c349dbc7Sjsg struct intel_crtc_state *pipe_config) 2605c349dbc7Sjsg { 2606c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 2607c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 2608c349dbc7Sjsg enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2609f005ef32Sjsg struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2610c349dbc7Sjsg u32 tmp; 2611c349dbc7Sjsg 2612f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); 2613f005ef32Sjsg adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 2614f005ef32Sjsg adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 2615c349dbc7Sjsg 2616c349dbc7Sjsg if (!transcoder_is_dsi(cpu_transcoder)) { 2617f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); 2618f005ef32Sjsg adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 2619f005ef32Sjsg adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 2620c349dbc7Sjsg } 2621c349dbc7Sjsg 2622f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); 2623f005ef32Sjsg adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 2624f005ef32Sjsg adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 2625c349dbc7Sjsg 2626f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); 2627f005ef32Sjsg adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 2628f005ef32Sjsg adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 
2629f005ef32Sjsg 2630f005ef32Sjsg /* FIXME TGL+ DSI transcoders have this! */ 2631c349dbc7Sjsg if (!transcoder_is_dsi(cpu_transcoder)) { 2632f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); 2633f005ef32Sjsg adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 2634f005ef32Sjsg adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 2635c349dbc7Sjsg } 2636f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); 2637f005ef32Sjsg adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 2638f005ef32Sjsg adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 2639c349dbc7Sjsg 2640c349dbc7Sjsg if (intel_pipe_is_interlaced(pipe_config)) { 2641f005ef32Sjsg adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 2642f005ef32Sjsg adjusted_mode->crtc_vtotal += 1; 2643f005ef32Sjsg adjusted_mode->crtc_vblank_end += 1; 2644c349dbc7Sjsg } 2645f005ef32Sjsg 2646f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 2647f005ef32Sjsg adjusted_mode->crtc_vblank_start = 2648f005ef32Sjsg adjusted_mode->crtc_vdisplay + 2649f005ef32Sjsg intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); 2650c349dbc7Sjsg } 2651c349dbc7Sjsg 26521bb76ff1Sjsg static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) 26531bb76ff1Sjsg { 26541bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 26551bb76ff1Sjsg int num_pipes = intel_bigjoiner_num_pipes(crtc_state); 26561bb76ff1Sjsg enum pipe master_pipe, pipe = crtc->pipe; 26571bb76ff1Sjsg int width; 26581bb76ff1Sjsg 26591bb76ff1Sjsg if (num_pipes < 2) 26601bb76ff1Sjsg return; 26611bb76ff1Sjsg 26621bb76ff1Sjsg master_pipe = bigjoiner_master_pipe(crtc_state); 26631bb76ff1Sjsg width = drm_rect_width(&crtc_state->pipe_src); 26641bb76ff1Sjsg 26651bb76ff1Sjsg drm_rect_translate_to(&crtc_state->pipe_src, 26661bb76ff1Sjsg (pipe - master_pipe) * width, 0); 26671bb76ff1Sjsg } 26681bb76ff1Sjsg 2669c349dbc7Sjsg static void intel_get_pipe_src_size(struct intel_crtc *crtc, 2670c349dbc7Sjsg struct intel_crtc_state *pipe_config) 2671c349dbc7Sjsg { 2672c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 2673c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 2674c349dbc7Sjsg u32 tmp; 2675c349dbc7Sjsg 2676c349dbc7Sjsg tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); 26771bb76ff1Sjsg 26781bb76ff1Sjsg drm_rect_init(&pipe_config->pipe_src, 0, 0, 26791bb76ff1Sjsg REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1, 26801bb76ff1Sjsg REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1); 26811bb76ff1Sjsg 26821bb76ff1Sjsg intel_bigjoiner_adjust_pipe_src(pipe_config); 2683c349dbc7Sjsg } 2684c349dbc7Sjsg 26851bb76ff1Sjsg void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) 2686c349dbc7Sjsg { 2687c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2688c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2689f005ef32Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2690f005ef32Sjsg u32 val = 0; 2691c349dbc7Sjsg 26921bb76ff1Sjsg /* 26931bb76ff1Sjsg * - We keep both pipes enabled on 830 26941bb76ff1Sjsg * - During modeset the pipe is still disabled and must remain so 26951bb76ff1Sjsg * - During fastset the pipe is already enabled and must remain so 26961bb76ff1Sjsg */ 26971bb76ff1Sjsg if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 2698f005ef32Sjsg val |= TRANSCONF_ENABLE; 2699c349dbc7Sjsg 2700c349dbc7Sjsg if 
(crtc_state->double_wide) 2701f005ef32Sjsg val |= TRANSCONF_DOUBLE_WIDE; 2702c349dbc7Sjsg 2703c349dbc7Sjsg /* only g4x and later have fancy bpc/dither controls */ 2704c349dbc7Sjsg if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2705c349dbc7Sjsg IS_CHERRYVIEW(dev_priv)) { 2706c349dbc7Sjsg /* Bspec claims that we can't use dithering for 30bpp pipes. */ 2707c349dbc7Sjsg if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2708f005ef32Sjsg val |= TRANSCONF_DITHER_EN | 2709f005ef32Sjsg TRANSCONF_DITHER_TYPE_SP; 2710c349dbc7Sjsg 2711c349dbc7Sjsg switch (crtc_state->pipe_bpp) { 2712c349dbc7Sjsg default: 2713c349dbc7Sjsg /* Case prevented by intel_choose_pipe_bpp_dither. */ 27141bb76ff1Sjsg MISSING_CASE(crtc_state->pipe_bpp); 27151bb76ff1Sjsg fallthrough; 27161bb76ff1Sjsg case 18: 2717f005ef32Sjsg val |= TRANSCONF_BPC_6; 27181bb76ff1Sjsg break; 27191bb76ff1Sjsg case 24: 2720f005ef32Sjsg val |= TRANSCONF_BPC_8; 27211bb76ff1Sjsg break; 27221bb76ff1Sjsg case 30: 2723f005ef32Sjsg val |= TRANSCONF_BPC_10; 27241bb76ff1Sjsg break; 2725c349dbc7Sjsg } 2726c349dbc7Sjsg } 2727c349dbc7Sjsg 2728c349dbc7Sjsg if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 27295ca02815Sjsg if (DISPLAY_VER(dev_priv) < 4 || 2730c349dbc7Sjsg intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2731f005ef32Sjsg val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 2732c349dbc7Sjsg else 2733f005ef32Sjsg val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 2734c349dbc7Sjsg } else { 2735f005ef32Sjsg val |= TRANSCONF_INTERLACE_PROGRESSIVE; 2736c349dbc7Sjsg } 2737c349dbc7Sjsg 2738c349dbc7Sjsg if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2739c349dbc7Sjsg crtc_state->limited_color_range) 2740f005ef32Sjsg val |= TRANSCONF_COLOR_RANGE_SELECT; 2741c349dbc7Sjsg 2742f005ef32Sjsg val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 2743c349dbc7Sjsg 2744f005ef32Sjsg if (crtc_state->wgc_enable) 2745f005ef32Sjsg val |= TRANSCONF_WGC_ENABLE; 2746c349dbc7Sjsg 2747f005ef32Sjsg val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 2748f005ef32Sjsg 2749f005ef32Sjsg intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 2750f005ef32Sjsg intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 2751c349dbc7Sjsg } 2752c349dbc7Sjsg 2753c349dbc7Sjsg static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) 2754c349dbc7Sjsg { 2755c349dbc7Sjsg if (IS_I830(dev_priv)) 2756c349dbc7Sjsg return false; 2757c349dbc7Sjsg 27585ca02815Sjsg return DISPLAY_VER(dev_priv) >= 4 || 2759c349dbc7Sjsg IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 2760c349dbc7Sjsg } 2761c349dbc7Sjsg 2762ad8b1aafSjsg static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state) 2763c349dbc7Sjsg { 2764ad8b1aafSjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2765c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2766f005ef32Sjsg enum pipe pipe; 2767c349dbc7Sjsg u32 tmp; 2768c349dbc7Sjsg 2769c349dbc7Sjsg if (!i9xx_has_pfit(dev_priv)) 2770c349dbc7Sjsg return; 2771c349dbc7Sjsg 2772c349dbc7Sjsg tmp = intel_de_read(dev_priv, PFIT_CONTROL); 2773c349dbc7Sjsg if (!(tmp & PFIT_ENABLE)) 2774c349dbc7Sjsg return; 2775c349dbc7Sjsg 2776c349dbc7Sjsg /* Check whether the pfit is attached to our pipe. 
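	 *
	 * On gen4+ the owning pipe is encoded in PFIT_CONTROL itself (the
	 * PFIT_PIPE_MASK field decoded below); on the older mobile parts the
	 * panel fitter is hard-wired to pipe B, which is what the else
	 * branch assumes.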
*/ 2777f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 4) 2778f005ef32Sjsg pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp); 2779f005ef32Sjsg else 2780f005ef32Sjsg pipe = PIPE_B; 2781f005ef32Sjsg 2782f005ef32Sjsg if (pipe != crtc->pipe) 2783c349dbc7Sjsg return; 2784c349dbc7Sjsg 2785ad8b1aafSjsg crtc_state->gmch_pfit.control = tmp; 2786ad8b1aafSjsg crtc_state->gmch_pfit.pgm_ratios = 2787ad8b1aafSjsg intel_de_read(dev_priv, PFIT_PGM_RATIOS); 2788c349dbc7Sjsg } 2789c349dbc7Sjsg 2790c349dbc7Sjsg static void vlv_crtc_clock_get(struct intel_crtc *crtc, 2791c349dbc7Sjsg struct intel_crtc_state *pipe_config) 2792c349dbc7Sjsg { 2793c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 2794c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 2795c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2796c349dbc7Sjsg struct dpll clock; 2797c349dbc7Sjsg u32 mdiv; 2798c349dbc7Sjsg int refclk = 100000; 2799c349dbc7Sjsg 2800c349dbc7Sjsg /* In case of DSI, DPLL will not be used */ 2801c349dbc7Sjsg if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 2802c349dbc7Sjsg return; 2803c349dbc7Sjsg 2804c349dbc7Sjsg vlv_dpio_get(dev_priv); 2805c349dbc7Sjsg mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 2806c349dbc7Sjsg vlv_dpio_put(dev_priv); 2807c349dbc7Sjsg 2808c349dbc7Sjsg clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 2809c349dbc7Sjsg clock.m2 = mdiv & DPIO_M2DIV_MASK; 2810c349dbc7Sjsg clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 2811c349dbc7Sjsg clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 2812c349dbc7Sjsg clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 2813c349dbc7Sjsg 2814c349dbc7Sjsg pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 2815c349dbc7Sjsg } 2816c349dbc7Sjsg 2817c349dbc7Sjsg static void chv_crtc_clock_get(struct intel_crtc *crtc, 2818c349dbc7Sjsg struct intel_crtc_state *pipe_config) 2819c349dbc7Sjsg { 2820c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 2821c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 2822c349dbc7Sjsg enum pipe pipe = crtc->pipe; 2823c349dbc7Sjsg enum dpio_channel port = vlv_pipe_to_channel(pipe); 2824c349dbc7Sjsg struct dpll clock; 2825c349dbc7Sjsg u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 2826c349dbc7Sjsg int refclk = 100000; 2827c349dbc7Sjsg 2828c349dbc7Sjsg /* In case of DSI, DPLL will not be used */ 2829c349dbc7Sjsg if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 2830c349dbc7Sjsg return; 2831c349dbc7Sjsg 2832c349dbc7Sjsg vlv_dpio_get(dev_priv); 2833c349dbc7Sjsg cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 2834c349dbc7Sjsg pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 2835c349dbc7Sjsg pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 2836c349dbc7Sjsg pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 2837c349dbc7Sjsg pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 2838c349dbc7Sjsg vlv_dpio_put(dev_priv); 2839c349dbc7Sjsg 2840c349dbc7Sjsg clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 2841c349dbc7Sjsg clock.m2 = (pll_dw0 & 0xff) << 22; 2842c349dbc7Sjsg if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 2843c349dbc7Sjsg clock.m2 |= pll_dw2 & 0x3fffff; 2844c349dbc7Sjsg clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 2845c349dbc7Sjsg clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 2846c349dbc7Sjsg clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 2847c349dbc7Sjsg 2848c349dbc7Sjsg pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 2849c349dbc7Sjsg } 2850c349dbc7Sjsg 2851c349dbc7Sjsg static enum intel_output_format 2852f005ef32Sjsg bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 2853c349dbc7Sjsg { 2854c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2855c349dbc7Sjsg u32 tmp; 2856c349dbc7Sjsg 2857f005ef32Sjsg tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 2858c349dbc7Sjsg 2859f005ef32Sjsg if (tmp & PIPE_MISC_YUV420_ENABLE) { 2860c349dbc7Sjsg /* We support 4:2:0 in full blend mode only */ 2861c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, 2862f005ef32Sjsg (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 2863c349dbc7Sjsg 2864c349dbc7Sjsg return INTEL_OUTPUT_FORMAT_YCBCR420; 2865f005ef32Sjsg } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 2866c349dbc7Sjsg return INTEL_OUTPUT_FORMAT_YCBCR444; 2867c349dbc7Sjsg } else { 2868c349dbc7Sjsg return INTEL_OUTPUT_FORMAT_RGB; 2869c349dbc7Sjsg } 2870c349dbc7Sjsg } 2871c349dbc7Sjsg 2872c349dbc7Sjsg static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) 2873c349dbc7Sjsg { 2874c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2875c349dbc7Sjsg struct intel_plane *plane = to_intel_plane(crtc->base.primary); 2876c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2877c349dbc7Sjsg enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 2878c349dbc7Sjsg u32 tmp; 2879c349dbc7Sjsg 2880c349dbc7Sjsg tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 2881c349dbc7Sjsg 28821bb76ff1Sjsg if (tmp & DISP_PIPE_GAMMA_ENABLE) 2883c349dbc7Sjsg crtc_state->gamma_enable = true; 2884c349dbc7Sjsg 2885c349dbc7Sjsg if (!HAS_GMCH(dev_priv) && 28861bb76ff1Sjsg tmp & DISP_PIPE_CSC_ENABLE) 2887c349dbc7Sjsg crtc_state->csc_enable = true; 2888c349dbc7Sjsg } 2889c349dbc7Sjsg 2890c349dbc7Sjsg static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 2891c349dbc7Sjsg struct intel_crtc_state *pipe_config) 2892c349dbc7Sjsg { 2893c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2894c349dbc7Sjsg enum intel_display_power_domain power_domain; 2895c349dbc7Sjsg intel_wakeref_t wakeref; 2896c349dbc7Sjsg u32 tmp; 2897c349dbc7Sjsg bool ret; 2898c349dbc7Sjsg 2899c349dbc7Sjsg power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 2900c349dbc7Sjsg wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 2901c349dbc7Sjsg if (!wakeref) 2902c349dbc7Sjsg return false; 2903c349dbc7Sjsg 2904c349dbc7Sjsg pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 2905f005ef32Sjsg pipe_config->sink_format = pipe_config->output_format; 2906c349dbc7Sjsg pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 2907c349dbc7Sjsg pipe_config->shared_dpll = NULL; 2908c349dbc7Sjsg 2909c349dbc7Sjsg ret = false; 2910c349dbc7Sjsg 2911f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 2912f005ef32Sjsg if (!(tmp & TRANSCONF_ENABLE)) 2913c349dbc7Sjsg goto out; 2914c349dbc7Sjsg 2915c349dbc7Sjsg if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2916c349dbc7Sjsg IS_CHERRYVIEW(dev_priv)) { 2917f005ef32Sjsg switch (tmp & 
TRANSCONF_BPC_MASK) { 2918f005ef32Sjsg case TRANSCONF_BPC_6: 2919c349dbc7Sjsg pipe_config->pipe_bpp = 18; 2920c349dbc7Sjsg break; 2921f005ef32Sjsg case TRANSCONF_BPC_8: 2922c349dbc7Sjsg pipe_config->pipe_bpp = 24; 2923c349dbc7Sjsg break; 2924f005ef32Sjsg case TRANSCONF_BPC_10: 2925c349dbc7Sjsg pipe_config->pipe_bpp = 30; 2926c349dbc7Sjsg break; 2927c349dbc7Sjsg default: 29281bb76ff1Sjsg MISSING_CASE(tmp); 2929c349dbc7Sjsg break; 2930c349dbc7Sjsg } 2931c349dbc7Sjsg } 2932c349dbc7Sjsg 2933c349dbc7Sjsg if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2934f005ef32Sjsg (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 2935c349dbc7Sjsg pipe_config->limited_color_range = true; 2936c349dbc7Sjsg 2937f005ef32Sjsg pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 29381bb76ff1Sjsg 2939f005ef32Sjsg pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 2940f005ef32Sjsg 2941f005ef32Sjsg if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 2942f005ef32Sjsg (tmp & TRANSCONF_WGC_ENABLE)) 2943f005ef32Sjsg pipe_config->wgc_enable = true; 2944c349dbc7Sjsg 2945c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv)) 2946c349dbc7Sjsg pipe_config->cgm_mode = intel_de_read(dev_priv, 2947c349dbc7Sjsg CGM_PIPE_MODE(crtc->pipe)); 2948c349dbc7Sjsg 2949c349dbc7Sjsg i9xx_get_pipe_color_config(pipe_config); 2950c349dbc7Sjsg intel_color_get_config(pipe_config); 2951c349dbc7Sjsg 29525ca02815Sjsg if (DISPLAY_VER(dev_priv) < 4) 2953f005ef32Sjsg pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 2954c349dbc7Sjsg 29555ca02815Sjsg intel_get_transcoder_timings(crtc, pipe_config); 2956c349dbc7Sjsg intel_get_pipe_src_size(crtc, pipe_config); 2957c349dbc7Sjsg 2958ad8b1aafSjsg i9xx_get_pfit_config(pipe_config); 2959c349dbc7Sjsg 29605ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 4) { 2961c349dbc7Sjsg /* No way to read it out on pipes B and C */ 2962c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 2963f005ef32Sjsg tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; 2964c349dbc7Sjsg else 2965c349dbc7Sjsg tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); 2966c349dbc7Sjsg pipe_config->pixel_multiplier = 2967c349dbc7Sjsg ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 2968c349dbc7Sjsg >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 2969c349dbc7Sjsg pipe_config->dpll_hw_state.dpll_md = tmp; 2970c349dbc7Sjsg } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 2971c349dbc7Sjsg IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 2972c349dbc7Sjsg tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); 2973c349dbc7Sjsg pipe_config->pixel_multiplier = 2974c349dbc7Sjsg ((tmp & SDVO_MULTIPLIER_MASK) 2975c349dbc7Sjsg >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 2976c349dbc7Sjsg } else { 2977c349dbc7Sjsg /* Note that on i915G/GM the pixel multiplier is in the sdvo 2978c349dbc7Sjsg * port and will be fixed up in the encoder->get_config 2979c349dbc7Sjsg * function. */ 2980c349dbc7Sjsg pipe_config->pixel_multiplier = 1; 2981c349dbc7Sjsg } 2982c349dbc7Sjsg pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, 2983c349dbc7Sjsg DPLL(crtc->pipe)); 2984c349dbc7Sjsg if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 2985c349dbc7Sjsg pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, 2986c349dbc7Sjsg FP0(crtc->pipe)); 2987c349dbc7Sjsg pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, 2988c349dbc7Sjsg FP1(crtc->pipe)); 2989c349dbc7Sjsg } else { 2990c349dbc7Sjsg /* Mask out read-only status bits. 
*/ 2991c349dbc7Sjsg pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 2992c349dbc7Sjsg DPLL_PORTC_READY_MASK | 2993c349dbc7Sjsg DPLL_PORTB_READY_MASK); 2994c349dbc7Sjsg } 2995c349dbc7Sjsg 2996c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv)) 2997c349dbc7Sjsg chv_crtc_clock_get(crtc, pipe_config); 2998c349dbc7Sjsg else if (IS_VALLEYVIEW(dev_priv)) 2999c349dbc7Sjsg vlv_crtc_clock_get(crtc, pipe_config); 3000c349dbc7Sjsg else 3001c349dbc7Sjsg i9xx_crtc_clock_get(crtc, pipe_config); 3002c349dbc7Sjsg 3003c349dbc7Sjsg /* 3004c349dbc7Sjsg * Normally the dotclock is filled in by the encoder .get_config() 3005c349dbc7Sjsg * but in case the pipe is enabled w/o any ports we need a sane 3006c349dbc7Sjsg * default. 3007c349dbc7Sjsg */ 3008c349dbc7Sjsg pipe_config->hw.adjusted_mode.crtc_clock = 3009c349dbc7Sjsg pipe_config->port_clock / pipe_config->pixel_multiplier; 3010c349dbc7Sjsg 3011c349dbc7Sjsg ret = true; 3012c349dbc7Sjsg 3013c349dbc7Sjsg out: 3014c349dbc7Sjsg intel_display_power_put(dev_priv, power_domain, wakeref); 3015c349dbc7Sjsg 3016c349dbc7Sjsg return ret; 3017c349dbc7Sjsg } 3018c349dbc7Sjsg 30191bb76ff1Sjsg void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 3020c349dbc7Sjsg { 3021c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3022c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3023f005ef32Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 30241bb76ff1Sjsg u32 val = 0; 3025c349dbc7Sjsg 30261bb76ff1Sjsg /* 30271bb76ff1Sjsg * - During modeset the pipe is still disabled and must remain so 30281bb76ff1Sjsg * - During fastset the pipe is already enabled and must remain so 30291bb76ff1Sjsg */ 30301bb76ff1Sjsg if (!intel_crtc_needs_modeset(crtc_state)) 3031f005ef32Sjsg val |= TRANSCONF_ENABLE; 3032c349dbc7Sjsg 3033c349dbc7Sjsg switch (crtc_state->pipe_bpp) { 3034c349dbc7Sjsg default: 3035c349dbc7Sjsg /* Case prevented by intel_choose_pipe_bpp_dither. */ 30361bb76ff1Sjsg MISSING_CASE(crtc_state->pipe_bpp); 30371bb76ff1Sjsg fallthrough; 30381bb76ff1Sjsg case 18: 3039f005ef32Sjsg val |= TRANSCONF_BPC_6; 30401bb76ff1Sjsg break; 30411bb76ff1Sjsg case 24: 3042f005ef32Sjsg val |= TRANSCONF_BPC_8; 30431bb76ff1Sjsg break; 30441bb76ff1Sjsg case 30: 3045f005ef32Sjsg val |= TRANSCONF_BPC_10; 30461bb76ff1Sjsg break; 30471bb76ff1Sjsg case 36: 3048f005ef32Sjsg val |= TRANSCONF_BPC_12; 30491bb76ff1Sjsg break; 3050c349dbc7Sjsg } 3051c349dbc7Sjsg 3052c349dbc7Sjsg if (crtc_state->dither) 3053f005ef32Sjsg val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3054c349dbc7Sjsg 3055c349dbc7Sjsg if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3056f005ef32Sjsg val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3057c349dbc7Sjsg else 3058f005ef32Sjsg val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3059c349dbc7Sjsg 3060c349dbc7Sjsg /* 3061c349dbc7Sjsg * This would end up with an odd purple hue over 3062c349dbc7Sjsg * the entire display. Make sure we don't do it. 
3063c349dbc7Sjsg */ 3064c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && 3065c349dbc7Sjsg crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 3066c349dbc7Sjsg 3067ad8b1aafSjsg if (crtc_state->limited_color_range && 3068ad8b1aafSjsg !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3069f005ef32Sjsg val |= TRANSCONF_COLOR_RANGE_SELECT; 3070c349dbc7Sjsg 3071c349dbc7Sjsg if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3072f005ef32Sjsg val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3073c349dbc7Sjsg 3074f005ef32Sjsg val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3075c349dbc7Sjsg 3076f005ef32Sjsg val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3077f005ef32Sjsg val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3078c349dbc7Sjsg 3079f005ef32Sjsg intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3080f005ef32Sjsg intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3081c349dbc7Sjsg } 3082c349dbc7Sjsg 30831bb76ff1Sjsg static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) 3084c349dbc7Sjsg { 3085c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3086c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3087c349dbc7Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3088c349dbc7Sjsg u32 val = 0; 3089c349dbc7Sjsg 30901bb76ff1Sjsg /* 30911bb76ff1Sjsg * - During modeset the pipe is still disabled and must remain so 30921bb76ff1Sjsg * - During fastset the pipe is already enabled and must remain so 30931bb76ff1Sjsg */ 30941bb76ff1Sjsg if (!intel_crtc_needs_modeset(crtc_state)) 3095f005ef32Sjsg val |= TRANSCONF_ENABLE; 30961bb76ff1Sjsg 3097c349dbc7Sjsg if (IS_HASWELL(dev_priv) && crtc_state->dither) 3098f005ef32Sjsg val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3099c349dbc7Sjsg 3100c349dbc7Sjsg if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3101f005ef32Sjsg val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3102c349dbc7Sjsg else 3103f005ef32Sjsg val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3104c349dbc7Sjsg 3105c349dbc7Sjsg if (IS_HASWELL(dev_priv) && 3106c349dbc7Sjsg crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3107f005ef32Sjsg val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3108c349dbc7Sjsg 3109f005ef32Sjsg intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3110f005ef32Sjsg intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3111c349dbc7Sjsg } 3112c349dbc7Sjsg 3113f005ef32Sjsg static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) 3114c349dbc7Sjsg { 3115c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3116c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3117c349dbc7Sjsg u32 val = 0; 3118c349dbc7Sjsg 3119c349dbc7Sjsg switch (crtc_state->pipe_bpp) { 3120c349dbc7Sjsg case 18: 3121f005ef32Sjsg val |= PIPE_MISC_BPC_6; 3122c349dbc7Sjsg break; 3123c349dbc7Sjsg case 24: 3124f005ef32Sjsg val |= PIPE_MISC_BPC_8; 3125c349dbc7Sjsg break; 3126c349dbc7Sjsg case 30: 3127f005ef32Sjsg val |= PIPE_MISC_BPC_10; 3128c349dbc7Sjsg break; 3129c349dbc7Sjsg case 36: 31305ca02815Sjsg /* Port output 12BPC defined for ADLP+ */ 31315ca02815Sjsg if (DISPLAY_VER(dev_priv) > 12) 3132f005ef32Sjsg val |= PIPE_MISC_BPC_12_ADLP; 3133c349dbc7Sjsg break; 3134c349dbc7Sjsg default: 3135c349dbc7Sjsg MISSING_CASE(crtc_state->pipe_bpp); 3136c349dbc7Sjsg break; 3137c349dbc7Sjsg } 3138c349dbc7Sjsg 3139c349dbc7Sjsg if (crtc_state->dither) 3140f005ef32Sjsg val |= PIPE_MISC_DITHER_ENABLE 
| PIPE_MISC_DITHER_TYPE_SP; 3141c349dbc7Sjsg 3142c349dbc7Sjsg if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3143c349dbc7Sjsg crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3144f005ef32Sjsg val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3145c349dbc7Sjsg 3146c349dbc7Sjsg if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3147f005ef32Sjsg val |= PIPE_MISC_YUV420_ENABLE | 3148f005ef32Sjsg PIPE_MISC_YUV420_MODE_FULL_BLEND; 3149c349dbc7Sjsg 31501bb76ff1Sjsg if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3151f005ef32Sjsg val |= PIPE_MISC_HDR_MODE_PRECISION; 3152c349dbc7Sjsg 31535ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 12) 3154f005ef32Sjsg val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3155c349dbc7Sjsg 3156f005ef32Sjsg /* allow PSR with sprite enabled */ 3157f005ef32Sjsg if (IS_BROADWELL(dev_priv)) 3158f005ef32Sjsg val |= PIPE_MISC_PSR_MASK_SPRITE_ENABLE; 3159f005ef32Sjsg 3160f005ef32Sjsg intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); 3161c349dbc7Sjsg } 3162c349dbc7Sjsg 3163f005ef32Sjsg int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3164c349dbc7Sjsg { 3165c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3166c349dbc7Sjsg u32 tmp; 3167c349dbc7Sjsg 3168f005ef32Sjsg tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3169c349dbc7Sjsg 3170f005ef32Sjsg switch (tmp & PIPE_MISC_BPC_MASK) { 3171f005ef32Sjsg case PIPE_MISC_BPC_6: 3172c349dbc7Sjsg return 18; 3173f005ef32Sjsg case PIPE_MISC_BPC_8: 3174c349dbc7Sjsg return 24; 3175f005ef32Sjsg case PIPE_MISC_BPC_10: 3176c349dbc7Sjsg return 30; 31775ca02815Sjsg /* 31785ca02815Sjsg * PORT OUTPUT 12 BPC defined for ADLP+. 31795ca02815Sjsg * 31805ca02815Sjsg * TODO: 31815ca02815Sjsg * For previous platforms with DSI interface, bits 5:7 31825ca02815Sjsg * are used for storing pipe_bpp irrespective of dithering. 31835ca02815Sjsg * Since the value of 12 BPC is not defined for these bits 31845ca02815Sjsg * on older platforms, need to find a workaround for 12 BPC 31855ca02815Sjsg * MIPI DSI HW readout. 31865ca02815Sjsg */ 3187f005ef32Sjsg case PIPE_MISC_BPC_12_ADLP: 31885ca02815Sjsg if (DISPLAY_VER(dev_priv) > 12) 3189c349dbc7Sjsg return 36; 31905ca02815Sjsg fallthrough; 3191c349dbc7Sjsg default: 3192c349dbc7Sjsg MISSING_CASE(tmp); 3193c349dbc7Sjsg return 0; 3194c349dbc7Sjsg } 3195c349dbc7Sjsg } 3196c349dbc7Sjsg 3197c349dbc7Sjsg int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) 3198c349dbc7Sjsg { 3199c349dbc7Sjsg /* 3200c349dbc7Sjsg * Account for spread spectrum to avoid 3201c349dbc7Sjsg * oversubscribing the link. Max center spread 3202c349dbc7Sjsg * is 2.5%; use 5% for safety's sake. 
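	 *
	 * E.g. (illustrative numbers): a 148500 kHz stream at 24 bpp over a
	 * 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200, and
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.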
3203c349dbc7Sjsg */ 3204c349dbc7Sjsg u32 bps = target_clock * bpp * 21 / 20; 3205c349dbc7Sjsg return DIV_ROUND_UP(bps, link_bw * 8); 3206c349dbc7Sjsg } 3207c349dbc7Sjsg 32081bb76ff1Sjsg void intel_get_m_n(struct drm_i915_private *i915, 32091bb76ff1Sjsg struct intel_link_m_n *m_n, 32101bb76ff1Sjsg i915_reg_t data_m_reg, i915_reg_t data_n_reg, 32111bb76ff1Sjsg i915_reg_t link_m_reg, i915_reg_t link_n_reg) 3212c349dbc7Sjsg { 32131bb76ff1Sjsg m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; 32141bb76ff1Sjsg m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; 32151bb76ff1Sjsg m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; 32161bb76ff1Sjsg m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; 32171bb76ff1Sjsg m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; 3218c349dbc7Sjsg } 3219c349dbc7Sjsg 32201bb76ff1Sjsg void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, 3221c349dbc7Sjsg enum transcoder transcoder, 32221bb76ff1Sjsg struct intel_link_m_n *m_n) 3223c349dbc7Sjsg { 3224c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3225c349dbc7Sjsg enum pipe pipe = crtc->pipe; 3226c349dbc7Sjsg 32271bb76ff1Sjsg if (DISPLAY_VER(dev_priv) >= 5) 32281bb76ff1Sjsg intel_get_m_n(dev_priv, m_n, 32291bb76ff1Sjsg PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), 32301bb76ff1Sjsg PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); 3231c349dbc7Sjsg else 32321bb76ff1Sjsg intel_get_m_n(dev_priv, m_n, 32331bb76ff1Sjsg PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), 32341bb76ff1Sjsg PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); 3235c349dbc7Sjsg } 3236c349dbc7Sjsg 32371bb76ff1Sjsg void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 32381bb76ff1Sjsg enum transcoder transcoder, 32391bb76ff1Sjsg struct intel_link_m_n *m_n) 3240c349dbc7Sjsg { 32411bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 32421bb76ff1Sjsg 32431bb76ff1Sjsg if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) 32441bb76ff1Sjsg return; 32451bb76ff1Sjsg 32461bb76ff1Sjsg intel_get_m_n(dev_priv, m_n, 32471bb76ff1Sjsg PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), 32481bb76ff1Sjsg PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); 3249c349dbc7Sjsg } 3250c349dbc7Sjsg 3251ad8b1aafSjsg static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state) 3252c349dbc7Sjsg { 3253ad8b1aafSjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3254ad8b1aafSjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3255ad8b1aafSjsg u32 ctl, pos, size; 3256f005ef32Sjsg enum pipe pipe; 3257c349dbc7Sjsg 3258ad8b1aafSjsg ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); 3259ad8b1aafSjsg if ((ctl & PF_ENABLE) == 0) 3260ad8b1aafSjsg return; 3261c349dbc7Sjsg 3262f005ef32Sjsg if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 3263f005ef32Sjsg pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl); 3264f005ef32Sjsg else 3265f005ef32Sjsg pipe = crtc->pipe; 3266f005ef32Sjsg 3267ad8b1aafSjsg crtc_state->pch_pfit.enabled = true; 3268c349dbc7Sjsg 3269ad8b1aafSjsg pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe)); 3270ad8b1aafSjsg size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe)); 3271ad8b1aafSjsg 3272f005ef32Sjsg drm_rect_init(&crtc_state->pch_pfit.dst, 3273f005ef32Sjsg REG_FIELD_GET(PF_WIN_XPOS_MASK, pos), 3274f005ef32Sjsg REG_FIELD_GET(PF_WIN_YPOS_MASK, pos), 3275f005ef32Sjsg REG_FIELD_GET(PF_WIN_XSIZE_MASK, size), 3276f005ef32Sjsg REG_FIELD_GET(PF_WIN_YSIZE_MASK, size)); 
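	/*
	 * PF_WIN_POS packs the window x/y origin and PF_WIN_SZ its
	 * width/height; e.g. (illustrative values) pos == 0 and
	 * size == (1920 << 16) | 1080 decode to a 1920x1080 destination
	 * rectangle at (0,0), i.e. the fitter scaling up to the full pipe.
	 */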
3277ad8b1aafSjsg 
3278ad8b1aafSjsg 	/*
3279ad8b1aafSjsg 	 * We currently do not free assignments of panel fitters on
3280c349dbc7Sjsg 	 * ivb/hsw (since we don't use the higher upscaling modes which
3281ad8b1aafSjsg 	 * differentiate them) so just WARN about this case for now.
3282ad8b1aafSjsg 	 */
3283f005ef32Sjsg 	drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
3284c349dbc7Sjsg }
3285c349dbc7Sjsg 
3286c349dbc7Sjsg static bool ilk_get_pipe_config(struct intel_crtc *crtc,
3287c349dbc7Sjsg 				struct intel_crtc_state *pipe_config)
3288c349dbc7Sjsg {
3289c349dbc7Sjsg 	struct drm_device *dev = crtc->base.dev;
3290c349dbc7Sjsg 	struct drm_i915_private *dev_priv = to_i915(dev);
3291c349dbc7Sjsg 	enum intel_display_power_domain power_domain;
3292c349dbc7Sjsg 	intel_wakeref_t wakeref;
3293c349dbc7Sjsg 	u32 tmp;
3294c349dbc7Sjsg 	bool ret;
3295c349dbc7Sjsg 
3296c349dbc7Sjsg 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
3297c349dbc7Sjsg 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3298c349dbc7Sjsg 	if (!wakeref)
3299c349dbc7Sjsg 		return false;
3300c349dbc7Sjsg 
3301c349dbc7Sjsg 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
3302c349dbc7Sjsg 	pipe_config->shared_dpll = NULL;
3303c349dbc7Sjsg 
3304c349dbc7Sjsg 	ret = false;
3305f005ef32Sjsg 	tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder));
3306f005ef32Sjsg 	if (!(tmp & TRANSCONF_ENABLE))
3307c349dbc7Sjsg 		goto out;
3308c349dbc7Sjsg 
3309f005ef32Sjsg 	switch (tmp & TRANSCONF_BPC_MASK) {
3310f005ef32Sjsg 	case TRANSCONF_BPC_6:
3311c349dbc7Sjsg 		pipe_config->pipe_bpp = 18;
3312c349dbc7Sjsg 		break;
3313f005ef32Sjsg 	case TRANSCONF_BPC_8:
3314c349dbc7Sjsg 		pipe_config->pipe_bpp = 24;
3315c349dbc7Sjsg 		break;
3316f005ef32Sjsg 	case TRANSCONF_BPC_10:
3317c349dbc7Sjsg 		pipe_config->pipe_bpp = 30;
3318c349dbc7Sjsg 		break;
3319f005ef32Sjsg 	case TRANSCONF_BPC_12:
3320c349dbc7Sjsg 		pipe_config->pipe_bpp = 36;
3321c349dbc7Sjsg 		break;
3322c349dbc7Sjsg 	default:
3323c349dbc7Sjsg 		break;
3324c349dbc7Sjsg 	}
3325c349dbc7Sjsg 
3326f005ef32Sjsg 	if (tmp & TRANSCONF_COLOR_RANGE_SELECT)
3327c349dbc7Sjsg 		pipe_config->limited_color_range = true;
3328c349dbc7Sjsg 
3329f005ef32Sjsg 	switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) {
3330f005ef32Sjsg 	case TRANSCONF_OUTPUT_COLORSPACE_YUV601:
3331f005ef32Sjsg 	case TRANSCONF_OUTPUT_COLORSPACE_YUV709:
3332c349dbc7Sjsg 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
3333c349dbc7Sjsg 		break;
3334c349dbc7Sjsg 	default:
3335c349dbc7Sjsg 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
3336c349dbc7Sjsg 		break;
3337c349dbc7Sjsg 	}
3338c349dbc7Sjsg 
3339f005ef32Sjsg 	pipe_config->sink_format = pipe_config->output_format;
33401bb76ff1Sjsg 
3341f005ef32Sjsg 	pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
33421bb76ff1Sjsg 
3343f005ef32Sjsg 	pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
3344f005ef32Sjsg 
3345f005ef32Sjsg 	pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp);
3346c349dbc7Sjsg 
3347c349dbc7Sjsg 	pipe_config->csc_mode = intel_de_read(dev_priv,
3348c349dbc7Sjsg 					      PIPE_CSC_MODE(crtc->pipe));
3349c349dbc7Sjsg 
3350c349dbc7Sjsg 	i9xx_get_pipe_color_config(pipe_config);
3351c349dbc7Sjsg 	intel_color_get_config(pipe_config);
3352c349dbc7Sjsg 
3353c349dbc7Sjsg 	pipe_config->pixel_multiplier = 1;
33541bb76ff1Sjsg 
33551bb76ff1Sjsg 	ilk_pch_get_config(pipe_config);
3356c349dbc7Sjsg 
33575ca02815Sjsg 	intel_get_transcoder_timings(crtc, pipe_config);
3358c349dbc7Sjsg 	intel_get_pipe_src_size(crtc, pipe_config);
3359c349dbc7Sjsg 
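	/*
	 * Everything read back above (transcoder timings, pipe source size)
	 * and the panel fitter state below ends up in the software
	 * crtc_state that the modeset state checker later compares against
	 * the computed state, so the decode must mirror the enable path.
	 */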
3360ad8b1aafSjsg ilk_get_pfit_config(pipe_config); 3361c349dbc7Sjsg 3362c349dbc7Sjsg ret = true; 3363c349dbc7Sjsg 3364c349dbc7Sjsg out: 3365c349dbc7Sjsg intel_display_power_put(dev_priv, power_domain, wakeref); 3366c349dbc7Sjsg 3367c349dbc7Sjsg return ret; 3368c349dbc7Sjsg } 3369c349dbc7Sjsg 33701bb76ff1Sjsg static u8 bigjoiner_pipes(struct drm_i915_private *i915) 33711bb76ff1Sjsg { 33721bb76ff1Sjsg u8 pipes; 33731bb76ff1Sjsg 33741bb76ff1Sjsg if (DISPLAY_VER(i915) >= 12) 33751bb76ff1Sjsg pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 33761bb76ff1Sjsg else if (DISPLAY_VER(i915) >= 11) 33771bb76ff1Sjsg pipes = BIT(PIPE_B) | BIT(PIPE_C); 33781bb76ff1Sjsg else 33791bb76ff1Sjsg pipes = 0; 33801bb76ff1Sjsg 3381f005ef32Sjsg return pipes & DISPLAY_RUNTIME_INFO(i915)->pipe_mask; 33821bb76ff1Sjsg } 33831bb76ff1Sjsg 33841bb76ff1Sjsg static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, 33851bb76ff1Sjsg enum transcoder cpu_transcoder) 33861bb76ff1Sjsg { 33871bb76ff1Sjsg enum intel_display_power_domain power_domain; 33881bb76ff1Sjsg intel_wakeref_t wakeref; 33891bb76ff1Sjsg u32 tmp = 0; 33901bb76ff1Sjsg 33911bb76ff1Sjsg power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 33921bb76ff1Sjsg 33931bb76ff1Sjsg with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 33941bb76ff1Sjsg tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 33951bb76ff1Sjsg 33961bb76ff1Sjsg return tmp & TRANS_DDI_FUNC_ENABLE; 33971bb76ff1Sjsg } 33981bb76ff1Sjsg 33991bb76ff1Sjsg static void enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv, 34001bb76ff1Sjsg u8 *master_pipes, u8 *slave_pipes) 34011bb76ff1Sjsg { 34021bb76ff1Sjsg struct intel_crtc *crtc; 34031bb76ff1Sjsg 34041bb76ff1Sjsg *master_pipes = 0; 34051bb76ff1Sjsg *slave_pipes = 0; 34061bb76ff1Sjsg 34071bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, 34081bb76ff1Sjsg bigjoiner_pipes(dev_priv)) { 34091bb76ff1Sjsg enum intel_display_power_domain power_domain; 34101bb76ff1Sjsg enum pipe pipe = crtc->pipe; 34111bb76ff1Sjsg intel_wakeref_t wakeref; 34121bb76ff1Sjsg 34131bb76ff1Sjsg power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 34141bb76ff1Sjsg with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 34151bb76ff1Sjsg u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 34161bb76ff1Sjsg 34171bb76ff1Sjsg if (!(tmp & BIG_JOINER_ENABLE)) 34181bb76ff1Sjsg continue; 34191bb76ff1Sjsg 34201bb76ff1Sjsg if (tmp & MASTER_BIG_JOINER_ENABLE) 34211bb76ff1Sjsg *master_pipes |= BIT(pipe); 34221bb76ff1Sjsg else 34231bb76ff1Sjsg *slave_pipes |= BIT(pipe); 34241bb76ff1Sjsg } 34251bb76ff1Sjsg 34261bb76ff1Sjsg if (DISPLAY_VER(dev_priv) < 13) 34271bb76ff1Sjsg continue; 34281bb76ff1Sjsg 34291bb76ff1Sjsg power_domain = POWER_DOMAIN_PIPE(pipe); 34301bb76ff1Sjsg with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 34311bb76ff1Sjsg u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 34321bb76ff1Sjsg 34331bb76ff1Sjsg if (tmp & UNCOMPRESSED_JOINER_MASTER) 34341bb76ff1Sjsg *master_pipes |= BIT(pipe); 34351bb76ff1Sjsg if (tmp & UNCOMPRESSED_JOINER_SLAVE) 34361bb76ff1Sjsg *slave_pipes |= BIT(pipe); 34371bb76ff1Sjsg } 34381bb76ff1Sjsg } 34391bb76ff1Sjsg 34401bb76ff1Sjsg /* Bigjoiner pipes should always be consecutive master and slave */ 34411bb76ff1Sjsg drm_WARN(&dev_priv->drm, *slave_pipes != *master_pipes << 1, 34421bb76ff1Sjsg "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", 34431bb76ff1Sjsg *master_pipes, *slave_pipes); 
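	/*
	 * E.g. (illustrative): with pipes A+B joined, *master_pipes ==
	 * BIT(PIPE_A) == 0x1 and *slave_pipes == BIT(PIPE_B) == 0x2, which
	 * satisfies the "slave_pipes == master_pipes << 1" relationship
	 * checked above.
	 */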
34441bb76ff1Sjsg } 34451bb76ff1Sjsg 34461bb76ff1Sjsg static enum pipe get_bigjoiner_master_pipe(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 34471bb76ff1Sjsg { 34481bb76ff1Sjsg if ((slave_pipes & BIT(pipe)) == 0) 34491bb76ff1Sjsg return pipe; 34501bb76ff1Sjsg 34511bb76ff1Sjsg /* ignore everything above our pipe */ 34521bb76ff1Sjsg master_pipes &= ~GENMASK(7, pipe); 34531bb76ff1Sjsg 34541bb76ff1Sjsg /* highest remaining bit should be our master pipe */ 34551bb76ff1Sjsg return fls(master_pipes) - 1; 34561bb76ff1Sjsg } 34571bb76ff1Sjsg 34581bb76ff1Sjsg static u8 get_bigjoiner_slave_pipes(enum pipe pipe, u8 master_pipes, u8 slave_pipes) 34591bb76ff1Sjsg { 34601bb76ff1Sjsg enum pipe master_pipe, next_master_pipe; 34611bb76ff1Sjsg 34621bb76ff1Sjsg master_pipe = get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes); 34631bb76ff1Sjsg 34641bb76ff1Sjsg if ((master_pipes & BIT(master_pipe)) == 0) 34651bb76ff1Sjsg return 0; 34661bb76ff1Sjsg 34671bb76ff1Sjsg /* ignore our master pipe and everything below it */ 34681bb76ff1Sjsg master_pipes &= ~GENMASK(master_pipe, 0); 34691bb76ff1Sjsg /* make sure a high bit is set for the ffs() */ 34701bb76ff1Sjsg master_pipes |= BIT(7); 34711bb76ff1Sjsg /* lowest remaining bit should be the next master pipe */ 34721bb76ff1Sjsg next_master_pipe = ffs(master_pipes) - 1; 34731bb76ff1Sjsg 34741bb76ff1Sjsg return slave_pipes & GENMASK(next_master_pipe - 1, master_pipe); 34751bb76ff1Sjsg } 34761bb76ff1Sjsg 34771bb76ff1Sjsg static u8 hsw_panel_transcoders(struct drm_i915_private *i915) 34781bb76ff1Sjsg { 34791bb76ff1Sjsg u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); 34801bb76ff1Sjsg 34811bb76ff1Sjsg if (DISPLAY_VER(i915) >= 11) 34821bb76ff1Sjsg panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); 34831bb76ff1Sjsg 34841bb76ff1Sjsg return panel_transcoder_mask; 34851bb76ff1Sjsg } 34861bb76ff1Sjsg 34871bb76ff1Sjsg static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) 3488c349dbc7Sjsg { 3489c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 3490c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 34911bb76ff1Sjsg u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); 34921bb76ff1Sjsg enum transcoder cpu_transcoder; 34931bb76ff1Sjsg u8 master_pipes, slave_pipes; 34941bb76ff1Sjsg u8 enabled_transcoders = 0; 3495c349dbc7Sjsg 3496c349dbc7Sjsg /* 3497c349dbc7Sjsg * XXX: Do intel_display_power_get_if_enabled before reading this (for 3498c349dbc7Sjsg * consistency and less surprising code; it's in always on power). 
3499c349dbc7Sjsg */ 35001bb76ff1Sjsg for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, 3501ad8b1aafSjsg panel_transcoder_mask) { 35021bb76ff1Sjsg enum intel_display_power_domain power_domain; 35031bb76ff1Sjsg intel_wakeref_t wakeref; 3504c349dbc7Sjsg enum pipe trans_pipe; 35051bb76ff1Sjsg u32 tmp = 0; 3506c349dbc7Sjsg 35071bb76ff1Sjsg power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 35081bb76ff1Sjsg with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) 35091bb76ff1Sjsg tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 35101bb76ff1Sjsg 3511c349dbc7Sjsg if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 3512c349dbc7Sjsg continue; 3513c349dbc7Sjsg 3514c349dbc7Sjsg switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 3515c349dbc7Sjsg default: 3516c349dbc7Sjsg drm_WARN(dev, 1, 3517c349dbc7Sjsg "unknown pipe linked to transcoder %s\n", 35181bb76ff1Sjsg transcoder_name(cpu_transcoder)); 3519ad8b1aafSjsg fallthrough; 3520c349dbc7Sjsg case TRANS_DDI_EDP_INPUT_A_ONOFF: 3521c349dbc7Sjsg case TRANS_DDI_EDP_INPUT_A_ON: 3522c349dbc7Sjsg trans_pipe = PIPE_A; 3523c349dbc7Sjsg break; 3524c349dbc7Sjsg case TRANS_DDI_EDP_INPUT_B_ONOFF: 3525c349dbc7Sjsg trans_pipe = PIPE_B; 3526c349dbc7Sjsg break; 3527c349dbc7Sjsg case TRANS_DDI_EDP_INPUT_C_ONOFF: 3528c349dbc7Sjsg trans_pipe = PIPE_C; 3529c349dbc7Sjsg break; 3530c349dbc7Sjsg case TRANS_DDI_EDP_INPUT_D_ONOFF: 3531c349dbc7Sjsg trans_pipe = PIPE_D; 3532c349dbc7Sjsg break; 3533c349dbc7Sjsg } 3534c349dbc7Sjsg 35351bb76ff1Sjsg if (trans_pipe == crtc->pipe) 35361bb76ff1Sjsg enabled_transcoders |= BIT(cpu_transcoder); 3537c349dbc7Sjsg } 3538c349dbc7Sjsg 35391bb76ff1Sjsg /* single pipe or bigjoiner master */ 35401bb76ff1Sjsg cpu_transcoder = (enum transcoder) crtc->pipe; 35411bb76ff1Sjsg if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 35421bb76ff1Sjsg enabled_transcoders |= BIT(cpu_transcoder); 35431bb76ff1Sjsg 35441bb76ff1Sjsg /* bigjoiner slave -> consider the master pipe's transcoder as well */ 35451bb76ff1Sjsg enabled_bigjoiner_pipes(dev_priv, &master_pipes, &slave_pipes); 35461bb76ff1Sjsg if (slave_pipes & BIT(crtc->pipe)) { 35471bb76ff1Sjsg cpu_transcoder = (enum transcoder) 35481bb76ff1Sjsg get_bigjoiner_master_pipe(crtc->pipe, master_pipes, slave_pipes); 35491bb76ff1Sjsg if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 35501bb76ff1Sjsg enabled_transcoders |= BIT(cpu_transcoder); 35511bb76ff1Sjsg } 35521bb76ff1Sjsg 35531bb76ff1Sjsg return enabled_transcoders; 35541bb76ff1Sjsg } 35551bb76ff1Sjsg 35561bb76ff1Sjsg static bool has_edp_transcoders(u8 enabled_transcoders) 35571bb76ff1Sjsg { 35581bb76ff1Sjsg return enabled_transcoders & BIT(TRANSCODER_EDP); 35591bb76ff1Sjsg } 35601bb76ff1Sjsg 35611bb76ff1Sjsg static bool has_dsi_transcoders(u8 enabled_transcoders) 35621bb76ff1Sjsg { 35631bb76ff1Sjsg return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | 35641bb76ff1Sjsg BIT(TRANSCODER_DSI_1)); 35651bb76ff1Sjsg } 35661bb76ff1Sjsg 35671bb76ff1Sjsg static bool has_pipe_transcoders(u8 enabled_transcoders) 35681bb76ff1Sjsg { 35691bb76ff1Sjsg return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | 35701bb76ff1Sjsg BIT(TRANSCODER_DSI_0) | 35711bb76ff1Sjsg BIT(TRANSCODER_DSI_1)); 35721bb76ff1Sjsg } 35731bb76ff1Sjsg 35741bb76ff1Sjsg static void assert_enabled_transcoders(struct drm_i915_private *i915, 35751bb76ff1Sjsg u8 enabled_transcoders) 35761bb76ff1Sjsg { 35771bb76ff1Sjsg /* Only one type of transcoder please */ 35781bb76ff1Sjsg drm_WARN_ON(&i915->drm, 35791bb76ff1Sjsg has_edp_transcoders(enabled_transcoders) + 35801bb76ff1Sjsg 
has_dsi_transcoders(enabled_transcoders) + 35811bb76ff1Sjsg has_pipe_transcoders(enabled_transcoders) > 1); 35821bb76ff1Sjsg 35831bb76ff1Sjsg /* Only DSI transcoders can be ganged */ 35841bb76ff1Sjsg drm_WARN_ON(&i915->drm, 35851bb76ff1Sjsg !has_dsi_transcoders(enabled_transcoders) && 35861bb76ff1Sjsg !is_power_of_2(enabled_transcoders)); 35871bb76ff1Sjsg } 35881bb76ff1Sjsg 35891bb76ff1Sjsg static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 35901bb76ff1Sjsg struct intel_crtc_state *pipe_config, 35911bb76ff1Sjsg struct intel_display_power_domain_set *power_domain_set) 35921bb76ff1Sjsg { 35931bb76ff1Sjsg struct drm_device *dev = crtc->base.dev; 35941bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 35951bb76ff1Sjsg unsigned long enabled_transcoders; 35961bb76ff1Sjsg u32 tmp; 35971bb76ff1Sjsg 35981bb76ff1Sjsg enabled_transcoders = hsw_enabled_transcoders(crtc); 35991bb76ff1Sjsg if (!enabled_transcoders) 36001bb76ff1Sjsg return false; 36011bb76ff1Sjsg 36021bb76ff1Sjsg assert_enabled_transcoders(dev_priv, enabled_transcoders); 36031bb76ff1Sjsg 3604c349dbc7Sjsg /* 36051bb76ff1Sjsg * With the exception of DSI we should only ever have 36061bb76ff1Sjsg * a single enabled transcoder. With DSI let's just 36071bb76ff1Sjsg * pick the first one. 3608c349dbc7Sjsg */ 36091bb76ff1Sjsg pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; 3610c349dbc7Sjsg 36115ca02815Sjsg if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 36125ca02815Sjsg POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 3613c349dbc7Sjsg return false; 3614c349dbc7Sjsg 36151bb76ff1Sjsg if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { 36161bb76ff1Sjsg tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 36171bb76ff1Sjsg 36181bb76ff1Sjsg if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) 36191bb76ff1Sjsg pipe_config->pch_pfit.force_thru = true; 36201bb76ff1Sjsg } 36211bb76ff1Sjsg 3622f005ef32Sjsg tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 3623c349dbc7Sjsg 3624f005ef32Sjsg return tmp & TRANSCONF_ENABLE; 3625c349dbc7Sjsg } 3626c349dbc7Sjsg 3627c349dbc7Sjsg static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 3628c349dbc7Sjsg struct intel_crtc_state *pipe_config, 36295ca02815Sjsg struct intel_display_power_domain_set *power_domain_set) 3630c349dbc7Sjsg { 3631c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 3632c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 3633c349dbc7Sjsg enum transcoder cpu_transcoder; 3634c349dbc7Sjsg enum port port; 3635c349dbc7Sjsg u32 tmp; 3636c349dbc7Sjsg 3637c349dbc7Sjsg for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 3638c349dbc7Sjsg if (port == PORT_A) 3639c349dbc7Sjsg cpu_transcoder = TRANSCODER_DSI_A; 3640c349dbc7Sjsg else 3641c349dbc7Sjsg cpu_transcoder = TRANSCODER_DSI_C; 3642c349dbc7Sjsg 36435ca02815Sjsg if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 36445ca02815Sjsg POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 3645c349dbc7Sjsg continue; 3646c349dbc7Sjsg 3647c349dbc7Sjsg /* 3648c349dbc7Sjsg * The PLL needs to be enabled with a valid divider 3649c349dbc7Sjsg * configuration, otherwise accessing DSI registers will hang 3650c349dbc7Sjsg * the machine. See BSpec North Display Engine 3651c349dbc7Sjsg * registers/MIPI[BXT]. We can break out here early, since we 3652c349dbc7Sjsg * need the same DSI PLL to be enabled for both DSI ports. 
3653c349dbc7Sjsg */ 3654c349dbc7Sjsg if (!bxt_dsi_pll_is_enabled(dev_priv)) 3655c349dbc7Sjsg break; 3656c349dbc7Sjsg 3657c349dbc7Sjsg /* XXX: this works for video mode only */ 3658c349dbc7Sjsg tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 3659c349dbc7Sjsg if (!(tmp & DPI_ENABLE)) 3660c349dbc7Sjsg continue; 3661c349dbc7Sjsg 3662c349dbc7Sjsg tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 3663c349dbc7Sjsg if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 3664c349dbc7Sjsg continue; 3665c349dbc7Sjsg 3666c349dbc7Sjsg pipe_config->cpu_transcoder = cpu_transcoder; 3667c349dbc7Sjsg break; 3668c349dbc7Sjsg } 3669c349dbc7Sjsg 3670c349dbc7Sjsg return transcoder_is_dsi(pipe_config->cpu_transcoder); 3671c349dbc7Sjsg } 3672c349dbc7Sjsg 36731bb76ff1Sjsg static void intel_bigjoiner_get_config(struct intel_crtc_state *crtc_state) 3674c349dbc7Sjsg { 36751bb76ff1Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 36761bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 36771bb76ff1Sjsg u8 master_pipes, slave_pipes; 36781bb76ff1Sjsg enum pipe pipe = crtc->pipe; 3679c349dbc7Sjsg 36801bb76ff1Sjsg enabled_bigjoiner_pipes(i915, &master_pipes, &slave_pipes); 36811bb76ff1Sjsg 36821bb76ff1Sjsg if (((master_pipes | slave_pipes) & BIT(pipe)) == 0) 36835ca02815Sjsg return; 3684c349dbc7Sjsg 36851bb76ff1Sjsg crtc_state->bigjoiner_pipes = 36861bb76ff1Sjsg BIT(get_bigjoiner_master_pipe(pipe, master_pipes, slave_pipes)) | 36871bb76ff1Sjsg get_bigjoiner_slave_pipes(pipe, master_pipes, slave_pipes); 3688c349dbc7Sjsg } 3689c349dbc7Sjsg 3690c349dbc7Sjsg static bool hsw_get_pipe_config(struct intel_crtc *crtc, 3691c349dbc7Sjsg struct intel_crtc_state *pipe_config) 3692c349dbc7Sjsg { 3693c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3694c349dbc7Sjsg bool active; 3695c349dbc7Sjsg u32 tmp; 3696c349dbc7Sjsg 3697f005ef32Sjsg if (!intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 36985ca02815Sjsg POWER_DOMAIN_PIPE(crtc->pipe))) 3699c349dbc7Sjsg return false; 3700c349dbc7Sjsg 3701c349dbc7Sjsg pipe_config->shared_dpll = NULL; 3702c349dbc7Sjsg 3703f005ef32Sjsg active = hsw_get_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains); 3704c349dbc7Sjsg 37055ca02815Sjsg if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 3706f005ef32Sjsg bxt_get_dsi_transcoder_state(crtc, pipe_config, &crtc->hw_readout_power_domains)) { 3707c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, active); 3708c349dbc7Sjsg active = true; 3709c349dbc7Sjsg } 3710c349dbc7Sjsg 37111bb76ff1Sjsg if (!active) 3712c349dbc7Sjsg goto out; 3713c349dbc7Sjsg 37141bb76ff1Sjsg intel_dsc_get_config(pipe_config); 37151bb76ff1Sjsg intel_bigjoiner_get_config(pipe_config); 37165ca02815Sjsg 37171bb76ff1Sjsg if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 37181bb76ff1Sjsg DISPLAY_VER(dev_priv) >= 11) 37195ca02815Sjsg intel_get_transcoder_timings(crtc, pipe_config); 3720c349dbc7Sjsg 37215ca02815Sjsg if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 3722f005ef32Sjsg intel_vrr_get_config(pipe_config); 37235ca02815Sjsg 3724c349dbc7Sjsg intel_get_pipe_src_size(crtc, pipe_config); 3725c349dbc7Sjsg 3726c349dbc7Sjsg if (IS_HASWELL(dev_priv)) { 3727c349dbc7Sjsg u32 tmp = intel_de_read(dev_priv, 3728f005ef32Sjsg TRANSCONF(pipe_config->cpu_transcoder)); 3729c349dbc7Sjsg 3730f005ef32Sjsg if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 3731c349dbc7Sjsg pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3732c349dbc7Sjsg else 
3733c349dbc7Sjsg pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3734c349dbc7Sjsg } else { 3735c349dbc7Sjsg pipe_config->output_format = 3736f005ef32Sjsg bdw_get_pipe_misc_output_format(crtc); 3737c349dbc7Sjsg } 3738c349dbc7Sjsg 3739f005ef32Sjsg pipe_config->sink_format = pipe_config->output_format; 3740f005ef32Sjsg 3741c349dbc7Sjsg pipe_config->gamma_mode = intel_de_read(dev_priv, 3742c349dbc7Sjsg GAMMA_MODE(crtc->pipe)); 3743c349dbc7Sjsg 3744c349dbc7Sjsg pipe_config->csc_mode = intel_de_read(dev_priv, 3745c349dbc7Sjsg PIPE_CSC_MODE(crtc->pipe)); 3746c349dbc7Sjsg 37475ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) { 3748c349dbc7Sjsg tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); 3749c349dbc7Sjsg 3750c349dbc7Sjsg if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) 3751c349dbc7Sjsg pipe_config->gamma_enable = true; 3752c349dbc7Sjsg 3753c349dbc7Sjsg if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) 3754c349dbc7Sjsg pipe_config->csc_enable = true; 3755c349dbc7Sjsg } else { 3756c349dbc7Sjsg i9xx_get_pipe_color_config(pipe_config); 3757c349dbc7Sjsg } 3758c349dbc7Sjsg 3759c349dbc7Sjsg intel_color_get_config(pipe_config); 3760c349dbc7Sjsg 3761c349dbc7Sjsg tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); 3762c349dbc7Sjsg pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); 3763c349dbc7Sjsg if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 3764c349dbc7Sjsg pipe_config->ips_linetime = 3765c349dbc7Sjsg REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 3766c349dbc7Sjsg 3767f005ef32Sjsg if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains, 37685ca02815Sjsg POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 37695ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) 3770f005ef32Sjsg skl_scaler_get_config(pipe_config); 3771c349dbc7Sjsg else 3772ad8b1aafSjsg ilk_get_pfit_config(pipe_config); 3773c349dbc7Sjsg } 3774c349dbc7Sjsg 37751bb76ff1Sjsg hsw_ips_get_config(pipe_config); 3776c349dbc7Sjsg 37771bb76ff1Sjsg if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 3778c349dbc7Sjsg !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3779c349dbc7Sjsg pipe_config->pixel_multiplier = 3780c349dbc7Sjsg intel_de_read(dev_priv, 3781f005ef32Sjsg TRANS_MULT(pipe_config->cpu_transcoder)) + 1; 3782c349dbc7Sjsg } else { 3783c349dbc7Sjsg pipe_config->pixel_multiplier = 1; 3784c349dbc7Sjsg } 3785c349dbc7Sjsg 37861bb76ff1Sjsg if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 37871bb76ff1Sjsg tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? 
37881bb76ff1Sjsg MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : 37891bb76ff1Sjsg CHICKEN_TRANS(pipe_config->cpu_transcoder)); 37901bb76ff1Sjsg 37911bb76ff1Sjsg pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 37921bb76ff1Sjsg } else { 37931bb76ff1Sjsg /* no idea if this is correct */ 37941bb76ff1Sjsg pipe_config->framestart_delay = 1; 37951bb76ff1Sjsg } 37961bb76ff1Sjsg 3797c349dbc7Sjsg out: 3798f005ef32Sjsg intel_display_power_put_all_in_set(dev_priv, &crtc->hw_readout_power_domains); 3799c349dbc7Sjsg 3800c349dbc7Sjsg return active; 3801c349dbc7Sjsg } 3802c349dbc7Sjsg 38031bb76ff1Sjsg bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) 3804c349dbc7Sjsg { 3805c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 38065ca02815Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3807c349dbc7Sjsg 38081bb76ff1Sjsg if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) 3809c349dbc7Sjsg return false; 3810c349dbc7Sjsg 38115ca02815Sjsg crtc_state->hw.active = true; 3812c349dbc7Sjsg 38135ca02815Sjsg intel_crtc_readout_derived_state(crtc_state); 3814c349dbc7Sjsg 3815c349dbc7Sjsg return true; 3816c349dbc7Sjsg } 3817c349dbc7Sjsg 3818c349dbc7Sjsg static int i9xx_pll_refclk(struct drm_device *dev, 3819c349dbc7Sjsg const struct intel_crtc_state *pipe_config) 3820c349dbc7Sjsg { 3821c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 3822c349dbc7Sjsg u32 dpll = pipe_config->dpll_hw_state.dpll; 3823c349dbc7Sjsg 3824c349dbc7Sjsg if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 38251bb76ff1Sjsg return dev_priv->display.vbt.lvds_ssc_freq; 3826c349dbc7Sjsg else if (HAS_PCH_SPLIT(dev_priv)) 3827c349dbc7Sjsg return 120000; 38285ca02815Sjsg else if (DISPLAY_VER(dev_priv) != 2) 3829c349dbc7Sjsg return 96000; 3830c349dbc7Sjsg else 3831c349dbc7Sjsg return 48000; 3832c349dbc7Sjsg } 3833c349dbc7Sjsg 3834c349dbc7Sjsg /* Returns the clock of the currently programmed mode of the given pipe. 
*/ 38351bb76ff1Sjsg void i9xx_crtc_clock_get(struct intel_crtc *crtc, 3836c349dbc7Sjsg struct intel_crtc_state *pipe_config) 3837c349dbc7Sjsg { 3838c349dbc7Sjsg struct drm_device *dev = crtc->base.dev; 3839c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 3840c349dbc7Sjsg u32 dpll = pipe_config->dpll_hw_state.dpll; 3841c349dbc7Sjsg u32 fp; 3842c349dbc7Sjsg struct dpll clock; 3843c349dbc7Sjsg int port_clock; 3844c349dbc7Sjsg int refclk = i9xx_pll_refclk(dev, pipe_config); 3845c349dbc7Sjsg 3846c349dbc7Sjsg if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 3847c349dbc7Sjsg fp = pipe_config->dpll_hw_state.fp0; 3848c349dbc7Sjsg else 3849c349dbc7Sjsg fp = pipe_config->dpll_hw_state.fp1; 3850c349dbc7Sjsg 3851c349dbc7Sjsg clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 3852c349dbc7Sjsg if (IS_PINEVIEW(dev_priv)) { 3853c349dbc7Sjsg clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 3854c349dbc7Sjsg clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 3855c349dbc7Sjsg } else { 3856c349dbc7Sjsg clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 3857c349dbc7Sjsg clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 3858c349dbc7Sjsg } 3859c349dbc7Sjsg 38605ca02815Sjsg if (DISPLAY_VER(dev_priv) != 2) { 3861c349dbc7Sjsg if (IS_PINEVIEW(dev_priv)) 3862c349dbc7Sjsg clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 3863c349dbc7Sjsg DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 3864c349dbc7Sjsg else 3865c349dbc7Sjsg clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 3866c349dbc7Sjsg DPLL_FPA01_P1_POST_DIV_SHIFT); 3867c349dbc7Sjsg 3868c349dbc7Sjsg switch (dpll & DPLL_MODE_MASK) { 3869c349dbc7Sjsg case DPLLB_MODE_DAC_SERIAL: 3870c349dbc7Sjsg clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 3871c349dbc7Sjsg 5 : 10; 3872c349dbc7Sjsg break; 3873c349dbc7Sjsg case DPLLB_MODE_LVDS: 3874c349dbc7Sjsg clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
3875c349dbc7Sjsg 7 : 14; 3876c349dbc7Sjsg break; 3877c349dbc7Sjsg default: 3878c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 3879c349dbc7Sjsg "Unknown DPLL mode %08x in programmed " 3880c349dbc7Sjsg "mode\n", (int)(dpll & DPLL_MODE_MASK)); 3881c349dbc7Sjsg return; 3882c349dbc7Sjsg } 3883c349dbc7Sjsg 3884c349dbc7Sjsg if (IS_PINEVIEW(dev_priv)) 3885c349dbc7Sjsg port_clock = pnv_calc_dpll_params(refclk, &clock); 3886c349dbc7Sjsg else 3887c349dbc7Sjsg port_clock = i9xx_calc_dpll_params(refclk, &clock); 3888c349dbc7Sjsg } else { 38891bb76ff1Sjsg enum pipe lvds_pipe; 3890c349dbc7Sjsg 38911bb76ff1Sjsg if (IS_I85X(dev_priv) && 38921bb76ff1Sjsg intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && 38931bb76ff1Sjsg lvds_pipe == crtc->pipe) { 38941bb76ff1Sjsg u32 lvds = intel_de_read(dev_priv, LVDS); 38951bb76ff1Sjsg 3896c349dbc7Sjsg clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 3897c349dbc7Sjsg DPLL_FPA01_P1_POST_DIV_SHIFT); 3898c349dbc7Sjsg 3899c349dbc7Sjsg if (lvds & LVDS_CLKB_POWER_UP) 3900c349dbc7Sjsg clock.p2 = 7; 3901c349dbc7Sjsg else 3902c349dbc7Sjsg clock.p2 = 14; 3903c349dbc7Sjsg } else { 3904c349dbc7Sjsg if (dpll & PLL_P1_DIVIDE_BY_TWO) 3905c349dbc7Sjsg clock.p1 = 2; 3906c349dbc7Sjsg else { 3907c349dbc7Sjsg clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 3908c349dbc7Sjsg DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 3909c349dbc7Sjsg } 3910c349dbc7Sjsg if (dpll & PLL_P2_DIVIDE_BY_4) 3911c349dbc7Sjsg clock.p2 = 4; 3912c349dbc7Sjsg else 3913c349dbc7Sjsg clock.p2 = 2; 3914c349dbc7Sjsg } 3915c349dbc7Sjsg 3916c349dbc7Sjsg port_clock = i9xx_calc_dpll_params(refclk, &clock); 3917c349dbc7Sjsg } 3918c349dbc7Sjsg 3919c349dbc7Sjsg /* 3920c349dbc7Sjsg * This value includes pixel_multiplier. We will use 3921c349dbc7Sjsg * port_clock to compute adjusted_mode.crtc_clock in the 3922c349dbc7Sjsg * encoder's get_config() function. 
3923c349dbc7Sjsg */ 3924c349dbc7Sjsg pipe_config->port_clock = port_clock; 3925c349dbc7Sjsg } 3926c349dbc7Sjsg 3927c349dbc7Sjsg int intel_dotclock_calculate(int link_freq, 3928c349dbc7Sjsg const struct intel_link_m_n *m_n) 3929c349dbc7Sjsg { 3930c349dbc7Sjsg /* 3931c349dbc7Sjsg * The calculation for the data clock is: 3932c349dbc7Sjsg * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 3933c349dbc7Sjsg * But we want to avoid losing precison if possible, so: 3934c349dbc7Sjsg * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 3935c349dbc7Sjsg * 3936c349dbc7Sjsg * and the link clock is simpler: 3937c349dbc7Sjsg * link_clock = (m * link_clock) / n 3938c349dbc7Sjsg */ 3939c349dbc7Sjsg 3940c349dbc7Sjsg if (!m_n->link_n) 3941c349dbc7Sjsg return 0; 3942c349dbc7Sjsg 39431bb76ff1Sjsg return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), 39441bb76ff1Sjsg m_n->link_n); 3945c349dbc7Sjsg } 3946c349dbc7Sjsg 39471bb76ff1Sjsg int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) 3948c349dbc7Sjsg { 39491bb76ff1Sjsg int dotclock; 3950c349dbc7Sjsg 39511bb76ff1Sjsg if (intel_crtc_has_dp_encoder(pipe_config)) 39521bb76ff1Sjsg dotclock = intel_dotclock_calculate(pipe_config->port_clock, 39531bb76ff1Sjsg &pipe_config->dp_m_n); 39541bb76ff1Sjsg else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) 39551bb76ff1Sjsg dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, 39561bb76ff1Sjsg pipe_config->pipe_bpp); 39571bb76ff1Sjsg else 39581bb76ff1Sjsg dotclock = pipe_config->port_clock; 3959c349dbc7Sjsg 39601bb76ff1Sjsg if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && 39611bb76ff1Sjsg !intel_crtc_has_dp_encoder(pipe_config)) 39621bb76ff1Sjsg dotclock *= 2; 39631bb76ff1Sjsg 39641bb76ff1Sjsg if (pipe_config->pixel_multiplier) 39651bb76ff1Sjsg dotclock /= pipe_config->pixel_multiplier; 39661bb76ff1Sjsg 39671bb76ff1Sjsg return dotclock; 3968c349dbc7Sjsg } 3969c349dbc7Sjsg 3970c349dbc7Sjsg /* Returns the currently programmed mode of the given encoder. 
*/ 3971c349dbc7Sjsg struct drm_display_mode * 3972c349dbc7Sjsg intel_encoder_current_mode(struct intel_encoder *encoder) 3973c349dbc7Sjsg { 3974c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3975c349dbc7Sjsg struct intel_crtc_state *crtc_state; 3976c349dbc7Sjsg struct drm_display_mode *mode; 3977c349dbc7Sjsg struct intel_crtc *crtc; 3978c349dbc7Sjsg enum pipe pipe; 3979c349dbc7Sjsg 3980c349dbc7Sjsg if (!encoder->get_hw_state(encoder, &pipe)) 3981c349dbc7Sjsg return NULL; 3982c349dbc7Sjsg 39831bb76ff1Sjsg crtc = intel_crtc_for_pipe(dev_priv, pipe); 3984c349dbc7Sjsg 3985c349dbc7Sjsg mode = kzalloc(sizeof(*mode), GFP_KERNEL); 3986c349dbc7Sjsg if (!mode) 3987c349dbc7Sjsg return NULL; 3988c349dbc7Sjsg 3989c349dbc7Sjsg crtc_state = intel_crtc_state_alloc(crtc); 3990c349dbc7Sjsg if (!crtc_state) { 3991c349dbc7Sjsg kfree(mode); 3992c349dbc7Sjsg return NULL; 3993c349dbc7Sjsg } 3994c349dbc7Sjsg 39955ca02815Sjsg if (!intel_crtc_get_pipe_config(crtc_state)) { 3996c349dbc7Sjsg kfree(crtc_state); 3997c349dbc7Sjsg kfree(mode); 3998c349dbc7Sjsg return NULL; 3999c349dbc7Sjsg } 4000c349dbc7Sjsg 40015ca02815Sjsg intel_encoder_get_config(encoder, crtc_state); 4002c349dbc7Sjsg 40035ca02815Sjsg intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode); 4004c349dbc7Sjsg 4005c349dbc7Sjsg kfree(crtc_state); 4006c349dbc7Sjsg 4007c349dbc7Sjsg return mode; 4008c349dbc7Sjsg } 4009c349dbc7Sjsg 4010c349dbc7Sjsg static bool encoders_cloneable(const struct intel_encoder *a, 4011c349dbc7Sjsg const struct intel_encoder *b) 4012c349dbc7Sjsg { 4013c349dbc7Sjsg /* masks could be asymmetric, so check both ways */ 4014f005ef32Sjsg return a == b || (a->cloneable & BIT(b->type) && 4015f005ef32Sjsg b->cloneable & BIT(a->type)); 4016c349dbc7Sjsg } 4017c349dbc7Sjsg 40185ca02815Sjsg static bool check_single_encoder_cloning(struct intel_atomic_state *state, 4019c349dbc7Sjsg struct intel_crtc *crtc, 4020c349dbc7Sjsg struct intel_encoder *encoder) 4021c349dbc7Sjsg { 4022c349dbc7Sjsg struct intel_encoder *source_encoder; 4023c349dbc7Sjsg struct drm_connector *connector; 4024c349dbc7Sjsg struct drm_connector_state *connector_state; 4025c349dbc7Sjsg int i; 4026c349dbc7Sjsg 40275ca02815Sjsg for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4028c349dbc7Sjsg if (connector_state->crtc != &crtc->base) 4029c349dbc7Sjsg continue; 4030c349dbc7Sjsg 4031c349dbc7Sjsg source_encoder = 4032c349dbc7Sjsg to_intel_encoder(connector_state->best_encoder); 4033c349dbc7Sjsg if (!encoders_cloneable(encoder, source_encoder)) 4034c349dbc7Sjsg return false; 4035c349dbc7Sjsg } 4036c349dbc7Sjsg 4037c349dbc7Sjsg return true; 4038c349dbc7Sjsg } 4039c349dbc7Sjsg 4040c349dbc7Sjsg static int icl_add_linked_planes(struct intel_atomic_state *state) 4041c349dbc7Sjsg { 4042c349dbc7Sjsg struct intel_plane *plane, *linked; 4043c349dbc7Sjsg struct intel_plane_state *plane_state, *linked_plane_state; 4044c349dbc7Sjsg int i; 4045c349dbc7Sjsg 4046c349dbc7Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4047c349dbc7Sjsg linked = plane_state->planar_linked_plane; 4048c349dbc7Sjsg 4049c349dbc7Sjsg if (!linked) 4050c349dbc7Sjsg continue; 4051c349dbc7Sjsg 4052c349dbc7Sjsg linked_plane_state = intel_atomic_get_plane_state(state, linked); 4053c349dbc7Sjsg if (IS_ERR(linked_plane_state)) 4054c349dbc7Sjsg return PTR_ERR(linked_plane_state); 4055c349dbc7Sjsg 4056ad8b1aafSjsg drm_WARN_ON(state->base.dev, 4057ad8b1aafSjsg linked_plane_state->planar_linked_plane != plane); 4058ad8b1aafSjsg 
drm_WARN_ON(state->base.dev, 4059ad8b1aafSjsg linked_plane_state->planar_slave == plane_state->planar_slave); 4060c349dbc7Sjsg } 4061c349dbc7Sjsg 4062c349dbc7Sjsg return 0; 4063c349dbc7Sjsg } 4064c349dbc7Sjsg 4065c349dbc7Sjsg static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) 4066c349dbc7Sjsg { 4067c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4068c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4069c349dbc7Sjsg struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); 4070c349dbc7Sjsg struct intel_plane *plane, *linked; 4071c349dbc7Sjsg struct intel_plane_state *plane_state; 4072c349dbc7Sjsg int i; 4073c349dbc7Sjsg 40745ca02815Sjsg if (DISPLAY_VER(dev_priv) < 11) 4075c349dbc7Sjsg return 0; 4076c349dbc7Sjsg 4077c349dbc7Sjsg /* 4078c349dbc7Sjsg * Destroy all old plane links and make the slave plane invisible 4079c349dbc7Sjsg * in the crtc_state->active_planes mask. 4080c349dbc7Sjsg */ 4081c349dbc7Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4082c349dbc7Sjsg if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) 4083c349dbc7Sjsg continue; 4084c349dbc7Sjsg 4085c349dbc7Sjsg plane_state->planar_linked_plane = NULL; 4086c349dbc7Sjsg if (plane_state->planar_slave && !plane_state->uapi.visible) { 40875ca02815Sjsg crtc_state->enabled_planes &= ~BIT(plane->id); 4088c349dbc7Sjsg crtc_state->active_planes &= ~BIT(plane->id); 4089c349dbc7Sjsg crtc_state->update_planes |= BIT(plane->id); 40901bb76ff1Sjsg crtc_state->data_rate[plane->id] = 0; 40911bb76ff1Sjsg crtc_state->rel_data_rate[plane->id] = 0; 4092c349dbc7Sjsg } 4093c349dbc7Sjsg 4094c349dbc7Sjsg plane_state->planar_slave = false; 4095c349dbc7Sjsg } 4096c349dbc7Sjsg 4097c349dbc7Sjsg if (!crtc_state->nv12_planes) 4098c349dbc7Sjsg return 0; 4099c349dbc7Sjsg 4100c349dbc7Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 4101c349dbc7Sjsg struct intel_plane_state *linked_state = NULL; 4102c349dbc7Sjsg 4103c349dbc7Sjsg if (plane->pipe != crtc->pipe || 4104c349dbc7Sjsg !(crtc_state->nv12_planes & BIT(plane->id))) 4105c349dbc7Sjsg continue; 4106c349dbc7Sjsg 4107c349dbc7Sjsg for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { 4108ad8b1aafSjsg if (!icl_is_nv12_y_plane(dev_priv, linked->id)) 4109c349dbc7Sjsg continue; 4110c349dbc7Sjsg 4111c349dbc7Sjsg if (crtc_state->active_planes & BIT(linked->id)) 4112c349dbc7Sjsg continue; 4113c349dbc7Sjsg 4114c349dbc7Sjsg linked_state = intel_atomic_get_plane_state(state, linked); 4115c349dbc7Sjsg if (IS_ERR(linked_state)) 4116c349dbc7Sjsg return PTR_ERR(linked_state); 4117c349dbc7Sjsg 4118c349dbc7Sjsg break; 4119c349dbc7Sjsg } 4120c349dbc7Sjsg 4121c349dbc7Sjsg if (!linked_state) { 4122c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 4123c349dbc7Sjsg "Need %d free Y planes for planar YUV\n", 4124c349dbc7Sjsg hweight8(crtc_state->nv12_planes)); 4125c349dbc7Sjsg 4126c349dbc7Sjsg return -EINVAL; 4127c349dbc7Sjsg } 4128c349dbc7Sjsg 4129c349dbc7Sjsg plane_state->planar_linked_plane = linked; 4130c349dbc7Sjsg 4131c349dbc7Sjsg linked_state->planar_slave = true; 4132c349dbc7Sjsg linked_state->planar_linked_plane = plane; 41335ca02815Sjsg crtc_state->enabled_planes |= BIT(linked->id); 4134c349dbc7Sjsg crtc_state->active_planes |= BIT(linked->id); 4135c349dbc7Sjsg crtc_state->update_planes |= BIT(linked->id); 41361bb76ff1Sjsg crtc_state->data_rate[linked->id] = 41371bb76ff1Sjsg crtc_state->data_rate_y[plane->id]; 41381bb76ff1Sjsg crtc_state->rel_data_rate[linked->id] = 
41391bb76ff1Sjsg crtc_state->rel_data_rate_y[plane->id]; 4140c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", 4141c349dbc7Sjsg linked->base.name, plane->base.name); 4142c349dbc7Sjsg 4143c349dbc7Sjsg /* Copy parameters to slave plane */ 4144c349dbc7Sjsg linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 4145c349dbc7Sjsg linked_state->color_ctl = plane_state->color_ctl; 4146c349dbc7Sjsg linked_state->view = plane_state->view; 41471bb76ff1Sjsg linked_state->decrypt = plane_state->decrypt; 4148c349dbc7Sjsg 41495ca02815Sjsg intel_plane_copy_hw_state(linked_state, plane_state); 4150c349dbc7Sjsg linked_state->uapi.src = plane_state->uapi.src; 4151c349dbc7Sjsg linked_state->uapi.dst = plane_state->uapi.dst; 4152c349dbc7Sjsg 4153c349dbc7Sjsg if (icl_is_hdr_plane(dev_priv, plane->id)) { 4154c349dbc7Sjsg if (linked->id == PLANE_SPRITE5) 41551bb76ff1Sjsg plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; 4156c349dbc7Sjsg else if (linked->id == PLANE_SPRITE4) 41571bb76ff1Sjsg plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; 4158ad8b1aafSjsg else if (linked->id == PLANE_SPRITE3) 41591bb76ff1Sjsg plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; 4160ad8b1aafSjsg else if (linked->id == PLANE_SPRITE2) 41611bb76ff1Sjsg plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; 4162c349dbc7Sjsg else 4163c349dbc7Sjsg MISSING_CASE(linked->id); 4164c349dbc7Sjsg } 4165c349dbc7Sjsg } 4166c349dbc7Sjsg 4167c349dbc7Sjsg return 0; 4168c349dbc7Sjsg } 4169c349dbc7Sjsg 4170c349dbc7Sjsg static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) 4171c349dbc7Sjsg { 4172c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 4173c349dbc7Sjsg struct intel_atomic_state *state = 4174c349dbc7Sjsg to_intel_atomic_state(new_crtc_state->uapi.state); 4175c349dbc7Sjsg const struct intel_crtc_state *old_crtc_state = 4176c349dbc7Sjsg intel_atomic_get_old_crtc_state(state, crtc); 4177c349dbc7Sjsg 4178c349dbc7Sjsg return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; 4179c349dbc7Sjsg } 4180c349dbc7Sjsg 4181c349dbc7Sjsg static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) 4182c349dbc7Sjsg { 41835ca02815Sjsg const struct drm_display_mode *pipe_mode = 41845ca02815Sjsg &crtc_state->hw.pipe_mode; 4185ad8b1aafSjsg int linetime_wm; 4186c349dbc7Sjsg 4187c349dbc7Sjsg if (!crtc_state->hw.enable) 4188c349dbc7Sjsg return 0; 4189c349dbc7Sjsg 41905ca02815Sjsg linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 41915ca02815Sjsg pipe_mode->crtc_clock); 4192ad8b1aafSjsg 4193ad8b1aafSjsg return min(linetime_wm, 0x1ff); 4194c349dbc7Sjsg } 4195c349dbc7Sjsg 4196c349dbc7Sjsg static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, 4197c349dbc7Sjsg const struct intel_cdclk_state *cdclk_state) 4198c349dbc7Sjsg { 41995ca02815Sjsg const struct drm_display_mode *pipe_mode = 42005ca02815Sjsg &crtc_state->hw.pipe_mode; 4201ad8b1aafSjsg int linetime_wm; 4202c349dbc7Sjsg 4203c349dbc7Sjsg if (!crtc_state->hw.enable) 4204c349dbc7Sjsg return 0; 4205c349dbc7Sjsg 42065ca02815Sjsg linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, 4207c349dbc7Sjsg cdclk_state->logical.cdclk); 4208ad8b1aafSjsg 4209ad8b1aafSjsg return min(linetime_wm, 0x1ff); 4210c349dbc7Sjsg } 4211c349dbc7Sjsg 4212c349dbc7Sjsg static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) 4213c349dbc7Sjsg { 4214c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4215c349dbc7Sjsg struct drm_i915_private *dev_priv = 
to_i915(crtc->base.dev); 42165ca02815Sjsg const struct drm_display_mode *pipe_mode = 42175ca02815Sjsg &crtc_state->hw.pipe_mode; 4218ad8b1aafSjsg int linetime_wm; 4219c349dbc7Sjsg 4220c349dbc7Sjsg if (!crtc_state->hw.enable) 4221c349dbc7Sjsg return 0; 4222c349dbc7Sjsg 42235ca02815Sjsg linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8, 4224c349dbc7Sjsg crtc_state->pixel_rate); 4225c349dbc7Sjsg 4226c349dbc7Sjsg /* Display WA #1135: BXT:ALL GLK:ALL */ 42275ca02815Sjsg if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && 42281bb76ff1Sjsg skl_watermark_ipc_enabled(dev_priv)) 4229c349dbc7Sjsg linetime_wm /= 2; 4230c349dbc7Sjsg 4231ad8b1aafSjsg return min(linetime_wm, 0x1ff); 4232c349dbc7Sjsg } 4233c349dbc7Sjsg 4234c349dbc7Sjsg static int hsw_compute_linetime_wm(struct intel_atomic_state *state, 4235c349dbc7Sjsg struct intel_crtc *crtc) 4236c349dbc7Sjsg { 4237c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4238c349dbc7Sjsg struct intel_crtc_state *crtc_state = 4239c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4240c349dbc7Sjsg const struct intel_cdclk_state *cdclk_state; 4241c349dbc7Sjsg 42425ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) 4243c349dbc7Sjsg crtc_state->linetime = skl_linetime_wm(crtc_state); 4244c349dbc7Sjsg else 4245c349dbc7Sjsg crtc_state->linetime = hsw_linetime_wm(crtc_state); 4246c349dbc7Sjsg 4247c349dbc7Sjsg if (!hsw_crtc_supports_ips(crtc)) 4248c349dbc7Sjsg return 0; 4249c349dbc7Sjsg 4250c349dbc7Sjsg cdclk_state = intel_atomic_get_cdclk_state(state); 4251c349dbc7Sjsg if (IS_ERR(cdclk_state)) 4252c349dbc7Sjsg return PTR_ERR(cdclk_state); 4253c349dbc7Sjsg 4254c349dbc7Sjsg crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, 4255c349dbc7Sjsg cdclk_state); 4256c349dbc7Sjsg 4257c349dbc7Sjsg return 0; 4258c349dbc7Sjsg } 4259c349dbc7Sjsg 4260c349dbc7Sjsg static int intel_crtc_atomic_check(struct intel_atomic_state *state, 4261c349dbc7Sjsg struct intel_crtc *crtc) 4262c349dbc7Sjsg { 4263c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4264c349dbc7Sjsg struct intel_crtc_state *crtc_state = 4265c349dbc7Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4266c349dbc7Sjsg int ret; 4267c349dbc7Sjsg 42685ca02815Sjsg if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) && 4269f005ef32Sjsg intel_crtc_needs_modeset(crtc_state) && 4270f005ef32Sjsg !crtc_state->hw.active) 4271c349dbc7Sjsg crtc_state->update_wm_post = true; 4272c349dbc7Sjsg 4273f005ef32Sjsg if (intel_crtc_needs_modeset(crtc_state)) { 42741bb76ff1Sjsg ret = intel_dpll_crtc_get_shared_dpll(state, crtc); 4275c349dbc7Sjsg if (ret) 4276c349dbc7Sjsg return ret; 4277c349dbc7Sjsg } 4278c349dbc7Sjsg 4279c349dbc7Sjsg /* 4280c349dbc7Sjsg * May need to update pipe gamma enable bits 4281c349dbc7Sjsg * when C8 planes are getting enabled/disabled. 
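 * (C8 planes appear to use the pipe's legacy LUT as their palette, which
 * is why toggling them forces the color management recheck below.)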
4282c349dbc7Sjsg */ 4283c349dbc7Sjsg if (c8_planes_changed(crtc_state)) 4284c349dbc7Sjsg crtc_state->uapi.color_mgmt_changed = true; 4285c349dbc7Sjsg 4286f005ef32Sjsg if (intel_crtc_needs_color_update(crtc_state)) { 4287c349dbc7Sjsg ret = intel_color_check(crtc_state); 4288c349dbc7Sjsg if (ret) 4289c349dbc7Sjsg return ret; 4290c349dbc7Sjsg } 4291c349dbc7Sjsg 42921bb76ff1Sjsg ret = intel_compute_pipe_wm(state, crtc); 4293c349dbc7Sjsg if (ret) { 4294c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 4295c349dbc7Sjsg "Target pipe watermarks are invalid\n"); 4296c349dbc7Sjsg return ret; 4297c349dbc7Sjsg } 42985ca02815Sjsg 4299c349dbc7Sjsg /* 4300c349dbc7Sjsg * Calculate 'intermediate' watermarks that satisfy both the 4301c349dbc7Sjsg * old state and the new state. We can program these 4302c349dbc7Sjsg * immediately. 4303c349dbc7Sjsg */ 43041bb76ff1Sjsg ret = intel_compute_intermediate_wm(state, crtc); 4305c349dbc7Sjsg if (ret) { 4306c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 4307c349dbc7Sjsg "No valid intermediate pipe watermarks are possible\n"); 4308c349dbc7Sjsg return ret; 4309c349dbc7Sjsg } 4310c349dbc7Sjsg 43115ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) { 4312f005ef32Sjsg if (intel_crtc_needs_modeset(crtc_state) || 4313f005ef32Sjsg intel_crtc_needs_fastset(crtc_state)) { 4314c349dbc7Sjsg ret = skl_update_scaler_crtc(crtc_state); 4315c349dbc7Sjsg if (ret) 4316c349dbc7Sjsg return ret; 4317c349dbc7Sjsg } 4318c349dbc7Sjsg 4319c349dbc7Sjsg ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); 4320c349dbc7Sjsg if (ret) 4321c349dbc7Sjsg return ret; 4322c349dbc7Sjsg } 4323c349dbc7Sjsg 4324c349dbc7Sjsg if (HAS_IPS(dev_priv)) { 43251bb76ff1Sjsg ret = hsw_ips_compute_config(state, crtc); 4326c349dbc7Sjsg if (ret) 4327c349dbc7Sjsg return ret; 4328c349dbc7Sjsg } 4329c349dbc7Sjsg 43305ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 || 4331c349dbc7Sjsg IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 4332c349dbc7Sjsg ret = hsw_compute_linetime_wm(state, crtc); 4333c349dbc7Sjsg if (ret) 4334c349dbc7Sjsg return ret; 4335c349dbc7Sjsg 4336c349dbc7Sjsg } 4337c349dbc7Sjsg 43385ca02815Sjsg ret = intel_psr2_sel_fetch_update(state, crtc); 43395ca02815Sjsg if (ret) 43405ca02815Sjsg return ret; 4341ad8b1aafSjsg 4342c349dbc7Sjsg return 0; 4343c349dbc7Sjsg } 4344c349dbc7Sjsg 4345c349dbc7Sjsg static int 4346c349dbc7Sjsg compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, 43471bb76ff1Sjsg struct intel_crtc_state *crtc_state) 4348c349dbc7Sjsg { 4349c349dbc7Sjsg struct drm_connector *connector = conn_state->connector; 43501bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 4351c349dbc7Sjsg const struct drm_display_info *info = &connector->display_info; 4352c349dbc7Sjsg int bpp; 4353c349dbc7Sjsg 4354c349dbc7Sjsg switch (conn_state->max_bpc) { 4355c349dbc7Sjsg case 6 ... 7: 4356c349dbc7Sjsg bpp = 6 * 3; 4357c349dbc7Sjsg break; 4358c349dbc7Sjsg case 8 ... 9: 4359c349dbc7Sjsg bpp = 8 * 3; 4360c349dbc7Sjsg break; 4361c349dbc7Sjsg case 10 ... 11: 4362c349dbc7Sjsg bpp = 10 * 3; 4363c349dbc7Sjsg break; 4364ad8b1aafSjsg case 12 ... 
16: 4365c349dbc7Sjsg bpp = 12 * 3; 4366c349dbc7Sjsg break; 4367c349dbc7Sjsg default: 4368ad8b1aafSjsg MISSING_CASE(conn_state->max_bpc); 4369c349dbc7Sjsg return -EINVAL; 4370c349dbc7Sjsg } 4371c349dbc7Sjsg 43721bb76ff1Sjsg if (bpp < crtc_state->pipe_bpp) { 4373c349dbc7Sjsg drm_dbg_kms(&i915->drm, 43741bb76ff1Sjsg "[CONNECTOR:%d:%s] Limiting display bpp to %d " 43751bb76ff1Sjsg "(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n", 4376c349dbc7Sjsg connector->base.id, connector->name, 4377c349dbc7Sjsg bpp, 3 * info->bpc, 4378c349dbc7Sjsg 3 * conn_state->max_requested_bpc, 43791bb76ff1Sjsg crtc_state->pipe_bpp); 4380c349dbc7Sjsg 43811bb76ff1Sjsg crtc_state->pipe_bpp = bpp; 4382c349dbc7Sjsg } 4383c349dbc7Sjsg 4384c349dbc7Sjsg return 0; 4385c349dbc7Sjsg } 4386c349dbc7Sjsg 4387c349dbc7Sjsg static int 43881bb76ff1Sjsg compute_baseline_pipe_bpp(struct intel_atomic_state *state, 43891bb76ff1Sjsg struct intel_crtc *crtc) 4390c349dbc7Sjsg { 4391c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 43921bb76ff1Sjsg struct intel_crtc_state *crtc_state = 43931bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4394c349dbc7Sjsg struct drm_connector *connector; 4395c349dbc7Sjsg struct drm_connector_state *connector_state; 4396c349dbc7Sjsg int bpp, i; 4397c349dbc7Sjsg 4398c349dbc7Sjsg if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 4399c349dbc7Sjsg IS_CHERRYVIEW(dev_priv))) 4400c349dbc7Sjsg bpp = 10*3; 44015ca02815Sjsg else if (DISPLAY_VER(dev_priv) >= 5) 4402c349dbc7Sjsg bpp = 12*3; 4403c349dbc7Sjsg else 4404c349dbc7Sjsg bpp = 8*3; 4405c349dbc7Sjsg 44061bb76ff1Sjsg crtc_state->pipe_bpp = bpp; 4407c349dbc7Sjsg 4408c349dbc7Sjsg /* Clamp display bpp to connector max bpp */ 44091bb76ff1Sjsg for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4410c349dbc7Sjsg int ret; 4411c349dbc7Sjsg 4412c349dbc7Sjsg if (connector_state->crtc != &crtc->base) 4413c349dbc7Sjsg continue; 4414c349dbc7Sjsg 44151bb76ff1Sjsg ret = compute_sink_pipe_bpp(connector_state, crtc_state); 4416c349dbc7Sjsg if (ret) 4417c349dbc7Sjsg return ret; 4418c349dbc7Sjsg } 4419c349dbc7Sjsg 4420c349dbc7Sjsg return 0; 4421c349dbc7Sjsg } 4422c349dbc7Sjsg 4423c349dbc7Sjsg static bool check_digital_port_conflicts(struct intel_atomic_state *state) 4424c349dbc7Sjsg { 4425c349dbc7Sjsg struct drm_device *dev = state->base.dev; 4426c349dbc7Sjsg struct drm_connector *connector; 4427c349dbc7Sjsg struct drm_connector_list_iter conn_iter; 4428c349dbc7Sjsg unsigned int used_ports = 0; 4429c349dbc7Sjsg unsigned int used_mst_ports = 0; 4430c349dbc7Sjsg bool ret = true; 4431c349dbc7Sjsg 4432c349dbc7Sjsg /* 4433c349dbc7Sjsg * We're going to peek into connector->state, 4434c349dbc7Sjsg * hence connection_mutex must be held. 4435c349dbc7Sjsg */ 4436c349dbc7Sjsg drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); 4437c349dbc7Sjsg 4438c349dbc7Sjsg /* 4439c349dbc7Sjsg * Walk the connector list instead of the encoder 4440c349dbc7Sjsg * list to detect the problem on ddi platforms 4441c349dbc7Sjsg * where there's just one encoder per digital port. 
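 * (i.e. two connectors that share one DDI encoder could otherwise both
 * be lit up without the port conflict ever being noticed here.)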
4442c349dbc7Sjsg */ 4443c349dbc7Sjsg drm_connector_list_iter_begin(dev, &conn_iter); 4444c349dbc7Sjsg drm_for_each_connector_iter(connector, &conn_iter) { 4445c349dbc7Sjsg struct drm_connector_state *connector_state; 4446c349dbc7Sjsg struct intel_encoder *encoder; 4447c349dbc7Sjsg 4448c349dbc7Sjsg connector_state = 4449c349dbc7Sjsg drm_atomic_get_new_connector_state(&state->base, 4450c349dbc7Sjsg connector); 4451c349dbc7Sjsg if (!connector_state) 4452c349dbc7Sjsg connector_state = connector->state; 4453c349dbc7Sjsg 4454c349dbc7Sjsg if (!connector_state->best_encoder) 4455c349dbc7Sjsg continue; 4456c349dbc7Sjsg 4457c349dbc7Sjsg encoder = to_intel_encoder(connector_state->best_encoder); 4458c349dbc7Sjsg 4459c349dbc7Sjsg drm_WARN_ON(dev, !connector_state->crtc); 4460c349dbc7Sjsg 4461c349dbc7Sjsg switch (encoder->type) { 4462c349dbc7Sjsg case INTEL_OUTPUT_DDI: 4463c349dbc7Sjsg if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) 4464c349dbc7Sjsg break; 4465ad8b1aafSjsg fallthrough; 4466c349dbc7Sjsg case INTEL_OUTPUT_DP: 4467c349dbc7Sjsg case INTEL_OUTPUT_HDMI: 4468c349dbc7Sjsg case INTEL_OUTPUT_EDP: 4469c349dbc7Sjsg /* the same port mustn't appear more than once */ 4470c349dbc7Sjsg if (used_ports & BIT(encoder->port)) 4471c349dbc7Sjsg ret = false; 4472c349dbc7Sjsg 4473c349dbc7Sjsg used_ports |= BIT(encoder->port); 4474c349dbc7Sjsg break; 4475c349dbc7Sjsg case INTEL_OUTPUT_DP_MST: 4476c349dbc7Sjsg used_mst_ports |= 4477c349dbc7Sjsg 1 << encoder->port; 4478c349dbc7Sjsg break; 4479c349dbc7Sjsg default: 4480c349dbc7Sjsg break; 4481c349dbc7Sjsg } 4482c349dbc7Sjsg } 4483c349dbc7Sjsg drm_connector_list_iter_end(&conn_iter); 4484c349dbc7Sjsg 4485c349dbc7Sjsg /* can't mix MST and SST/HDMI on the same port */ 4486c349dbc7Sjsg if (used_ports & used_mst_ports) 4487c349dbc7Sjsg return false; 4488c349dbc7Sjsg 4489c349dbc7Sjsg return ret; 4490c349dbc7Sjsg } 4491c349dbc7Sjsg 4492c349dbc7Sjsg static void 44935ca02815Sjsg intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 44941bb76ff1Sjsg struct intel_crtc *crtc) 4495c349dbc7Sjsg { 44961bb76ff1Sjsg struct intel_crtc_state *crtc_state = 44971bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 44985ca02815Sjsg 44991bb76ff1Sjsg WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 45005ca02815Sjsg 45011bb76ff1Sjsg drm_property_replace_blob(&crtc_state->hw.degamma_lut, 45021bb76ff1Sjsg crtc_state->uapi.degamma_lut); 45031bb76ff1Sjsg drm_property_replace_blob(&crtc_state->hw.gamma_lut, 45041bb76ff1Sjsg crtc_state->uapi.gamma_lut); 45051bb76ff1Sjsg drm_property_replace_blob(&crtc_state->hw.ctm, 45061bb76ff1Sjsg crtc_state->uapi.ctm); 4507c349dbc7Sjsg } 4508c349dbc7Sjsg 4509c349dbc7Sjsg static void 45101bb76ff1Sjsg intel_crtc_copy_uapi_to_hw_state_modeset(struct intel_atomic_state *state, 45111bb76ff1Sjsg struct intel_crtc *crtc) 4512c349dbc7Sjsg { 45131bb76ff1Sjsg struct intel_crtc_state *crtc_state = 45141bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 45151bb76ff1Sjsg 45161bb76ff1Sjsg WARN_ON(intel_crtc_is_bigjoiner_slave(crtc_state)); 45171bb76ff1Sjsg 4518c349dbc7Sjsg crtc_state->hw.enable = crtc_state->uapi.enable; 4519c349dbc7Sjsg crtc_state->hw.active = crtc_state->uapi.active; 45201bb76ff1Sjsg drm_mode_copy(&crtc_state->hw.mode, 45211bb76ff1Sjsg &crtc_state->uapi.mode); 45221bb76ff1Sjsg drm_mode_copy(&crtc_state->hw.adjusted_mode, 45231bb76ff1Sjsg &crtc_state->uapi.adjusted_mode); 45245ca02815Sjsg crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter; 45255ca02815Sjsg 45261bb76ff1Sjsg 
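/* the degamma/gamma/ctm uapi blobs are copied over by the _nomodeset() helper below */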
intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc); 4527c349dbc7Sjsg } 4528c349dbc7Sjsg 45291bb76ff1Sjsg static void 45301bb76ff1Sjsg copy_bigjoiner_crtc_state_nomodeset(struct intel_atomic_state *state, 45311bb76ff1Sjsg struct intel_crtc *slave_crtc) 4532c349dbc7Sjsg { 45331bb76ff1Sjsg struct intel_crtc_state *slave_crtc_state = 45341bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, slave_crtc); 45351bb76ff1Sjsg struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 45361bb76ff1Sjsg const struct intel_crtc_state *master_crtc_state = 45371bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, master_crtc); 45385ca02815Sjsg 45391bb76ff1Sjsg drm_property_replace_blob(&slave_crtc_state->hw.degamma_lut, 45401bb76ff1Sjsg master_crtc_state->hw.degamma_lut); 45411bb76ff1Sjsg drm_property_replace_blob(&slave_crtc_state->hw.gamma_lut, 45421bb76ff1Sjsg master_crtc_state->hw.gamma_lut); 45431bb76ff1Sjsg drm_property_replace_blob(&slave_crtc_state->hw.ctm, 45441bb76ff1Sjsg master_crtc_state->hw.ctm); 4545c349dbc7Sjsg 45461bb76ff1Sjsg slave_crtc_state->uapi.color_mgmt_changed = master_crtc_state->uapi.color_mgmt_changed; 4547c349dbc7Sjsg } 4548c349dbc7Sjsg 4549c349dbc7Sjsg static int 45501bb76ff1Sjsg copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, 45511bb76ff1Sjsg struct intel_crtc *slave_crtc) 45525ca02815Sjsg { 45531bb76ff1Sjsg struct intel_crtc_state *slave_crtc_state = 45541bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, slave_crtc); 45551bb76ff1Sjsg struct intel_crtc *master_crtc = intel_master_crtc(slave_crtc_state); 45561bb76ff1Sjsg const struct intel_crtc_state *master_crtc_state = 45571bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, master_crtc); 45585ca02815Sjsg struct intel_crtc_state *saved_state; 45595ca02815Sjsg 45601bb76ff1Sjsg WARN_ON(master_crtc_state->bigjoiner_pipes != 45611bb76ff1Sjsg slave_crtc_state->bigjoiner_pipes); 45621bb76ff1Sjsg 45631bb76ff1Sjsg saved_state = kmemdup(master_crtc_state, sizeof(*saved_state), GFP_KERNEL); 45645ca02815Sjsg if (!saved_state) 45655ca02815Sjsg return -ENOMEM; 45665ca02815Sjsg 45671bb76ff1Sjsg /* preserve some things from the slave's original crtc state */ 45681bb76ff1Sjsg saved_state->uapi = slave_crtc_state->uapi; 45691bb76ff1Sjsg saved_state->scaler_state = slave_crtc_state->scaler_state; 45701bb76ff1Sjsg saved_state->shared_dpll = slave_crtc_state->shared_dpll; 45711bb76ff1Sjsg saved_state->crc_enabled = slave_crtc_state->crc_enabled; 45725ca02815Sjsg 45731bb76ff1Sjsg intel_crtc_free_hw_state(slave_crtc_state); 45741bb76ff1Sjsg memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); 45755ca02815Sjsg kfree(saved_state); 45765ca02815Sjsg 45775ca02815Sjsg /* Re-init hw state */ 45781bb76ff1Sjsg memset(&slave_crtc_state->hw, 0, sizeof(slave_crtc_state->hw)); 45791bb76ff1Sjsg slave_crtc_state->hw.enable = master_crtc_state->hw.enable; 45801bb76ff1Sjsg slave_crtc_state->hw.active = master_crtc_state->hw.active; 45811bb76ff1Sjsg drm_mode_copy(&slave_crtc_state->hw.mode, 45821bb76ff1Sjsg &master_crtc_state->hw.mode); 45831bb76ff1Sjsg drm_mode_copy(&slave_crtc_state->hw.pipe_mode, 45841bb76ff1Sjsg &master_crtc_state->hw.pipe_mode); 45851bb76ff1Sjsg drm_mode_copy(&slave_crtc_state->hw.adjusted_mode, 45861bb76ff1Sjsg &master_crtc_state->hw.adjusted_mode); 45871bb76ff1Sjsg slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; 45885ca02815Sjsg 45891bb76ff1Sjsg copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); 45901bb76ff1Sjsg 45911bb76ff1Sjsg slave_crtc_state->uapi.mode_changed 
= master_crtc_state->uapi.mode_changed; 45921bb76ff1Sjsg slave_crtc_state->uapi.connectors_changed = master_crtc_state->uapi.connectors_changed; 45931bb76ff1Sjsg slave_crtc_state->uapi.active_changed = master_crtc_state->uapi.active_changed; 45941bb76ff1Sjsg 45951bb76ff1Sjsg WARN_ON(master_crtc_state->bigjoiner_pipes != 45961bb76ff1Sjsg slave_crtc_state->bigjoiner_pipes); 45975ca02815Sjsg 45985ca02815Sjsg return 0; 45995ca02815Sjsg } 46005ca02815Sjsg 46015ca02815Sjsg static int 46025ca02815Sjsg intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, 46031bb76ff1Sjsg struct intel_crtc *crtc) 4604c349dbc7Sjsg { 46051bb76ff1Sjsg struct intel_crtc_state *crtc_state = 46061bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4607c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4608c349dbc7Sjsg struct intel_crtc_state *saved_state; 4609c349dbc7Sjsg 4610c349dbc7Sjsg saved_state = intel_crtc_state_alloc(crtc); 4611c349dbc7Sjsg if (!saved_state) 4612c349dbc7Sjsg return -ENOMEM; 4613c349dbc7Sjsg 4614c349dbc7Sjsg /* free the old crtc_state->hw members */ 4615c349dbc7Sjsg intel_crtc_free_hw_state(crtc_state); 4616c349dbc7Sjsg 4617c349dbc7Sjsg /* FIXME: before the switch to atomic started, a new pipe_config was 4618c349dbc7Sjsg * kzalloc'd. Code that depends on any field being zero should be 4619c349dbc7Sjsg * fixed, so that the crtc_state can be safely duplicated. For now, 4620c349dbc7Sjsg * only fields that are known not to cause problems are preserved. */ 4621c349dbc7Sjsg 4622c349dbc7Sjsg saved_state->uapi = crtc_state->uapi; 4623789f0c70Sjsg saved_state->inherited = crtc_state->inherited; 4624c349dbc7Sjsg saved_state->scaler_state = crtc_state->scaler_state; 4625c349dbc7Sjsg saved_state->shared_dpll = crtc_state->shared_dpll; 4626c349dbc7Sjsg saved_state->dpll_hw_state = crtc_state->dpll_hw_state; 4627c349dbc7Sjsg memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, 4628c349dbc7Sjsg sizeof(saved_state->icl_port_dplls)); 4629c349dbc7Sjsg saved_state->crc_enabled = crtc_state->crc_enabled; 4630c349dbc7Sjsg if (IS_G4X(dev_priv) || 4631c349dbc7Sjsg IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4632c349dbc7Sjsg saved_state->wm = crtc_state->wm; 4633c349dbc7Sjsg 4634c349dbc7Sjsg memcpy(crtc_state, saved_state, sizeof(*crtc_state)); 4635c349dbc7Sjsg kfree(saved_state); 4636c349dbc7Sjsg 46371bb76ff1Sjsg intel_crtc_copy_uapi_to_hw_state_modeset(state, crtc); 4638c349dbc7Sjsg 4639c349dbc7Sjsg return 0; 4640c349dbc7Sjsg } 4641c349dbc7Sjsg 4642c349dbc7Sjsg static int 46435ca02815Sjsg intel_modeset_pipe_config(struct intel_atomic_state *state, 46441bb76ff1Sjsg struct intel_crtc *crtc) 4645c349dbc7Sjsg { 46461bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 46471bb76ff1Sjsg struct intel_crtc_state *crtc_state = 46481bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4649c349dbc7Sjsg struct drm_connector *connector; 4650c349dbc7Sjsg struct drm_connector_state *connector_state; 46511bb76ff1Sjsg int pipe_src_w, pipe_src_h; 4652c349dbc7Sjsg int base_bpp, ret, i; 4653c349dbc7Sjsg bool retry = true; 4654c349dbc7Sjsg 46551bb76ff1Sjsg crtc_state->cpu_transcoder = (enum transcoder) crtc->pipe; 46561bb76ff1Sjsg 46571bb76ff1Sjsg crtc_state->framestart_delay = 1; 4658c349dbc7Sjsg 4659c349dbc7Sjsg /* 4660c349dbc7Sjsg * Sanitize sync polarity flags based on requested ones. If neither 4661c349dbc7Sjsg * positive nor negative polarity is requested, treat this as meaning 4662c349dbc7Sjsg * negative polarity.
4663c349dbc7Sjsg */ 46641bb76ff1Sjsg if (!(crtc_state->hw.adjusted_mode.flags & 4665c349dbc7Sjsg (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 46661bb76ff1Sjsg crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 4667c349dbc7Sjsg 46681bb76ff1Sjsg if (!(crtc_state->hw.adjusted_mode.flags & 4669c349dbc7Sjsg (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 46701bb76ff1Sjsg crtc_state->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 4671c349dbc7Sjsg 46721bb76ff1Sjsg ret = compute_baseline_pipe_bpp(state, crtc); 4673c349dbc7Sjsg if (ret) 4674c349dbc7Sjsg return ret; 4675c349dbc7Sjsg 46761bb76ff1Sjsg base_bpp = crtc_state->pipe_bpp; 4677c349dbc7Sjsg 4678c349dbc7Sjsg /* 4679c349dbc7Sjsg * Determine the real pipe dimensions. Note that stereo modes can 4680c349dbc7Sjsg * increase the actual pipe size due to the frame doubling and 4681c349dbc7Sjsg * insertion of additional space for blanks between the frame. This 4682c349dbc7Sjsg * is stored in the crtc timings. We use the requested mode to do this 4683c349dbc7Sjsg * computation to clearly distinguish it from the adjusted mode, which 4684c349dbc7Sjsg * can be changed by the connectors in the below retry loop. 4685c349dbc7Sjsg */ 46861bb76ff1Sjsg drm_mode_get_hv_timing(&crtc_state->hw.mode, 46871bb76ff1Sjsg &pipe_src_w, &pipe_src_h); 46881bb76ff1Sjsg drm_rect_init(&crtc_state->pipe_src, 0, 0, 46891bb76ff1Sjsg pipe_src_w, pipe_src_h); 4690c349dbc7Sjsg 46915ca02815Sjsg for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4692c349dbc7Sjsg struct intel_encoder *encoder = 4693c349dbc7Sjsg to_intel_encoder(connector_state->best_encoder); 4694c349dbc7Sjsg 46951bb76ff1Sjsg if (connector_state->crtc != &crtc->base) 4696c349dbc7Sjsg continue; 4697c349dbc7Sjsg 46981bb76ff1Sjsg if (!check_single_encoder_cloning(state, crtc, encoder)) { 4699c349dbc7Sjsg drm_dbg_kms(&i915->drm, 47001bb76ff1Sjsg "[ENCODER:%d:%s] rejecting invalid cloning configuration\n", 47011bb76ff1Sjsg encoder->base.base.id, encoder->base.name); 4702c349dbc7Sjsg return -EINVAL; 4703c349dbc7Sjsg } 4704c349dbc7Sjsg 4705c349dbc7Sjsg /* 4706c349dbc7Sjsg * Determine output_types before calling the .compute_config() 4707c349dbc7Sjsg * hooks so that the hooks can use this information safely. 4708c349dbc7Sjsg */ 4709c349dbc7Sjsg if (encoder->compute_output_type) 47101bb76ff1Sjsg crtc_state->output_types |= 47111bb76ff1Sjsg BIT(encoder->compute_output_type(encoder, crtc_state, 4712c349dbc7Sjsg connector_state)); 4713c349dbc7Sjsg else 47141bb76ff1Sjsg crtc_state->output_types |= BIT(encoder->type); 4715c349dbc7Sjsg } 4716c349dbc7Sjsg 4717c349dbc7Sjsg encoder_retry: 4718c349dbc7Sjsg /* Ensure the port clock defaults are reset when retrying. */ 47191bb76ff1Sjsg crtc_state->port_clock = 0; 47201bb76ff1Sjsg crtc_state->pixel_multiplier = 1; 4721c349dbc7Sjsg 4722c349dbc7Sjsg /* Fill in default crtc timings, allow encoders to overwrite them. */ 47231bb76ff1Sjsg drm_mode_set_crtcinfo(&crtc_state->hw.adjusted_mode, 4724c349dbc7Sjsg CRTC_STEREO_DOUBLE); 4725c349dbc7Sjsg 4726c349dbc7Sjsg /* Pass our mode to the connectors and the CRTC to give them a chance to 4727c349dbc7Sjsg * adjust it according to limitations or connector properties, and also 4728c349dbc7Sjsg * a chance to reject the mode entirely. 
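 * (A .compute_config() hook may also pick port_clock and pixel_multiplier
 * here; -EDEADLK from it is simply propagated so the locking can be retried.)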
4729c349dbc7Sjsg */ 47305ca02815Sjsg for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 4731c349dbc7Sjsg struct intel_encoder *encoder = 4732c349dbc7Sjsg to_intel_encoder(connector_state->best_encoder); 4733c349dbc7Sjsg 47341bb76ff1Sjsg if (connector_state->crtc != &crtc->base) 4735c349dbc7Sjsg continue; 4736c349dbc7Sjsg 47371bb76ff1Sjsg ret = encoder->compute_config(encoder, crtc_state, 4738c349dbc7Sjsg connector_state); 47391bb76ff1Sjsg if (ret == -EDEADLK) 47401bb76ff1Sjsg return ret; 4741c349dbc7Sjsg if (ret < 0) { 47421bb76ff1Sjsg drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] config failure: %d\n", 47431bb76ff1Sjsg encoder->base.base.id, encoder->base.name, ret); 4744c349dbc7Sjsg return ret; 4745c349dbc7Sjsg } 4746c349dbc7Sjsg } 4747c349dbc7Sjsg 4748c349dbc7Sjsg /* Set default port clock if not overwritten by the encoder. Needs to be 4749c349dbc7Sjsg * done afterwards in case the encoder adjusts the mode. */ 47501bb76ff1Sjsg if (!crtc_state->port_clock) 47511bb76ff1Sjsg crtc_state->port_clock = crtc_state->hw.adjusted_mode.crtc_clock 47521bb76ff1Sjsg * crtc_state->pixel_multiplier; 4753c349dbc7Sjsg 47541bb76ff1Sjsg ret = intel_crtc_compute_config(state, crtc); 4755c349dbc7Sjsg if (ret == -EDEADLK) 4756c349dbc7Sjsg return ret; 47571bb76ff1Sjsg if (ret == -EAGAIN) { 4758c349dbc7Sjsg if (drm_WARN(&i915->drm, !retry, 47591bb76ff1Sjsg "[CRTC:%d:%s] loop in pipe configuration computation\n", 47601bb76ff1Sjsg crtc->base.base.id, crtc->base.name)) 4761c349dbc7Sjsg return -EINVAL; 4762c349dbc7Sjsg 47631bb76ff1Sjsg drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] bw constrained, retrying\n", 47641bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 4765c349dbc7Sjsg retry = false; 4766c349dbc7Sjsg goto encoder_retry; 4767c349dbc7Sjsg } 47681bb76ff1Sjsg if (ret < 0) { 47691bb76ff1Sjsg drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] config failure: %d\n", 47701bb76ff1Sjsg crtc->base.base.id, crtc->base.name, ret); 47711bb76ff1Sjsg return ret; 47721bb76ff1Sjsg } 4773c349dbc7Sjsg 4774c349dbc7Sjsg /* Dithering seems to not pass bits through correctly when it should, so 4775c349dbc7Sjsg * only enable it on 6bpc panels and when it's not a compliance 4776c349dbc7Sjsg * test requesting 6bpc video pattern.
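 * (dither_force_disable is presumably what such a compliance test would
 * set to get an undithered 6bpc pattern.)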
4777c349dbc7Sjsg */ 47781bb76ff1Sjsg crtc_state->dither = (crtc_state->pipe_bpp == 6*3) && 47791bb76ff1Sjsg !crtc_state->dither_force_disable; 4780c349dbc7Sjsg drm_dbg_kms(&i915->drm, 47811bb76ff1Sjsg "[CRTC:%d:%s] hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 47821bb76ff1Sjsg crtc->base.base.id, crtc->base.name, 47831bb76ff1Sjsg base_bpp, crtc_state->pipe_bpp, crtc_state->dither); 4784c349dbc7Sjsg 4785c349dbc7Sjsg return 0; 4786c349dbc7Sjsg } 4787c349dbc7Sjsg 4788c349dbc7Sjsg static int 47891bb76ff1Sjsg intel_modeset_pipe_config_late(struct intel_atomic_state *state, 47901bb76ff1Sjsg struct intel_crtc *crtc) 4791c349dbc7Sjsg { 47921bb76ff1Sjsg struct intel_crtc_state *crtc_state = 47931bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc); 4794c349dbc7Sjsg struct drm_connector_state *conn_state; 4795c349dbc7Sjsg struct drm_connector *connector; 4796c349dbc7Sjsg int i; 4797c349dbc7Sjsg 47981bb76ff1Sjsg intel_bigjoiner_adjust_pipe_src(crtc_state); 47991bb76ff1Sjsg 4800c349dbc7Sjsg for_each_new_connector_in_state(&state->base, connector, 4801c349dbc7Sjsg conn_state, i) { 4802c349dbc7Sjsg struct intel_encoder *encoder = 4803c349dbc7Sjsg to_intel_encoder(conn_state->best_encoder); 4804c349dbc7Sjsg int ret; 4805c349dbc7Sjsg 4806c349dbc7Sjsg if (conn_state->crtc != &crtc->base || 4807c349dbc7Sjsg !encoder->compute_config_late) 4808c349dbc7Sjsg continue; 4809c349dbc7Sjsg 4810c349dbc7Sjsg ret = encoder->compute_config_late(encoder, crtc_state, 4811c349dbc7Sjsg conn_state); 4812c349dbc7Sjsg if (ret) 4813c349dbc7Sjsg return ret; 4814c349dbc7Sjsg } 4815c349dbc7Sjsg 4816c349dbc7Sjsg return 0; 4817c349dbc7Sjsg } 4818c349dbc7Sjsg 4819c349dbc7Sjsg bool intel_fuzzy_clock_check(int clock1, int clock2) 4820c349dbc7Sjsg { 4821c349dbc7Sjsg int diff; 4822c349dbc7Sjsg 4823c349dbc7Sjsg if (clock1 == clock2) 4824c349dbc7Sjsg return true; 4825c349dbc7Sjsg 4826c349dbc7Sjsg if (!clock1 || !clock2) 4827c349dbc7Sjsg return false; 4828c349dbc7Sjsg 4829c349dbc7Sjsg diff = abs(clock1 - clock2); 4830c349dbc7Sjsg 4831c349dbc7Sjsg if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 4832c349dbc7Sjsg return true; 4833c349dbc7Sjsg 4834c349dbc7Sjsg return false; 4835c349dbc7Sjsg } 4836c349dbc7Sjsg 4837c349dbc7Sjsg static bool 4838c349dbc7Sjsg intel_compare_link_m_n(const struct intel_link_m_n *m_n, 48391bb76ff1Sjsg const struct intel_link_m_n *m2_n2) 4840c349dbc7Sjsg { 4841c349dbc7Sjsg return m_n->tu == m2_n2->tu && 48421bb76ff1Sjsg m_n->data_m == m2_n2->data_m && 48431bb76ff1Sjsg m_n->data_n == m2_n2->data_n && 48441bb76ff1Sjsg m_n->link_m == m2_n2->link_m && 48451bb76ff1Sjsg m_n->link_n == m2_n2->link_n; 4846c349dbc7Sjsg } 4847c349dbc7Sjsg 4848c349dbc7Sjsg static bool 4849c349dbc7Sjsg intel_compare_infoframe(const union hdmi_infoframe *a, 4850c349dbc7Sjsg const union hdmi_infoframe *b) 4851c349dbc7Sjsg { 4852c349dbc7Sjsg return memcmp(a, b, sizeof(*a)) == 0; 4853c349dbc7Sjsg } 4854c349dbc7Sjsg 4855ad8b1aafSjsg static bool 4856ad8b1aafSjsg intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a, 4857ad8b1aafSjsg const struct drm_dp_vsc_sdp *b) 4858ad8b1aafSjsg { 4859ad8b1aafSjsg return memcmp(a, b, sizeof(*a)) == 0; 4860ad8b1aafSjsg } 4861ad8b1aafSjsg 4862f005ef32Sjsg static bool 4863f005ef32Sjsg intel_compare_buffer(const u8 *a, const u8 *b, size_t len) 4864f005ef32Sjsg { 4865f005ef32Sjsg return memcmp(a, b, len) == 0; 4866f005ef32Sjsg } 4867f005ef32Sjsg 4868c349dbc7Sjsg static void 4869c349dbc7Sjsg pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, 4870c349dbc7Sjsg bool fastset, const char 
*name, 4871c349dbc7Sjsg const union hdmi_infoframe *a, 4872c349dbc7Sjsg const union hdmi_infoframe *b) 4873c349dbc7Sjsg { 4874c349dbc7Sjsg if (fastset) { 4875c349dbc7Sjsg if (!drm_debug_enabled(DRM_UT_KMS)) 4876c349dbc7Sjsg return; 4877c349dbc7Sjsg 4878c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 4879f005ef32Sjsg "fastset requirement not met in %s infoframe\n", name); 4880c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "expected:\n"); 4881c349dbc7Sjsg hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); 4882c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "found:\n"); 4883c349dbc7Sjsg hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); 4884c349dbc7Sjsg } else { 4885c349dbc7Sjsg drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); 4886c349dbc7Sjsg drm_err(&dev_priv->drm, "expected:\n"); 4887c349dbc7Sjsg hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); 4888c349dbc7Sjsg drm_err(&dev_priv->drm, "found:\n"); 4889c349dbc7Sjsg hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); 4890c349dbc7Sjsg } 4891c349dbc7Sjsg } 4892c349dbc7Sjsg 4893ad8b1aafSjsg static void 4894ad8b1aafSjsg pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, 4895ad8b1aafSjsg bool fastset, const char *name, 4896ad8b1aafSjsg const struct drm_dp_vsc_sdp *a, 4897ad8b1aafSjsg const struct drm_dp_vsc_sdp *b) 4898ad8b1aafSjsg { 4899ad8b1aafSjsg if (fastset) { 4900ad8b1aafSjsg if (!drm_debug_enabled(DRM_UT_KMS)) 4901ad8b1aafSjsg return; 4902ad8b1aafSjsg 4903ad8b1aafSjsg drm_dbg_kms(&dev_priv->drm, 4904f005ef32Sjsg "fastset requirement not met in %s dp sdp\n", name); 4905ad8b1aafSjsg drm_dbg_kms(&dev_priv->drm, "expected:\n"); 4906ad8b1aafSjsg drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a); 4907ad8b1aafSjsg drm_dbg_kms(&dev_priv->drm, "found:\n"); 4908ad8b1aafSjsg drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b); 4909ad8b1aafSjsg } else { 4910ad8b1aafSjsg drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name); 4911ad8b1aafSjsg drm_err(&dev_priv->drm, "expected:\n"); 4912ad8b1aafSjsg drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a); 4913ad8b1aafSjsg drm_err(&dev_priv->drm, "found:\n"); 4914ad8b1aafSjsg drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b); 4915ad8b1aafSjsg } 4916ad8b1aafSjsg } 4917ad8b1aafSjsg 4918f005ef32Sjsg /* Returns the length up to and including the last differing byte */ 4919f005ef32Sjsg static size_t 4920f005ef32Sjsg memcmp_diff_len(const u8 *a, const u8 *b, size_t len) 4921f005ef32Sjsg { 4922f005ef32Sjsg int i; 4923f005ef32Sjsg 4924f005ef32Sjsg for (i = len - 1; i >= 0; i--) { 4925f005ef32Sjsg if (a[i] != b[i]) 4926f005ef32Sjsg return i + 1; 4927f005ef32Sjsg } 4928f005ef32Sjsg 4929f005ef32Sjsg return 0; 4930f005ef32Sjsg } 4931f005ef32Sjsg 4932f005ef32Sjsg static void 4933f005ef32Sjsg pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, 4934f005ef32Sjsg bool fastset, const char *name, 4935f005ef32Sjsg const u8 *a, const u8 *b, size_t len) 4936f005ef32Sjsg { 4937f005ef32Sjsg if (fastset) { 4938f005ef32Sjsg if (!drm_debug_enabled(DRM_UT_KMS)) 4939f005ef32Sjsg return; 4940f005ef32Sjsg 4941f005ef32Sjsg /* only dump up to the last difference */ 4942f005ef32Sjsg len = memcmp_diff_len(a, b, len); 4943f005ef32Sjsg 4944f005ef32Sjsg drm_dbg_kms(&dev_priv->drm, 4945f005ef32Sjsg "fastset requirement not met in %s buffer\n", name); 4946f005ef32Sjsg print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, 4947f005ef32Sjsg 16, 0, a, len, false); 4948f005ef32Sjsg print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 4949f005ef32Sjsg 16, 0, b, len, false); 4950f005ef32Sjsg } else { 
4951f005ef32Sjsg /* only dump up to the last difference */ 4952f005ef32Sjsg len = memcmp_diff_len(a, b, len); 4953f005ef32Sjsg 4954f005ef32Sjsg drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); 4955f005ef32Sjsg print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 4956f005ef32Sjsg 16, 0, a, len, false); 4957f005ef32Sjsg print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE, 4958f005ef32Sjsg 16, 0, b, len, false); 4959f005ef32Sjsg } 4960f005ef32Sjsg } 4961f005ef32Sjsg 4962c349dbc7Sjsg static void __printf(4, 5) 4963c349dbc7Sjsg pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, 4964c349dbc7Sjsg const char *name, const char *format, ...) 4965c349dbc7Sjsg { 4966c349dbc7Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 4967c349dbc7Sjsg struct va_format vaf; 4968c349dbc7Sjsg va_list args; 4969c349dbc7Sjsg 4970c349dbc7Sjsg va_start(args, format); 4971c349dbc7Sjsg vaf.fmt = format; 4972c349dbc7Sjsg vaf.va = &args; 4973c349dbc7Sjsg 4974c349dbc7Sjsg if (fastset) 4975c349dbc7Sjsg drm_dbg_kms(&i915->drm, 4976f005ef32Sjsg "[CRTC:%d:%s] fastset requirement not met in %s %pV\n", 4977c349dbc7Sjsg crtc->base.base.id, crtc->base.name, name, &vaf); 4978c349dbc7Sjsg else 4979c349dbc7Sjsg drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", 4980c349dbc7Sjsg crtc->base.base.id, crtc->base.name, name, &vaf); 4981c349dbc7Sjsg 4982c349dbc7Sjsg va_end(args); 4983c349dbc7Sjsg } 4984c349dbc7Sjsg 4985c349dbc7Sjsg static bool fastboot_enabled(struct drm_i915_private *dev_priv) 4986c349dbc7Sjsg { 4987ad8b1aafSjsg if (dev_priv->params.fastboot != -1) 4988ad8b1aafSjsg return dev_priv->params.fastboot; 4989c349dbc7Sjsg 4990c349dbc7Sjsg /* Enable fastboot by default on Skylake and newer */ 49915ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) 4992c349dbc7Sjsg return true; 4993c349dbc7Sjsg 4994c349dbc7Sjsg /* Enable fastboot by default on VLV and CHV */ 4995c349dbc7Sjsg if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4996c349dbc7Sjsg return true; 4997c349dbc7Sjsg 4998c349dbc7Sjsg /* Disabled by default on all others */ 4999c349dbc7Sjsg return false; 5000c349dbc7Sjsg } 5001c349dbc7Sjsg 50021bb76ff1Sjsg bool 5003c349dbc7Sjsg intel_pipe_config_compare(const struct intel_crtc_state *current_config, 5004c349dbc7Sjsg const struct intel_crtc_state *pipe_config, 5005c349dbc7Sjsg bool fastset) 5006c349dbc7Sjsg { 5007c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev); 5008c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 5009c349dbc7Sjsg bool ret = true; 5010c349dbc7Sjsg bool fixup_inherited = fastset && 5011ad8b1aafSjsg current_config->inherited && !pipe_config->inherited; 5012c349dbc7Sjsg 5013c349dbc7Sjsg if (fixup_inherited && !fastboot_enabled(dev_priv)) { 5014c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 5015c349dbc7Sjsg "initial modeset and fastboot not set\n"); 5016c349dbc7Sjsg ret = false; 5017c349dbc7Sjsg } 5018c349dbc7Sjsg 5019c349dbc7Sjsg #define PIPE_CONF_CHECK_X(name) do { \ 5020c349dbc7Sjsg if (current_config->name != pipe_config->name) { \ 5021c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5022c349dbc7Sjsg "(expected 0x%08x, found 0x%08x)", \ 5023c349dbc7Sjsg current_config->name, \ 5024c349dbc7Sjsg pipe_config->name); \ 5025c349dbc7Sjsg ret = false; \ 5026c349dbc7Sjsg } \ 5027c349dbc7Sjsg } while (0) 5028c349dbc7Sjsg 50295ca02815Sjsg #define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \ 50305ca02815Sjsg if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \ 50315ca02815Sjsg 
pipe_config_mismatch(fastset, crtc, __stringify(name), \ 50325ca02815Sjsg "(expected 0x%08x, found 0x%08x)", \ 50335ca02815Sjsg current_config->name & (mask), \ 50345ca02815Sjsg pipe_config->name & (mask)); \ 50355ca02815Sjsg ret = false; \ 50365ca02815Sjsg } \ 50375ca02815Sjsg } while (0) 50385ca02815Sjsg 5039c349dbc7Sjsg #define PIPE_CONF_CHECK_I(name) do { \ 5040c349dbc7Sjsg if (current_config->name != pipe_config->name) { \ 5041c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5042c349dbc7Sjsg "(expected %i, found %i)", \ 5043c349dbc7Sjsg current_config->name, \ 5044c349dbc7Sjsg pipe_config->name); \ 5045c349dbc7Sjsg ret = false; \ 5046c349dbc7Sjsg } \ 5047c349dbc7Sjsg } while (0) 5048c349dbc7Sjsg 5049c349dbc7Sjsg #define PIPE_CONF_CHECK_BOOL(name) do { \ 5050c349dbc7Sjsg if (current_config->name != pipe_config->name) { \ 5051c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5052c349dbc7Sjsg "(expected %s, found %s)", \ 50531bb76ff1Sjsg str_yes_no(current_config->name), \ 50541bb76ff1Sjsg str_yes_no(pipe_config->name)); \ 5055c349dbc7Sjsg ret = false; \ 5056c349dbc7Sjsg } \ 5057c349dbc7Sjsg } while (0) 5058c349dbc7Sjsg 5059c349dbc7Sjsg /* 5060c349dbc7Sjsg * Checks state where we only read out the enabling, but not the entire 5061c349dbc7Sjsg * state itself (like full infoframes or ELD for audio). These states 5062c349dbc7Sjsg * require a full modeset on bootup to fix up. 5063c349dbc7Sjsg */ 5064c349dbc7Sjsg #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 5065c349dbc7Sjsg if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 5066c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(name); \ 5067c349dbc7Sjsg } else { \ 5068c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5069c349dbc7Sjsg "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 50701bb76ff1Sjsg str_yes_no(current_config->name), \ 50711bb76ff1Sjsg str_yes_no(pipe_config->name)); \ 5072c349dbc7Sjsg ret = false; \ 5073c349dbc7Sjsg } \ 5074c349dbc7Sjsg } while (0) 5075c349dbc7Sjsg 5076c349dbc7Sjsg #define PIPE_CONF_CHECK_P(name) do { \ 5077c349dbc7Sjsg if (current_config->name != pipe_config->name) { \ 5078c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5079c349dbc7Sjsg "(expected %p, found %p)", \ 5080c349dbc7Sjsg current_config->name, \ 5081c349dbc7Sjsg pipe_config->name); \ 5082c349dbc7Sjsg ret = false; \ 5083c349dbc7Sjsg } \ 5084c349dbc7Sjsg } while (0) 5085c349dbc7Sjsg 5086c349dbc7Sjsg #define PIPE_CONF_CHECK_M_N(name) do { \ 5087c349dbc7Sjsg if (!intel_compare_link_m_n(¤t_config->name, \ 50881bb76ff1Sjsg &pipe_config->name)) { \ 5089c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 50901bb76ff1Sjsg "(expected tu %i data %i/%i link %i/%i, " \ 50911bb76ff1Sjsg "found tu %i, data %i/%i link %i/%i)", \ 5092c349dbc7Sjsg current_config->name.tu, \ 50931bb76ff1Sjsg current_config->name.data_m, \ 50941bb76ff1Sjsg current_config->name.data_n, \ 5095c349dbc7Sjsg current_config->name.link_m, \ 5096c349dbc7Sjsg current_config->name.link_n, \ 5097c349dbc7Sjsg pipe_config->name.tu, \ 50981bb76ff1Sjsg pipe_config->name.data_m, \ 50991bb76ff1Sjsg pipe_config->name.data_n, \ 5100c349dbc7Sjsg pipe_config->name.link_m, \ 5101c349dbc7Sjsg pipe_config->name.link_n); \ 5102c349dbc7Sjsg ret = false; \ 5103c349dbc7Sjsg } \ 5104c349dbc7Sjsg } while (0) 5105c349dbc7Sjsg 51061bb76ff1Sjsg #define PIPE_CONF_CHECK_TIMINGS(name) do { \ 51071bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_hdisplay); \ 
51081bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_htotal); \ 51091bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_hblank_start); \ 51101bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_hblank_end); \ 51111bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_hsync_start); \ 51121bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_hsync_end); \ 51131bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vdisplay); \ 51141bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vtotal); \ 51151bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vblank_start); \ 51161bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vblank_end); \ 51171bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vsync_start); \ 51181bb76ff1Sjsg PIPE_CONF_CHECK_I(name.crtc_vsync_end); \ 51191bb76ff1Sjsg } while (0) 51201bb76ff1Sjsg 51211bb76ff1Sjsg #define PIPE_CONF_CHECK_RECT(name) do { \ 51221bb76ff1Sjsg PIPE_CONF_CHECK_I(name.x1); \ 51231bb76ff1Sjsg PIPE_CONF_CHECK_I(name.x2); \ 51241bb76ff1Sjsg PIPE_CONF_CHECK_I(name.y1); \ 51251bb76ff1Sjsg PIPE_CONF_CHECK_I(name.y2); \ 51261bb76ff1Sjsg } while (0) 51271bb76ff1Sjsg 5128c349dbc7Sjsg #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ 5129c349dbc7Sjsg if ((current_config->name ^ pipe_config->name) & (mask)) { \ 5130c349dbc7Sjsg pipe_config_mismatch(fastset, crtc, __stringify(name), \ 5131c349dbc7Sjsg "(%x) (expected %i, found %i)", \ 5132c349dbc7Sjsg (mask), \ 5133c349dbc7Sjsg current_config->name & (mask), \ 5134c349dbc7Sjsg pipe_config->name & (mask)); \ 5135c349dbc7Sjsg ret = false; \ 5136c349dbc7Sjsg } \ 5137c349dbc7Sjsg } while (0) 5138c349dbc7Sjsg 5139c349dbc7Sjsg #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ 5140c349dbc7Sjsg if (!intel_compare_infoframe(¤t_config->infoframes.name, \ 5141c349dbc7Sjsg &pipe_config->infoframes.name)) { \ 5142c349dbc7Sjsg pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \ 5143c349dbc7Sjsg ¤t_config->infoframes.name, \ 5144c349dbc7Sjsg &pipe_config->infoframes.name); \ 5145c349dbc7Sjsg ret = false; \ 5146c349dbc7Sjsg } \ 5147c349dbc7Sjsg } while (0) 5148c349dbc7Sjsg 5149ad8b1aafSjsg #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \ 5150ad8b1aafSjsg if (!current_config->has_psr && !pipe_config->has_psr && \ 5151ad8b1aafSjsg !intel_compare_dp_vsc_sdp(¤t_config->infoframes.name, \ 5152ad8b1aafSjsg &pipe_config->infoframes.name)) { \ 5153ad8b1aafSjsg pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \ 5154ad8b1aafSjsg ¤t_config->infoframes.name, \ 5155ad8b1aafSjsg &pipe_config->infoframes.name); \ 5156ad8b1aafSjsg ret = false; \ 5157ad8b1aafSjsg } \ 5158ad8b1aafSjsg } while (0) 5159ad8b1aafSjsg 5160f005ef32Sjsg #define PIPE_CONF_CHECK_BUFFER(name, len) do { \ 5161f005ef32Sjsg BUILD_BUG_ON(sizeof(current_config->name) != (len)); \ 5162f005ef32Sjsg BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \ 5163f005ef32Sjsg if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \ 5164f005ef32Sjsg pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \ 5165f005ef32Sjsg current_config->name, \ 5166f005ef32Sjsg pipe_config->name, \ 5167f005ef32Sjsg (len)); \ 5168c349dbc7Sjsg ret = false; \ 5169f005ef32Sjsg } \ 5170f005ef32Sjsg } while (0) 5171f005ef32Sjsg 5172f005ef32Sjsg #define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \ 5173f005ef32Sjsg if (current_config->gamma_mode == pipe_config->gamma_mode && \ 5174f005ef32Sjsg !intel_color_lut_equal(current_config, \ 5175f005ef32Sjsg current_config->lut, pipe_config->lut, \ 5176f005ef32Sjsg is_pre_csc_lut)) { \ 5177f005ef32Sjsg pipe_config_mismatch(fastset, crtc, __stringify(lut), \ 5178c349dbc7Sjsg "hw_state doesn't match sw_state"); \ 
5179c349dbc7Sjsg ret = false; \ 5180c349dbc7Sjsg } \ 5181f005ef32Sjsg } while (0) 5182f005ef32Sjsg 5183f005ef32Sjsg #define PIPE_CONF_CHECK_CSC(name) do { \ 5184f005ef32Sjsg PIPE_CONF_CHECK_X(name.preoff[0]); \ 5185f005ef32Sjsg PIPE_CONF_CHECK_X(name.preoff[1]); \ 5186f005ef32Sjsg PIPE_CONF_CHECK_X(name.preoff[2]); \ 5187f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[0]); \ 5188f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[1]); \ 5189f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[2]); \ 5190f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[3]); \ 5191f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[4]); \ 5192f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[5]); \ 5193f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[6]); \ 5194f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[7]); \ 5195f005ef32Sjsg PIPE_CONF_CHECK_X(name.coeff[8]); \ 5196f005ef32Sjsg PIPE_CONF_CHECK_X(name.postoff[0]); \ 5197f005ef32Sjsg PIPE_CONF_CHECK_X(name.postoff[1]); \ 5198f005ef32Sjsg PIPE_CONF_CHECK_X(name.postoff[2]); \ 5199c349dbc7Sjsg } while (0) 5200c349dbc7Sjsg 5201c349dbc7Sjsg #define PIPE_CONF_QUIRK(quirk) \ 5202c349dbc7Sjsg ((current_config->quirks | pipe_config->quirks) & (quirk)) 5203c349dbc7Sjsg 52041bb76ff1Sjsg PIPE_CONF_CHECK_I(hw.enable); 52051bb76ff1Sjsg PIPE_CONF_CHECK_I(hw.active); 52061bb76ff1Sjsg 5207c349dbc7Sjsg PIPE_CONF_CHECK_I(cpu_transcoder); 52081bb76ff1Sjsg PIPE_CONF_CHECK_I(mst_master_transcoder); 5209c349dbc7Sjsg 5210c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(has_pch_encoder); 5211c349dbc7Sjsg PIPE_CONF_CHECK_I(fdi_lanes); 5212c349dbc7Sjsg PIPE_CONF_CHECK_M_N(fdi_m_n); 5213c349dbc7Sjsg 5214c349dbc7Sjsg PIPE_CONF_CHECK_I(lane_count); 5215c349dbc7Sjsg PIPE_CONF_CHECK_X(lane_lat_optim_mask); 5216c349dbc7Sjsg 52171bb76ff1Sjsg if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { 52181b89dd6fSjsg if (!fastset || !pipe_config->update_m_n) 5219f005ef32Sjsg PIPE_CONF_CHECK_M_N(dp_m_n); 52201bb76ff1Sjsg } else { 52211bb76ff1Sjsg PIPE_CONF_CHECK_M_N(dp_m_n); 52221bb76ff1Sjsg PIPE_CONF_CHECK_M_N(dp_m2_n2); 52231bb76ff1Sjsg } 5224c349dbc7Sjsg 5225c349dbc7Sjsg PIPE_CONF_CHECK_X(output_types); 5226c349dbc7Sjsg 52271bb76ff1Sjsg PIPE_CONF_CHECK_I(framestart_delay); 52281bb76ff1Sjsg PIPE_CONF_CHECK_I(msa_timing_delay); 52295ca02815Sjsg 52301bb76ff1Sjsg PIPE_CONF_CHECK_TIMINGS(hw.pipe_mode); 52311bb76ff1Sjsg PIPE_CONF_CHECK_TIMINGS(hw.adjusted_mode); 5232c349dbc7Sjsg 5233c349dbc7Sjsg PIPE_CONF_CHECK_I(pixel_multiplier); 5234c349dbc7Sjsg 5235c349dbc7Sjsg PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5236c349dbc7Sjsg DRM_MODE_FLAG_INTERLACE); 5237c349dbc7Sjsg 5238c349dbc7Sjsg if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 5239c349dbc7Sjsg PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5240c349dbc7Sjsg DRM_MODE_FLAG_PHSYNC); 5241c349dbc7Sjsg PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5242c349dbc7Sjsg DRM_MODE_FLAG_NHSYNC); 5243c349dbc7Sjsg PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5244c349dbc7Sjsg DRM_MODE_FLAG_PVSYNC); 5245c349dbc7Sjsg PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 5246c349dbc7Sjsg DRM_MODE_FLAG_NVSYNC); 5247c349dbc7Sjsg } 52485ca02815Sjsg 52495ca02815Sjsg PIPE_CONF_CHECK_I(output_format); 52505ca02815Sjsg PIPE_CONF_CHECK_BOOL(has_hdmi_sink); 52515ca02815Sjsg if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 52525ca02815Sjsg IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 52535ca02815Sjsg PIPE_CONF_CHECK_BOOL(limited_color_range); 52545ca02815Sjsg 52555ca02815Sjsg PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 52565ca02815Sjsg PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 52575ca02815Sjsg PIPE_CONF_CHECK_BOOL(has_infoframe); 
5258f005ef32Sjsg PIPE_CONF_CHECK_BOOL(enhanced_framing); 52595ca02815Sjsg PIPE_CONF_CHECK_BOOL(fec_enable); 52605ca02815Sjsg 52615ca02815Sjsg PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 5262f005ef32Sjsg PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5263c349dbc7Sjsg 5264c349dbc7Sjsg PIPE_CONF_CHECK_X(gmch_pfit.control); 5265c349dbc7Sjsg /* pfit ratios are autocomputed by the hw on gen4+ */ 52665ca02815Sjsg if (DISPLAY_VER(dev_priv) < 4) 5267c349dbc7Sjsg PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 5268c349dbc7Sjsg PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 5269c349dbc7Sjsg 5270c349dbc7Sjsg /* 5271c349dbc7Sjsg * Changing the EDP transcoder input mux 5272c349dbc7Sjsg * (A_ONOFF vs. A_ON) requires a full modeset. 5273c349dbc7Sjsg */ 5274c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru); 5275c349dbc7Sjsg 5276c349dbc7Sjsg if (!fastset) { 52771bb76ff1Sjsg PIPE_CONF_CHECK_RECT(pipe_src); 5278c349dbc7Sjsg 5279c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(pch_pfit.enabled); 52801bb76ff1Sjsg PIPE_CONF_CHECK_RECT(pch_pfit.dst); 5281c349dbc7Sjsg 5282c349dbc7Sjsg PIPE_CONF_CHECK_I(scaler_state.scaler_id); 52831bb76ff1Sjsg PIPE_CONF_CHECK_I(pixel_rate); 5284c349dbc7Sjsg 5285c349dbc7Sjsg PIPE_CONF_CHECK_X(gamma_mode); 5286c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv)) 5287c349dbc7Sjsg PIPE_CONF_CHECK_X(cgm_mode); 5288c349dbc7Sjsg else 5289c349dbc7Sjsg PIPE_CONF_CHECK_X(csc_mode); 5290c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(gamma_enable); 5291c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(csc_enable); 5292f005ef32Sjsg PIPE_CONF_CHECK_BOOL(wgc_enable); 5293c349dbc7Sjsg 5294c349dbc7Sjsg PIPE_CONF_CHECK_I(linetime); 5295c349dbc7Sjsg PIPE_CONF_CHECK_I(ips_linetime); 5296c349dbc7Sjsg 5297f005ef32Sjsg PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true); 5298f005ef32Sjsg PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false); 5299f005ef32Sjsg 5300f005ef32Sjsg PIPE_CONF_CHECK_CSC(csc); 5301f005ef32Sjsg PIPE_CONF_CHECK_CSC(output_csc); 53025ca02815Sjsg 53031bb76ff1Sjsg if (current_config->active_planes) { 53045ca02815Sjsg PIPE_CONF_CHECK_BOOL(has_psr); 53055ca02815Sjsg PIPE_CONF_CHECK_BOOL(has_psr2); 53065ca02815Sjsg PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); 53075ca02815Sjsg PIPE_CONF_CHECK_I(dc3co_exitline); 5308c349dbc7Sjsg } 53091bb76ff1Sjsg } 5310c349dbc7Sjsg 5311c349dbc7Sjsg PIPE_CONF_CHECK_BOOL(double_wide); 5312c349dbc7Sjsg 53131bb76ff1Sjsg if (dev_priv->display.dpll.mgr) { 5314c349dbc7Sjsg PIPE_CONF_CHECK_P(shared_dpll); 53155ca02815Sjsg 5316c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 5317c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 5318c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 5319c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 5320c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 5321c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.spll); 5322c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 5323c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 5324c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 5325c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 53261bb76ff1Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.div0); 5327c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 5328c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 5329c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 5330c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 5331c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 5332c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 5333c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 5334c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 5335c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 
5336c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 5337c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 5338c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl); 5339c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1); 5340c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl); 5341c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0); 5342c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1); 5343c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf); 5344c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock); 5345c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc); 5346c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias); 5347c349dbc7Sjsg PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 53485ca02815Sjsg } 5349c349dbc7Sjsg 5350c349dbc7Sjsg PIPE_CONF_CHECK_X(dsi_pll.ctrl); 5351c349dbc7Sjsg PIPE_CONF_CHECK_X(dsi_pll.div); 5352c349dbc7Sjsg 53535ca02815Sjsg if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 5354c349dbc7Sjsg PIPE_CONF_CHECK_I(pipe_bpp); 5355c349dbc7Sjsg 53561b89dd6fSjsg if (!fastset || !pipe_config->update_m_n) { 53571bb76ff1Sjsg PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); 53581bb76ff1Sjsg PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); 53591bb76ff1Sjsg } 53601bb76ff1Sjsg PIPE_CONF_CHECK_I(port_clock); 5361c349dbc7Sjsg 5362c349dbc7Sjsg PIPE_CONF_CHECK_I(min_voltage_level); 5363c349dbc7Sjsg 53641bb76ff1Sjsg if (current_config->has_psr || pipe_config->has_psr) 53655ca02815Sjsg PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, 53665ca02815Sjsg ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); 53675ca02815Sjsg else 5368c349dbc7Sjsg PIPE_CONF_CHECK_X(infoframes.enable); 53695ca02815Sjsg 5370c349dbc7Sjsg PIPE_CONF_CHECK_X(infoframes.gcp); 5371c349dbc7Sjsg PIPE_CONF_CHECK_INFOFRAME(avi); 5372c349dbc7Sjsg PIPE_CONF_CHECK_INFOFRAME(spd); 5373c349dbc7Sjsg PIPE_CONF_CHECK_INFOFRAME(hdmi); 5374c349dbc7Sjsg PIPE_CONF_CHECK_INFOFRAME(drm); 5375ad8b1aafSjsg PIPE_CONF_CHECK_DP_VSC_SDP(vsc); 5376c349dbc7Sjsg 5377c349dbc7Sjsg PIPE_CONF_CHECK_X(sync_mode_slaves_mask); 5378c349dbc7Sjsg PIPE_CONF_CHECK_I(master_transcoder); 53791bb76ff1Sjsg PIPE_CONF_CHECK_X(bigjoiner_pipes); 5380c349dbc7Sjsg 5381c349dbc7Sjsg PIPE_CONF_CHECK_I(dsc.compression_enable); 5382c349dbc7Sjsg PIPE_CONF_CHECK_I(dsc.dsc_split); 5383c349dbc7Sjsg PIPE_CONF_CHECK_I(dsc.compressed_bpp); 5384c349dbc7Sjsg 53855ca02815Sjsg PIPE_CONF_CHECK_BOOL(splitter.enable); 53865ca02815Sjsg PIPE_CONF_CHECK_I(splitter.link_count); 53875ca02815Sjsg PIPE_CONF_CHECK_I(splitter.pixel_overlap); 53885ca02815Sjsg 5389f005ef32Sjsg if (!fastset) 53905ca02815Sjsg PIPE_CONF_CHECK_BOOL(vrr.enable); 53915ca02815Sjsg PIPE_CONF_CHECK_I(vrr.vmin); 53925ca02815Sjsg PIPE_CONF_CHECK_I(vrr.vmax); 53935ca02815Sjsg PIPE_CONF_CHECK_I(vrr.flipline); 53945ca02815Sjsg PIPE_CONF_CHECK_I(vrr.pipeline_full); 53955ca02815Sjsg PIPE_CONF_CHECK_I(vrr.guardband); 53965ca02815Sjsg 5397c349dbc7Sjsg #undef PIPE_CONF_CHECK_X 5398c349dbc7Sjsg #undef PIPE_CONF_CHECK_I 5399c349dbc7Sjsg #undef PIPE_CONF_CHECK_BOOL 5400c349dbc7Sjsg #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 5401c349dbc7Sjsg #undef PIPE_CONF_CHECK_P 5402c349dbc7Sjsg #undef PIPE_CONF_CHECK_FLAGS 5403c349dbc7Sjsg #undef PIPE_CONF_CHECK_COLOR_LUT 54041bb76ff1Sjsg #undef PIPE_CONF_CHECK_TIMINGS 54051bb76ff1Sjsg #undef PIPE_CONF_CHECK_RECT 5406c349dbc7Sjsg #undef PIPE_CONF_QUIRK 5407c349dbc7Sjsg 5408c349dbc7Sjsg return ret; 5409c349dbc7Sjsg } 5410c349dbc7Sjsg 5411c349dbc7Sjsg static void 5412c349dbc7Sjsg intel_verify_planes(struct intel_atomic_state 
*state) 5413c349dbc7Sjsg { 5414c349dbc7Sjsg struct intel_plane *plane; 5415c349dbc7Sjsg const struct intel_plane_state *plane_state; 5416c349dbc7Sjsg int i; 5417c349dbc7Sjsg 5418c349dbc7Sjsg for_each_new_intel_plane_in_state(state, plane, 5419c349dbc7Sjsg plane_state, i) 5420c349dbc7Sjsg assert_plane(plane, plane_state->planar_slave || 5421c349dbc7Sjsg plane_state->uapi.visible); 5422c349dbc7Sjsg } 5423c349dbc7Sjsg 5424f005ef32Sjsg int intel_modeset_all_pipes(struct intel_atomic_state *state, 5425f005ef32Sjsg const char *reason) 54265ca02815Sjsg { 54275ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 54285ca02815Sjsg struct intel_crtc *crtc; 54295ca02815Sjsg 54305ca02815Sjsg /* 54315ca02815Sjsg * Add all pipes to the state, and force 54325ca02815Sjsg * a modeset on all the active ones. 54335ca02815Sjsg */ 54345ca02815Sjsg for_each_intel_crtc(&dev_priv->drm, crtc) { 54355ca02815Sjsg struct intel_crtc_state *crtc_state; 54365ca02815Sjsg int ret; 54375ca02815Sjsg 54385ca02815Sjsg crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 54395ca02815Sjsg if (IS_ERR(crtc_state)) 54405ca02815Sjsg return PTR_ERR(crtc_state); 54415ca02815Sjsg 54425ca02815Sjsg if (!crtc_state->hw.active || 5443f005ef32Sjsg intel_crtc_needs_modeset(crtc_state)) 54445ca02815Sjsg continue; 54455ca02815Sjsg 5446f005ef32Sjsg drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] Full modeset due to %s\n", 5447f005ef32Sjsg crtc->base.base.id, crtc->base.name, reason); 5448f005ef32Sjsg 54495ca02815Sjsg crtc_state->uapi.mode_changed = true; 5450f005ef32Sjsg crtc_state->update_pipe = false; 54511b89dd6fSjsg crtc_state->update_m_n = false; 54525ca02815Sjsg 54535ca02815Sjsg ret = drm_atomic_add_affected_connectors(&state->base, 54545ca02815Sjsg &crtc->base); 54555ca02815Sjsg if (ret) 54565ca02815Sjsg return ret; 54575ca02815Sjsg 545847f7db4bSjsg ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); 545947f7db4bSjsg if (ret) 546047f7db4bSjsg return ret; 546147f7db4bSjsg 54625ca02815Sjsg ret = intel_atomic_add_affected_planes(state, crtc); 54635ca02815Sjsg if (ret) 54645ca02815Sjsg return ret; 54655ca02815Sjsg 54665ca02815Sjsg crtc_state->update_planes |= crtc_state->active_planes; 5467f005ef32Sjsg crtc_state->async_flip_planes = 0; 5468f005ef32Sjsg crtc_state->do_async_flip = false; 54695ca02815Sjsg } 54705ca02815Sjsg 54715ca02815Sjsg return 0; 54725ca02815Sjsg } 54735ca02815Sjsg 5474c349dbc7Sjsg /* 5475c349dbc7Sjsg * This implements the workaround described in the "notes" section of the mode 5476c349dbc7Sjsg * set sequence documentation. When going from no pipes or single pipe to 5477c349dbc7Sjsg * multiple pipes, and planes are enabled after the pipe, we need to wait at 5478c349dbc7Sjsg * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
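 *
 * Illustrative scenario (restating the code below, not the mode set
 * sequence documentation): if only pipe A is currently running and this
 * commit enables pipe B, first_crtc_state ends up being pipe B's state and
 * hsw_workaround_pipe is set to pipe A, so the commit path knows to wait
 * for pipe A's vblanks before enabling the planes on pipe B.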
5479c349dbc7Sjsg */ 5480c349dbc7Sjsg static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) 5481c349dbc7Sjsg { 5482c349dbc7Sjsg struct intel_crtc_state *crtc_state; 5483c349dbc7Sjsg struct intel_crtc *crtc; 5484c349dbc7Sjsg struct intel_crtc_state *first_crtc_state = NULL; 5485c349dbc7Sjsg struct intel_crtc_state *other_crtc_state = NULL; 5486c349dbc7Sjsg enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 5487c349dbc7Sjsg int i; 5488c349dbc7Sjsg 5489c349dbc7Sjsg /* look at all crtc's that are going to be enabled in during modeset */ 5490c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5491c349dbc7Sjsg if (!crtc_state->hw.active || 54925ca02815Sjsg !intel_crtc_needs_modeset(crtc_state)) 5493c349dbc7Sjsg continue; 5494c349dbc7Sjsg 5495c349dbc7Sjsg if (first_crtc_state) { 5496c349dbc7Sjsg other_crtc_state = crtc_state; 5497c349dbc7Sjsg break; 5498c349dbc7Sjsg } else { 5499c349dbc7Sjsg first_crtc_state = crtc_state; 5500c349dbc7Sjsg first_pipe = crtc->pipe; 5501c349dbc7Sjsg } 5502c349dbc7Sjsg } 5503c349dbc7Sjsg 5504c349dbc7Sjsg /* No workaround needed? */ 5505c349dbc7Sjsg if (!first_crtc_state) 5506c349dbc7Sjsg return 0; 5507c349dbc7Sjsg 5508c349dbc7Sjsg /* w/a possibly needed, check how many crtc's are already enabled. */ 5509c349dbc7Sjsg for_each_intel_crtc(state->base.dev, crtc) { 5510c349dbc7Sjsg crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 5511c349dbc7Sjsg if (IS_ERR(crtc_state)) 5512c349dbc7Sjsg return PTR_ERR(crtc_state); 5513c349dbc7Sjsg 5514c349dbc7Sjsg crtc_state->hsw_workaround_pipe = INVALID_PIPE; 5515c349dbc7Sjsg 5516c349dbc7Sjsg if (!crtc_state->hw.active || 55175ca02815Sjsg intel_crtc_needs_modeset(crtc_state)) 5518c349dbc7Sjsg continue; 5519c349dbc7Sjsg 5520c349dbc7Sjsg /* 2 or more enabled crtcs means no need for w/a */ 5521c349dbc7Sjsg if (enabled_pipe != INVALID_PIPE) 5522c349dbc7Sjsg return 0; 5523c349dbc7Sjsg 5524c349dbc7Sjsg enabled_pipe = crtc->pipe; 5525c349dbc7Sjsg } 5526c349dbc7Sjsg 5527c349dbc7Sjsg if (enabled_pipe != INVALID_PIPE) 5528c349dbc7Sjsg first_crtc_state->hsw_workaround_pipe = enabled_pipe; 5529c349dbc7Sjsg else if (other_crtc_state) 5530c349dbc7Sjsg other_crtc_state->hsw_workaround_pipe = first_pipe; 5531c349dbc7Sjsg 5532c349dbc7Sjsg return 0; 5533c349dbc7Sjsg } 5534c349dbc7Sjsg 5535c349dbc7Sjsg u8 intel_calc_active_pipes(struct intel_atomic_state *state, 5536c349dbc7Sjsg u8 active_pipes) 5537c349dbc7Sjsg { 5538c349dbc7Sjsg const struct intel_crtc_state *crtc_state; 5539c349dbc7Sjsg struct intel_crtc *crtc; 5540c349dbc7Sjsg int i; 5541c349dbc7Sjsg 5542c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5543c349dbc7Sjsg if (crtc_state->hw.active) 5544c349dbc7Sjsg active_pipes |= BIT(crtc->pipe); 5545c349dbc7Sjsg else 5546c349dbc7Sjsg active_pipes &= ~BIT(crtc->pipe); 5547c349dbc7Sjsg } 5548c349dbc7Sjsg 5549c349dbc7Sjsg return active_pipes; 5550c349dbc7Sjsg } 5551c349dbc7Sjsg 5552c349dbc7Sjsg static int intel_modeset_checks(struct intel_atomic_state *state) 5553c349dbc7Sjsg { 5554c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5555c349dbc7Sjsg 5556c349dbc7Sjsg state->modeset = true; 5557c349dbc7Sjsg 5558c349dbc7Sjsg if (IS_HASWELL(dev_priv)) 5559c349dbc7Sjsg return hsw_mode_set_planes_workaround(state); 5560c349dbc7Sjsg 5561c349dbc7Sjsg return 0; 5562c349dbc7Sjsg } 5563c349dbc7Sjsg 5564c349dbc7Sjsg static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, 5565c349dbc7Sjsg struct intel_crtc_state 
*new_crtc_state) 5566c349dbc7Sjsg { 5567f005ef32Sjsg struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev); 5568f005ef32Sjsg 55691b89dd6fSjsg if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) 5570f005ef32Sjsg drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n"); 55711b89dd6fSjsg else 5572c349dbc7Sjsg new_crtc_state->uapi.mode_changed = false; 55731b89dd6fSjsg 55741b89dd6fSjsg if (intel_crtc_needs_modeset(new_crtc_state)) 55751b89dd6fSjsg new_crtc_state->update_m_n = false; 55761b89dd6fSjsg 5577f005ef32Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) 5578c349dbc7Sjsg new_crtc_state->update_pipe = true; 5579c349dbc7Sjsg } 5580c349dbc7Sjsg 5581c349dbc7Sjsg static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, 5582c349dbc7Sjsg struct intel_crtc *crtc, 5583c349dbc7Sjsg u8 plane_ids_mask) 5584c349dbc7Sjsg { 5585c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5586c349dbc7Sjsg struct intel_plane *plane; 5587c349dbc7Sjsg 5588c349dbc7Sjsg for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 5589c349dbc7Sjsg struct intel_plane_state *plane_state; 5590c349dbc7Sjsg 5591c349dbc7Sjsg if ((plane_ids_mask & BIT(plane->id)) == 0) 5592c349dbc7Sjsg continue; 5593c349dbc7Sjsg 5594c349dbc7Sjsg plane_state = intel_atomic_get_plane_state(state, plane); 5595c349dbc7Sjsg if (IS_ERR(plane_state)) 5596c349dbc7Sjsg return PTR_ERR(plane_state); 5597c349dbc7Sjsg } 5598c349dbc7Sjsg 5599c349dbc7Sjsg return 0; 5600c349dbc7Sjsg } 5601c349dbc7Sjsg 56025ca02815Sjsg int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 56035ca02815Sjsg struct intel_crtc *crtc) 56045ca02815Sjsg { 56055ca02815Sjsg const struct intel_crtc_state *old_crtc_state = 56065ca02815Sjsg intel_atomic_get_old_crtc_state(state, crtc); 56075ca02815Sjsg const struct intel_crtc_state *new_crtc_state = 56085ca02815Sjsg intel_atomic_get_new_crtc_state(state, crtc); 56095ca02815Sjsg 56105ca02815Sjsg return intel_crtc_add_planes_to_state(state, crtc, 56115ca02815Sjsg old_crtc_state->enabled_planes | 56125ca02815Sjsg new_crtc_state->enabled_planes); 56135ca02815Sjsg } 56145ca02815Sjsg 5615c349dbc7Sjsg static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) 5616c349dbc7Sjsg { 5617c349dbc7Sjsg /* See {hsw,vlv,ivb}_plane_ratio() */ 5618c349dbc7Sjsg return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || 5619c349dbc7Sjsg IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || 56205ca02815Sjsg IS_IVYBRIDGE(dev_priv); 56215ca02815Sjsg } 56225ca02815Sjsg 56235ca02815Sjsg static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state, 56245ca02815Sjsg struct intel_crtc *crtc, 56255ca02815Sjsg struct intel_crtc *other) 56265ca02815Sjsg { 5627f005ef32Sjsg const struct intel_plane_state __maybe_unused *plane_state; 56285ca02815Sjsg struct intel_plane *plane; 56295ca02815Sjsg u8 plane_ids = 0; 56305ca02815Sjsg int i; 56315ca02815Sjsg 56325ca02815Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 56335ca02815Sjsg if (plane->pipe == crtc->pipe) 56345ca02815Sjsg plane_ids |= BIT(plane->id); 56355ca02815Sjsg } 56365ca02815Sjsg 56375ca02815Sjsg return intel_crtc_add_planes_to_state(state, other, plane_ids); 56385ca02815Sjsg } 56395ca02815Sjsg 56405ca02815Sjsg static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) 56415ca02815Sjsg { 56421bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 56435ca02815Sjsg const struct intel_crtc_state *crtc_state; 
56445ca02815Sjsg struct intel_crtc *crtc; 56455ca02815Sjsg int i; 56465ca02815Sjsg 56475ca02815Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 56481bb76ff1Sjsg struct intel_crtc *other; 56491bb76ff1Sjsg 56501bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, other, 56511bb76ff1Sjsg crtc_state->bigjoiner_pipes) { 56525ca02815Sjsg int ret; 56535ca02815Sjsg 56541bb76ff1Sjsg if (crtc == other) 56555ca02815Sjsg continue; 56565ca02815Sjsg 56571bb76ff1Sjsg ret = intel_crtc_add_bigjoiner_planes(state, crtc, other); 56585ca02815Sjsg if (ret) 56595ca02815Sjsg return ret; 56605ca02815Sjsg } 56611bb76ff1Sjsg } 56625ca02815Sjsg 56635ca02815Sjsg return 0; 5664c349dbc7Sjsg } 5665c349dbc7Sjsg 5666ad8b1aafSjsg static int intel_atomic_check_planes(struct intel_atomic_state *state) 5667c349dbc7Sjsg { 5668c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5669c349dbc7Sjsg struct intel_crtc_state *old_crtc_state, *new_crtc_state; 5670f005ef32Sjsg struct intel_plane_state __maybe_unused *plane_state; 5671c349dbc7Sjsg struct intel_plane *plane; 5672c349dbc7Sjsg struct intel_crtc *crtc; 5673c349dbc7Sjsg int i, ret; 5674c349dbc7Sjsg 5675c349dbc7Sjsg ret = icl_add_linked_planes(state); 5676c349dbc7Sjsg if (ret) 5677c349dbc7Sjsg return ret; 5678c349dbc7Sjsg 56795ca02815Sjsg ret = intel_bigjoiner_add_affected_planes(state); 56805ca02815Sjsg if (ret) 56815ca02815Sjsg return ret; 56825ca02815Sjsg 5683c349dbc7Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 5684c349dbc7Sjsg ret = intel_plane_atomic_check(state, plane); 5685c349dbc7Sjsg if (ret) { 5686c349dbc7Sjsg drm_dbg_atomic(&dev_priv->drm, 5687c349dbc7Sjsg "[PLANE:%d:%s] atomic driver check failed\n", 5688c349dbc7Sjsg plane->base.base.id, plane->base.name); 5689c349dbc7Sjsg return ret; 5690c349dbc7Sjsg } 5691c349dbc7Sjsg } 5692c349dbc7Sjsg 5693c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 5694c349dbc7Sjsg new_crtc_state, i) { 5695c349dbc7Sjsg u8 old_active_planes, new_active_planes; 5696c349dbc7Sjsg 5697c349dbc7Sjsg ret = icl_check_nv12_planes(new_crtc_state); 5698c349dbc7Sjsg if (ret) 5699c349dbc7Sjsg return ret; 5700c349dbc7Sjsg 5701c349dbc7Sjsg /* 5702c349dbc7Sjsg * On some platforms the number of active planes affects 5703c349dbc7Sjsg * the planes' minimum cdclk calculation. Add such planes 5704c349dbc7Sjsg * to the state before we compute the minimum cdclk. 
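 * (See active_planes_affects_min_cdclk() below: as of this code only
 * hsw/bdw, vlv/chv and ivb need this, via their plane ratio helpers.)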
5705c349dbc7Sjsg */ 5706c349dbc7Sjsg if (!active_planes_affects_min_cdclk(dev_priv)) 5707c349dbc7Sjsg continue; 5708c349dbc7Sjsg 5709c349dbc7Sjsg old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5710c349dbc7Sjsg new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); 5711c349dbc7Sjsg 57125ca02815Sjsg if (hweight8(old_active_planes) == hweight8(new_active_planes)) 5713c349dbc7Sjsg continue; 5714c349dbc7Sjsg 5715c349dbc7Sjsg ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 5716c349dbc7Sjsg if (ret) 5717c349dbc7Sjsg return ret; 5718c349dbc7Sjsg } 5719c349dbc7Sjsg 5720ad8b1aafSjsg return 0; 5721ad8b1aafSjsg } 5722ad8b1aafSjsg 5723c349dbc7Sjsg static int intel_atomic_check_crtcs(struct intel_atomic_state *state) 5724c349dbc7Sjsg { 5725f005ef32Sjsg struct intel_crtc_state __maybe_unused *crtc_state; 5726c349dbc7Sjsg struct intel_crtc *crtc; 5727c349dbc7Sjsg int i; 5728c349dbc7Sjsg 5729c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 5730c349dbc7Sjsg struct drm_i915_private *i915 = to_i915(crtc->base.dev); 57315ca02815Sjsg int ret; 57325ca02815Sjsg 57335ca02815Sjsg ret = intel_crtc_atomic_check(state, crtc); 5734c349dbc7Sjsg if (ret) { 5735c349dbc7Sjsg drm_dbg_atomic(&i915->drm, 5736c349dbc7Sjsg "[CRTC:%d:%s] atomic driver check failed\n", 5737c349dbc7Sjsg crtc->base.base.id, crtc->base.name); 5738c349dbc7Sjsg return ret; 5739c349dbc7Sjsg } 5740c349dbc7Sjsg } 5741c349dbc7Sjsg 5742c349dbc7Sjsg return 0; 5743c349dbc7Sjsg } 5744c349dbc7Sjsg 5745c349dbc7Sjsg static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, 5746c349dbc7Sjsg u8 transcoders) 5747c349dbc7Sjsg { 5748c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state; 5749c349dbc7Sjsg struct intel_crtc *crtc; 5750c349dbc7Sjsg int i; 5751c349dbc7Sjsg 5752c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 5753c349dbc7Sjsg if (new_crtc_state->hw.enable && 5754c349dbc7Sjsg transcoders & BIT(new_crtc_state->cpu_transcoder) && 57555ca02815Sjsg intel_crtc_needs_modeset(new_crtc_state)) 5756c349dbc7Sjsg return true; 5757c349dbc7Sjsg } 5758c349dbc7Sjsg 5759c349dbc7Sjsg return false; 5760c349dbc7Sjsg } 5761c349dbc7Sjsg 57621bb76ff1Sjsg static bool intel_pipes_need_modeset(struct intel_atomic_state *state, 57631bb76ff1Sjsg u8 pipes) 57645ca02815Sjsg { 57651bb76ff1Sjsg const struct intel_crtc_state *new_crtc_state; 57661bb76ff1Sjsg struct intel_crtc *crtc; 57671bb76ff1Sjsg int i; 57685ca02815Sjsg 57691bb76ff1Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 57701bb76ff1Sjsg if (new_crtc_state->hw.enable && 57711bb76ff1Sjsg pipes & BIT(crtc->pipe) && 57721bb76ff1Sjsg intel_crtc_needs_modeset(new_crtc_state)) 57731bb76ff1Sjsg return true; 57745ca02815Sjsg } 57755ca02815Sjsg 57761bb76ff1Sjsg return false; 57771bb76ff1Sjsg } 57781bb76ff1Sjsg 57791bb76ff1Sjsg static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, 57801bb76ff1Sjsg struct intel_crtc *master_crtc) 57811bb76ff1Sjsg { 57821bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 57831bb76ff1Sjsg struct intel_crtc_state *master_crtc_state = 57841bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, master_crtc); 57851bb76ff1Sjsg struct intel_crtc *slave_crtc; 57861bb76ff1Sjsg 57871bb76ff1Sjsg if (!master_crtc_state->bigjoiner_pipes) 57885ca02815Sjsg return 0; 57895ca02815Sjsg 57901bb76ff1Sjsg /* sanity check */ 57911bb76ff1Sjsg if (drm_WARN_ON(&i915->drm, 57921bb76ff1Sjsg master_crtc->pipe != 
bigjoiner_master_pipe(master_crtc_state))) 57931bb76ff1Sjsg return -EINVAL; 57941bb76ff1Sjsg 57951bb76ff1Sjsg if (master_crtc_state->bigjoiner_pipes & ~bigjoiner_pipes(i915)) { 57961bb76ff1Sjsg drm_dbg_kms(&i915->drm, 57971bb76ff1Sjsg "[CRTC:%d:%s] Cannot act as big joiner master " 57981bb76ff1Sjsg "(need 0x%x as pipes, only 0x%x possible)\n", 57991bb76ff1Sjsg master_crtc->base.base.id, master_crtc->base.name, 58001bb76ff1Sjsg master_crtc_state->bigjoiner_pipes, bigjoiner_pipes(i915)); 58015ca02815Sjsg return -EINVAL; 58025ca02815Sjsg } 58035ca02815Sjsg 58041bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 58051bb76ff1Sjsg intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) { 58061bb76ff1Sjsg struct intel_crtc_state *slave_crtc_state; 58071bb76ff1Sjsg int ret; 58081bb76ff1Sjsg 58091bb76ff1Sjsg slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 58105ca02815Sjsg if (IS_ERR(slave_crtc_state)) 58115ca02815Sjsg return PTR_ERR(slave_crtc_state); 58125ca02815Sjsg 58135ca02815Sjsg /* master being enabled, slave was already configured? */ 58141bb76ff1Sjsg if (slave_crtc_state->uapi.enable) { 58151bb76ff1Sjsg drm_dbg_kms(&i915->drm, 58161bb76ff1Sjsg "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " 58175ca02815Sjsg "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", 58181bb76ff1Sjsg slave_crtc->base.base.id, slave_crtc->base.name, 58191bb76ff1Sjsg master_crtc->base.base.id, master_crtc->base.name); 58205ca02815Sjsg return -EINVAL; 58215ca02815Sjsg } 58225ca02815Sjsg 58231bb76ff1Sjsg /* 58241bb76ff1Sjsg * The state copy logic assumes the master crtc gets processed 58251bb76ff1Sjsg * before the slave crtc during the main compute_config loop. 58261bb76ff1Sjsg * This works because the crtcs are created in pipe order, 58271bb76ff1Sjsg * and the hardware requires master pipe < slave pipe as well. 58281bb76ff1Sjsg * Should that change we need to rethink the logic. 
58291bb76ff1Sjsg */
58301bb76ff1Sjsg if (WARN_ON(drm_crtc_index(&master_crtc->base) >
58311bb76ff1Sjsg drm_crtc_index(&slave_crtc->base)))
58321bb76ff1Sjsg return -EINVAL;
58335ca02815Sjsg 
58341bb76ff1Sjsg drm_dbg_kms(&i915->drm,
58351bb76ff1Sjsg "[CRTC:%d:%s] Used as slave for big joiner master [CRTC:%d:%s]\n",
58361bb76ff1Sjsg slave_crtc->base.base.id, slave_crtc->base.name,
58371bb76ff1Sjsg master_crtc->base.base.id, master_crtc->base.name);
58381bb76ff1Sjsg 
58391bb76ff1Sjsg slave_crtc_state->bigjoiner_pipes =
58401bb76ff1Sjsg master_crtc_state->bigjoiner_pipes;
58411bb76ff1Sjsg 
58421bb76ff1Sjsg ret = copy_bigjoiner_crtc_state_modeset(state, slave_crtc);
58431bb76ff1Sjsg if (ret)
58441bb76ff1Sjsg return ret;
58451bb76ff1Sjsg }
58461bb76ff1Sjsg 
58471bb76ff1Sjsg return 0;
58481bb76ff1Sjsg }
58491bb76ff1Sjsg 
58501bb76ff1Sjsg static void kill_bigjoiner_slave(struct intel_atomic_state *state,
58511bb76ff1Sjsg struct intel_crtc *master_crtc)
58521bb76ff1Sjsg {
58531bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev);
58541bb76ff1Sjsg struct intel_crtc_state *master_crtc_state =
58551bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, master_crtc);
58561bb76ff1Sjsg struct intel_crtc *slave_crtc;
58571bb76ff1Sjsg 
58581bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
58591bb76ff1Sjsg intel_crtc_bigjoiner_slave_pipes(master_crtc_state)) {
58601bb76ff1Sjsg struct intel_crtc_state *slave_crtc_state =
58611bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, slave_crtc);
58621bb76ff1Sjsg 
58631bb76ff1Sjsg slave_crtc_state->bigjoiner_pipes = 0;
58641bb76ff1Sjsg 
58651bb76ff1Sjsg intel_crtc_copy_uapi_to_hw_state_modeset(state, slave_crtc);
58661bb76ff1Sjsg }
58671bb76ff1Sjsg 
58681bb76ff1Sjsg master_crtc_state->bigjoiner_pipes = 0;
58695ca02815Sjsg }
58705ca02815Sjsg 
58715ca02815Sjsg /**
58725ca02815Sjsg * DOC: asynchronous flip implementation
58735ca02815Sjsg *
58745ca02815Sjsg * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
58755ca02815Sjsg * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
58765ca02815Sjsg * Correspondingly, support is currently added for primary plane only.
58775ca02815Sjsg *
58785ca02815Sjsg * Async flip can only change the plane surface address, so anything else
58791bb76ff1Sjsg * changing is rejected from the intel_async_flip_check_hw() function.
58805ca02815Sjsg * Once this check is cleared, flip done interrupt is enabled using
58815ca02815Sjsg * the intel_crtc_enable_flip_done() function.
58825ca02815Sjsg *
58835ca02815Sjsg * As soon as the surface address register is written, flip done interrupt is
58845ca02815Sjsg * generated and the requested events are sent to userspace in the interrupt
58855ca02815Sjsg * handler itself. The timestamp and sequence sent during the flip done event
58865ca02815Sjsg * correspond to the last vblank and have no relation to the actual time when
58875ca02815Sjsg * the flip done event was sent.
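 *
 * For illustration only (not part of this driver): userspace typically
 * requests an async flip through the libdrm wrapper roughly as follows,
 * where fd, crtc_id, fb_id and user_data are placeholders supplied by the
 * application:
 *
 *   ret = drmModePageFlip(fd, crtc_id, fb_id,
 *                         DRM_MODE_PAGE_FLIP_ASYNC | DRM_MODE_PAGE_FLIP_EVENT,
 *                         user_data);
 *
 * DRM_MODE_PAGE_FLIP_EVENT is optional; it requests the flip done event
 * described above.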
58885ca02815Sjsg */
58891bb76ff1Sjsg static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
58901bb76ff1Sjsg struct intel_crtc *crtc)
58915ca02815Sjsg {
58925ca02815Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev);
58931bb76ff1Sjsg const struct intel_crtc_state *new_crtc_state =
58941bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc);
58951bb76ff1Sjsg const struct intel_plane_state *old_plane_state;
58961bb76ff1Sjsg struct intel_plane_state *new_plane_state;
58975ca02815Sjsg struct intel_plane *plane;
58985ca02815Sjsg int i;
58995ca02815Sjsg 
59001bb76ff1Sjsg if (!new_crtc_state->uapi.async_flip)
59011bb76ff1Sjsg return 0;
59021bb76ff1Sjsg 
59031bb76ff1Sjsg if (!new_crtc_state->uapi.active) {
59041bb76ff1Sjsg drm_dbg_kms(&i915->drm,
59051bb76ff1Sjsg "[CRTC:%d:%s] not active\n",
59061bb76ff1Sjsg crtc->base.base.id, crtc->base.name);
59075ca02815Sjsg return -EINVAL;
59085ca02815Sjsg }
59095ca02815Sjsg 
59101bb76ff1Sjsg if (intel_crtc_needs_modeset(new_crtc_state)) {
59115ca02815Sjsg drm_dbg_kms(&i915->drm,
59121bb76ff1Sjsg "[CRTC:%d:%s] modeset required\n",
59131bb76ff1Sjsg crtc->base.base.id, crtc->base.name);
59145ca02815Sjsg return -EINVAL;
59155ca02815Sjsg }
59165ca02815Sjsg 
59175ca02815Sjsg for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
59185ca02815Sjsg new_plane_state, i) {
59191bb76ff1Sjsg if (plane->pipe != crtc->pipe)
59201bb76ff1Sjsg continue;
59211bb76ff1Sjsg 
59225ca02815Sjsg /*
59235ca02815Sjsg * TODO: Async flip is only supported through the page flip IOCTL
59245ca02815Sjsg * as of now. So support is currently added for the primary plane only.
59255ca02815Sjsg * Support for other planes on platforms which support
59265ca02815Sjsg * this (vlv/chv and icl+) should be added when async flip is
59275ca02815Sjsg * enabled in the atomic IOCTL path.
59285ca02815Sjsg */ 59291bb76ff1Sjsg if (!plane->async_flip) { 59301bb76ff1Sjsg drm_dbg_kms(&i915->drm, 59311bb76ff1Sjsg "[PLANE:%d:%s] async flip not supported\n", 59321bb76ff1Sjsg plane->base.base.id, plane->base.name); 59335ca02815Sjsg return -EINVAL; 59341bb76ff1Sjsg } 59351bb76ff1Sjsg 59361bb76ff1Sjsg if (!old_plane_state->uapi.fb || !new_plane_state->uapi.fb) { 59371bb76ff1Sjsg drm_dbg_kms(&i915->drm, 59381bb76ff1Sjsg "[PLANE:%d:%s] no old or new framebuffer\n", 59391bb76ff1Sjsg plane->base.base.id, plane->base.name); 59401bb76ff1Sjsg return -EINVAL; 59411bb76ff1Sjsg } 59421bb76ff1Sjsg } 59431bb76ff1Sjsg 59441bb76ff1Sjsg return 0; 59451bb76ff1Sjsg } 59461bb76ff1Sjsg 59471bb76ff1Sjsg static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct intel_crtc *crtc) 59481bb76ff1Sjsg { 59491bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 59501bb76ff1Sjsg const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 59511bb76ff1Sjsg const struct intel_plane_state *new_plane_state, *old_plane_state; 59521bb76ff1Sjsg struct intel_plane *plane; 59531bb76ff1Sjsg int i; 59541bb76ff1Sjsg 59551bb76ff1Sjsg old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 59561bb76ff1Sjsg new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 59571bb76ff1Sjsg 59581bb76ff1Sjsg if (!new_crtc_state->uapi.async_flip) 59591bb76ff1Sjsg return 0; 59601bb76ff1Sjsg 59611bb76ff1Sjsg if (!new_crtc_state->hw.active) { 59621bb76ff1Sjsg drm_dbg_kms(&i915->drm, 59631bb76ff1Sjsg "[CRTC:%d:%s] not active\n", 59641bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 59651bb76ff1Sjsg return -EINVAL; 59661bb76ff1Sjsg } 59671bb76ff1Sjsg 59681bb76ff1Sjsg if (intel_crtc_needs_modeset(new_crtc_state)) { 59691bb76ff1Sjsg drm_dbg_kms(&i915->drm, 59701bb76ff1Sjsg "[CRTC:%d:%s] modeset required\n", 59711bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 59721bb76ff1Sjsg return -EINVAL; 59731bb76ff1Sjsg } 59741bb76ff1Sjsg 59751bb76ff1Sjsg if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 59761bb76ff1Sjsg drm_dbg_kms(&i915->drm, 59771bb76ff1Sjsg "[CRTC:%d:%s] Active planes cannot be in async flip\n", 59781bb76ff1Sjsg crtc->base.base.id, crtc->base.name); 59791bb76ff1Sjsg return -EINVAL; 59801bb76ff1Sjsg } 59811bb76ff1Sjsg 5982f005ef32Sjsg /* 5983f005ef32Sjsg * FIXME: Bigjoiner+async flip is busted currently. 5984f005ef32Sjsg * Remove this check once the issues are fixed. 5985f005ef32Sjsg */ 5986f005ef32Sjsg if (new_crtc_state->bigjoiner_pipes) { 5987f005ef32Sjsg drm_dbg_kms(&i915->drm, 5988f005ef32Sjsg "[CRTC:%d:%s] async flip disallowed with bigjoiner\n", 5989f005ef32Sjsg crtc->base.base.id, crtc->base.name); 5990f005ef32Sjsg return -EINVAL; 5991f005ef32Sjsg } 5992f005ef32Sjsg 59931bb76ff1Sjsg for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 59941bb76ff1Sjsg new_plane_state, i) { 59951bb76ff1Sjsg if (plane->pipe != crtc->pipe) 59961bb76ff1Sjsg continue; 59971bb76ff1Sjsg 59981bb76ff1Sjsg /* 59991bb76ff1Sjsg * Only async flip capable planes should be in the state 60001bb76ff1Sjsg * if we're really about to ask the hardware to perform 60011bb76ff1Sjsg * an async flip. We should never get this far otherwise. 
60021bb76ff1Sjsg */ 60031bb76ff1Sjsg if (drm_WARN_ON(&i915->drm, 60041bb76ff1Sjsg new_crtc_state->do_async_flip && !plane->async_flip)) 60051bb76ff1Sjsg return -EINVAL; 60061bb76ff1Sjsg 60071bb76ff1Sjsg /* 60081bb76ff1Sjsg * Only check async flip capable planes other planes 60091bb76ff1Sjsg * may be involved in the initial commit due to 60101bb76ff1Sjsg * the wm0/ddb optimization. 60111bb76ff1Sjsg * 60121bb76ff1Sjsg * TODO maybe should track which planes actually 60131bb76ff1Sjsg * were requested to do the async flip... 60141bb76ff1Sjsg */ 60151bb76ff1Sjsg if (!plane->async_flip) 60161bb76ff1Sjsg continue; 60175ca02815Sjsg 60185ca02815Sjsg /* 60195ca02815Sjsg * FIXME: This check is kept generic for all platforms. 60205ca02815Sjsg * Need to verify this for all gen9 platforms to enable 60215ca02815Sjsg * this selectively if required. 60225ca02815Sjsg */ 60235ca02815Sjsg switch (new_plane_state->hw.fb->modifier) { 6024f005ef32Sjsg case DRM_FORMAT_MOD_LINEAR: 6025f005ef32Sjsg /* 6026f005ef32Sjsg * FIXME: Async on Linear buffer is supported on ICL as 6027f005ef32Sjsg * but with additional alignment and fbc restrictions 6028f005ef32Sjsg * need to be taken care of. These aren't applicable for 6029f005ef32Sjsg * gen12+. 6030f005ef32Sjsg */ 6031f005ef32Sjsg if (DISPLAY_VER(i915) < 12) { 6032f005ef32Sjsg drm_dbg_kms(&i915->drm, 6033f005ef32Sjsg "[PLANE:%d:%s] Modifier 0x%llx does not support async flip on display ver %d\n", 6034f005ef32Sjsg plane->base.base.id, plane->base.name, 6035f005ef32Sjsg new_plane_state->hw.fb->modifier, DISPLAY_VER(i915)); 6036f005ef32Sjsg return -EINVAL; 6037f005ef32Sjsg } 6038f005ef32Sjsg break; 6039f005ef32Sjsg 60405ca02815Sjsg case I915_FORMAT_MOD_X_TILED: 60415ca02815Sjsg case I915_FORMAT_MOD_Y_TILED: 60425ca02815Sjsg case I915_FORMAT_MOD_Yf_TILED: 60431bb76ff1Sjsg case I915_FORMAT_MOD_4_TILED: 60445ca02815Sjsg break; 60455ca02815Sjsg default: 60465ca02815Sjsg drm_dbg_kms(&i915->drm, 6047f005ef32Sjsg "[PLANE:%d:%s] Modifier 0x%llx does not support async flip\n", 6048f005ef32Sjsg plane->base.base.id, plane->base.name, 6049f005ef32Sjsg new_plane_state->hw.fb->modifier); 60505ca02815Sjsg return -EINVAL; 60515ca02815Sjsg } 60525ca02815Sjsg 60531bb76ff1Sjsg if (new_plane_state->hw.fb->format->num_planes > 1) { 60541bb76ff1Sjsg drm_dbg_kms(&i915->drm, 60551bb76ff1Sjsg "[PLANE:%d:%s] Planar formats do not support async flips\n", 60561bb76ff1Sjsg plane->base.base.id, plane->base.name); 60571bb76ff1Sjsg return -EINVAL; 60581bb76ff1Sjsg } 60591bb76ff1Sjsg 60601bb76ff1Sjsg if (old_plane_state->view.color_plane[0].mapping_stride != 60611bb76ff1Sjsg new_plane_state->view.color_plane[0].mapping_stride) { 60621bb76ff1Sjsg drm_dbg_kms(&i915->drm, 60631bb76ff1Sjsg "[PLANE:%d:%s] Stride cannot be changed in async flip\n", 60641bb76ff1Sjsg plane->base.base.id, plane->base.name); 60655ca02815Sjsg return -EINVAL; 60665ca02815Sjsg } 60675ca02815Sjsg 60685ca02815Sjsg if (old_plane_state->hw.fb->modifier != 60695ca02815Sjsg new_plane_state->hw.fb->modifier) { 60705ca02815Sjsg drm_dbg_kms(&i915->drm, 60711bb76ff1Sjsg "[PLANE:%d:%s] Modifier cannot be changed in async flip\n", 60721bb76ff1Sjsg plane->base.base.id, plane->base.name); 60735ca02815Sjsg return -EINVAL; 60745ca02815Sjsg } 60755ca02815Sjsg 60765ca02815Sjsg if (old_plane_state->hw.fb->format != 60775ca02815Sjsg new_plane_state->hw.fb->format) { 60785ca02815Sjsg drm_dbg_kms(&i915->drm, 60791bb76ff1Sjsg "[PLANE:%d:%s] Pixel format cannot be changed in async flip\n", 60801bb76ff1Sjsg plane->base.base.id, plane->base.name); 
60815ca02815Sjsg return -EINVAL; 60825ca02815Sjsg } 60835ca02815Sjsg 60845ca02815Sjsg if (old_plane_state->hw.rotation != 60855ca02815Sjsg new_plane_state->hw.rotation) { 60861bb76ff1Sjsg drm_dbg_kms(&i915->drm, 60871bb76ff1Sjsg "[PLANE:%d:%s] Rotation cannot be changed in async flip\n", 60881bb76ff1Sjsg plane->base.base.id, plane->base.name); 60895ca02815Sjsg return -EINVAL; 60905ca02815Sjsg } 60915ca02815Sjsg 60925ca02815Sjsg if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) || 60935ca02815Sjsg !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) { 60945ca02815Sjsg drm_dbg_kms(&i915->drm, 60951bb76ff1Sjsg "[PLANE:%d:%s] Size/co-ordinates cannot be changed in async flip\n", 60961bb76ff1Sjsg plane->base.base.id, plane->base.name); 60975ca02815Sjsg return -EINVAL; 60985ca02815Sjsg } 60995ca02815Sjsg 61005ca02815Sjsg if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) { 61011bb76ff1Sjsg drm_dbg_kms(&i915->drm, 61021bb76ff1Sjsg "[PLANES:%d:%s] Alpha value cannot be changed in async flip\n", 61031bb76ff1Sjsg plane->base.base.id, plane->base.name); 61045ca02815Sjsg return -EINVAL; 61055ca02815Sjsg } 61065ca02815Sjsg 61075ca02815Sjsg if (old_plane_state->hw.pixel_blend_mode != 61085ca02815Sjsg new_plane_state->hw.pixel_blend_mode) { 61095ca02815Sjsg drm_dbg_kms(&i915->drm, 61101bb76ff1Sjsg "[PLANE:%d:%s] Pixel blend mode cannot be changed in async flip\n", 61111bb76ff1Sjsg plane->base.base.id, plane->base.name); 61125ca02815Sjsg return -EINVAL; 61135ca02815Sjsg } 61145ca02815Sjsg 61155ca02815Sjsg if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) { 61165ca02815Sjsg drm_dbg_kms(&i915->drm, 61171bb76ff1Sjsg "[PLANE:%d:%s] Color encoding cannot be changed in async flip\n", 61181bb76ff1Sjsg plane->base.base.id, plane->base.name); 61195ca02815Sjsg return -EINVAL; 61205ca02815Sjsg } 61215ca02815Sjsg 61225ca02815Sjsg if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) { 61231bb76ff1Sjsg drm_dbg_kms(&i915->drm, 61241bb76ff1Sjsg "[PLANE:%d:%s] Color range cannot be changed in async flip\n", 61251bb76ff1Sjsg plane->base.base.id, plane->base.name); 61261bb76ff1Sjsg return -EINVAL; 61271bb76ff1Sjsg } 61281bb76ff1Sjsg 61291bb76ff1Sjsg /* plane decryption is allow to change only in synchronous flips */ 61301bb76ff1Sjsg if (old_plane_state->decrypt != new_plane_state->decrypt) { 61311bb76ff1Sjsg drm_dbg_kms(&i915->drm, 61321bb76ff1Sjsg "[PLANE:%d:%s] Decryption cannot be changed in async flip\n", 61331bb76ff1Sjsg plane->base.base.id, plane->base.name); 61345ca02815Sjsg return -EINVAL; 61355ca02815Sjsg } 61365ca02815Sjsg } 61375ca02815Sjsg 61385ca02815Sjsg return 0; 61395ca02815Sjsg } 61405ca02815Sjsg 61415ca02815Sjsg static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) 61425ca02815Sjsg { 61431bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 61445ca02815Sjsg struct intel_crtc_state *crtc_state; 61455ca02815Sjsg struct intel_crtc *crtc; 61461bb76ff1Sjsg u8 affected_pipes = 0; 61471bb76ff1Sjsg u8 modeset_pipes = 0; 61485ca02815Sjsg int i; 61495ca02815Sjsg 61505ca02815Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 61511bb76ff1Sjsg affected_pipes |= crtc_state->bigjoiner_pipes; 61521bb76ff1Sjsg if (intel_crtc_needs_modeset(crtc_state)) 61531bb76ff1Sjsg modeset_pipes |= crtc_state->bigjoiner_pipes; 61541bb76ff1Sjsg } 61551bb76ff1Sjsg 61561bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, affected_pipes) { 61571bb76ff1Sjsg crtc_state = 
intel_atomic_get_crtc_state(&state->base, crtc); 61581bb76ff1Sjsg if (IS_ERR(crtc_state)) 61591bb76ff1Sjsg return PTR_ERR(crtc_state); 61601bb76ff1Sjsg } 61611bb76ff1Sjsg 61621bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, modeset_pipes) { 61635ca02815Sjsg int ret; 61645ca02815Sjsg 61651bb76ff1Sjsg crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 61665ca02815Sjsg 61671bb76ff1Sjsg crtc_state->uapi.mode_changed = true; 61685ca02815Sjsg 61691bb76ff1Sjsg ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 61705ca02815Sjsg if (ret) 61715ca02815Sjsg return ret; 61725ca02815Sjsg 61731bb76ff1Sjsg ret = intel_atomic_add_affected_planes(state, crtc); 61745ca02815Sjsg if (ret) 61755ca02815Sjsg return ret; 61765ca02815Sjsg } 61775ca02815Sjsg 61785ca02815Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 61795ca02815Sjsg /* Kill old bigjoiner link, we may re-establish afterwards */ 61805ca02815Sjsg if (intel_crtc_needs_modeset(crtc_state) && 61811bb76ff1Sjsg intel_crtc_is_bigjoiner_master(crtc_state)) 61821bb76ff1Sjsg kill_bigjoiner_slave(state, crtc); 61835ca02815Sjsg } 61845ca02815Sjsg 61855ca02815Sjsg return 0; 61865ca02815Sjsg } 61875ca02815Sjsg 6188c349dbc7Sjsg /** 6189c349dbc7Sjsg * intel_atomic_check - validate state object 6190c349dbc7Sjsg * @dev: drm device 6191c349dbc7Sjsg * @_state: state to validate 6192c349dbc7Sjsg */ 6193f005ef32Sjsg int intel_atomic_check(struct drm_device *dev, 6194c349dbc7Sjsg struct drm_atomic_state *_state) 6195c349dbc7Sjsg { 6196c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 6197c349dbc7Sjsg struct intel_atomic_state *state = to_intel_atomic_state(_state); 6198c349dbc7Sjsg struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6199c349dbc7Sjsg struct intel_crtc *crtc; 6200c349dbc7Sjsg int ret, i; 6201c349dbc7Sjsg bool any_ms = false; 6202c349dbc7Sjsg 6203c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6204c349dbc7Sjsg new_crtc_state, i) { 6205f005ef32Sjsg /* 6206f005ef32Sjsg * crtc's state no longer considered to be inherited 6207f005ef32Sjsg * after the first userspace/client initiated commit. 
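 * (state->internal appears to mark commits generated by the driver itself;
 * only commits that are not internal, i.e. userspace/client initiated ones,
 * clear the inherited flag below.)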
6208f005ef32Sjsg */
6209f005ef32Sjsg if (!state->internal)
6210f005ef32Sjsg new_crtc_state->inherited = false;
6211f005ef32Sjsg 
6212ad8b1aafSjsg if (new_crtc_state->inherited != old_crtc_state->inherited)
6213c349dbc7Sjsg new_crtc_state->uapi.mode_changed = true;
62141bb76ff1Sjsg 
62151bb76ff1Sjsg if (new_crtc_state->uapi.scaling_filter !=
62161bb76ff1Sjsg old_crtc_state->uapi.scaling_filter)
62171bb76ff1Sjsg new_crtc_state->uapi.mode_changed = true;
6218c349dbc7Sjsg }
6219c349dbc7Sjsg 
62205ca02815Sjsg intel_vrr_check_modeset(state);
62215ca02815Sjsg 
6222c349dbc7Sjsg ret = drm_atomic_helper_check_modeset(dev, &state->base);
6223c349dbc7Sjsg if (ret)
6224c349dbc7Sjsg goto fail;
6225c349dbc7Sjsg 
62261bb76ff1Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
62271bb76ff1Sjsg ret = intel_async_flip_check_uapi(state, crtc);
62281bb76ff1Sjsg if (ret)
62291bb76ff1Sjsg return ret;
62301bb76ff1Sjsg }
62311bb76ff1Sjsg 
62325ca02815Sjsg ret = intel_bigjoiner_add_affected_crtcs(state);
62335ca02815Sjsg if (ret)
62345ca02815Sjsg goto fail;
62355ca02815Sjsg 
6236c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6237c349dbc7Sjsg new_crtc_state, i) {
62385ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) {
62391bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
62401bb76ff1Sjsg copy_bigjoiner_crtc_state_nomodeset(state, crtc);
62411bb76ff1Sjsg else
62421bb76ff1Sjsg intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc);
6243c349dbc7Sjsg continue;
6244c349dbc7Sjsg }
6245c349dbc7Sjsg 
62461bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) {
62471bb76ff1Sjsg drm_WARN_ON(&dev_priv->drm, new_crtc_state->uapi.enable);
62485ca02815Sjsg continue;
62495ca02815Sjsg }
62505ca02815Sjsg 
62511bb76ff1Sjsg ret = intel_crtc_prepare_cleared_state(state, crtc);
6252c349dbc7Sjsg if (ret)
6253c349dbc7Sjsg goto fail;
6254c349dbc7Sjsg 
62551bb76ff1Sjsg if (!new_crtc_state->hw.enable)
62561bb76ff1Sjsg continue;
62571bb76ff1Sjsg 
62581bb76ff1Sjsg ret = intel_modeset_pipe_config(state, crtc);
62595ca02815Sjsg if (ret)
62605ca02815Sjsg goto fail;
6261c349dbc7Sjsg 
62621bb76ff1Sjsg ret = intel_atomic_check_bigjoiner(state, crtc);
6263c349dbc7Sjsg if (ret)
6264c349dbc7Sjsg goto fail;
6265c349dbc7Sjsg }
6266c349dbc7Sjsg 
6267c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
6268c349dbc7Sjsg new_crtc_state, i) {
62695ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state))
6270c349dbc7Sjsg continue;
6271c349dbc7Sjsg 
62721bb76ff1Sjsg if (new_crtc_state->hw.enable) {
62731bb76ff1Sjsg ret = intel_modeset_pipe_config_late(state, crtc);
6274c349dbc7Sjsg if (ret)
6275c349dbc7Sjsg goto fail;
62761bb76ff1Sjsg }
6277c349dbc7Sjsg 
6278c349dbc7Sjsg intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
6279c349dbc7Sjsg }
6280c349dbc7Sjsg 
6281c349dbc7Sjsg /**
6282c349dbc7Sjsg * Check if fastset is allowed by external dependencies like other
6283c349dbc7Sjsg * pipes and transcoders.
6284c349dbc7Sjsg *
6285c349dbc7Sjsg * Right now it only forces a full modeset when the MST master
6286c349dbc7Sjsg * transcoder did not change but the pipe of the master transcoder
6287c349dbc7Sjsg * needs a full modeset, so all slaves also need to do a full modeset, or
6288c349dbc7Sjsg * in case of port synced crtcs, if one of the synced crtcs
6289c349dbc7Sjsg * needs a full modeset, all other synced crtcs should be
6290c349dbc7Sjsg * forced a full modeset.
6291c349dbc7Sjsg */ 6292c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 62935ca02815Sjsg if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6294c349dbc7Sjsg continue; 6295c349dbc7Sjsg 6296c349dbc7Sjsg if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6297c349dbc7Sjsg enum transcoder master = new_crtc_state->mst_master_transcoder; 6298c349dbc7Sjsg 6299c349dbc7Sjsg if (intel_cpu_transcoders_need_modeset(state, BIT(master))) { 6300c349dbc7Sjsg new_crtc_state->uapi.mode_changed = true; 6301c349dbc7Sjsg new_crtc_state->update_pipe = false; 63021b89dd6fSjsg new_crtc_state->update_m_n = false; 6303c349dbc7Sjsg } 6304c349dbc7Sjsg } 6305c349dbc7Sjsg 6306c349dbc7Sjsg if (is_trans_port_sync_mode(new_crtc_state)) { 6307c349dbc7Sjsg u8 trans = new_crtc_state->sync_mode_slaves_mask; 6308c349dbc7Sjsg 6309c349dbc7Sjsg if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6310c349dbc7Sjsg trans |= BIT(new_crtc_state->master_transcoder); 6311c349dbc7Sjsg 6312c349dbc7Sjsg if (intel_cpu_transcoders_need_modeset(state, trans)) { 6313c349dbc7Sjsg new_crtc_state->uapi.mode_changed = true; 6314c349dbc7Sjsg new_crtc_state->update_pipe = false; 63151b89dd6fSjsg new_crtc_state->update_m_n = false; 6316c349dbc7Sjsg } 6317c349dbc7Sjsg } 63185ca02815Sjsg 63191bb76ff1Sjsg if (new_crtc_state->bigjoiner_pipes) { 63201bb76ff1Sjsg if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { 63215ca02815Sjsg new_crtc_state->uapi.mode_changed = true; 63225ca02815Sjsg new_crtc_state->update_pipe = false; 63231b89dd6fSjsg new_crtc_state->update_m_n = false; 63245ca02815Sjsg } 63255ca02815Sjsg } 6326c349dbc7Sjsg } 6327c349dbc7Sjsg 6328c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6329c349dbc7Sjsg new_crtc_state, i) { 63301bb76ff1Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) 63311bb76ff1Sjsg continue; 63321bb76ff1Sjsg 6333c349dbc7Sjsg any_ms = true; 6334c349dbc7Sjsg 63351bb76ff1Sjsg intel_release_shared_dplls(state, crtc); 6336c349dbc7Sjsg } 6337c349dbc7Sjsg 6338c349dbc7Sjsg if (any_ms && !check_digital_port_conflicts(state)) { 6339c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 6340c349dbc7Sjsg "rejecting conflicting digital port configuration\n"); 6341ad8b1aafSjsg ret = -EINVAL; 6342c349dbc7Sjsg goto fail; 6343c349dbc7Sjsg } 6344c349dbc7Sjsg 6345c349dbc7Sjsg ret = drm_dp_mst_atomic_check(&state->base); 6346c349dbc7Sjsg if (ret) 6347c349dbc7Sjsg goto fail; 6348c349dbc7Sjsg 6349ad8b1aafSjsg ret = intel_atomic_check_planes(state); 6350c349dbc7Sjsg if (ret) 6351c349dbc7Sjsg goto fail; 6352c349dbc7Sjsg 63531bb76ff1Sjsg ret = intel_compute_global_watermarks(state); 6354c349dbc7Sjsg if (ret) 6355c349dbc7Sjsg goto fail; 6356c349dbc7Sjsg 6357c349dbc7Sjsg ret = intel_bw_atomic_check(state); 6358c349dbc7Sjsg if (ret) 6359c349dbc7Sjsg goto fail; 6360c349dbc7Sjsg 63611bb76ff1Sjsg ret = intel_cdclk_atomic_check(state, &any_ms); 6362ad8b1aafSjsg if (ret) 6363ad8b1aafSjsg goto fail; 6364ad8b1aafSjsg 63655ca02815Sjsg if (intel_any_crtc_needs_modeset(state)) 63665ca02815Sjsg any_ms = true; 63675ca02815Sjsg 6368ad8b1aafSjsg if (any_ms) { 6369ad8b1aafSjsg ret = intel_modeset_checks(state); 6370ad8b1aafSjsg if (ret) 6371ad8b1aafSjsg goto fail; 6372ad8b1aafSjsg 6373ad8b1aafSjsg ret = intel_modeset_calc_cdclk(state); 6374ad8b1aafSjsg if (ret) 6375ad8b1aafSjsg return ret; 6376ad8b1aafSjsg } 6377ad8b1aafSjsg 6378f005ef32Sjsg ret = intel_pmdemand_atomic_check(state); 6379f005ef32Sjsg if (ret) 6380f005ef32Sjsg goto fail; 6381f005ef32Sjsg 
6382ad8b1aafSjsg ret = intel_atomic_check_crtcs(state); 6383ad8b1aafSjsg if (ret) 6384ad8b1aafSjsg goto fail; 6385ad8b1aafSjsg 63861bb76ff1Sjsg ret = intel_fbc_atomic_check(state); 63875ca02815Sjsg if (ret) 63885ca02815Sjsg goto fail; 63891bb76ff1Sjsg 63901bb76ff1Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 63911bb76ff1Sjsg new_crtc_state, i) { 6392f005ef32Sjsg intel_color_assert_luts(new_crtc_state); 6393f005ef32Sjsg 63941bb76ff1Sjsg ret = intel_async_flip_check_hw(state, crtc); 63951bb76ff1Sjsg if (ret) 63961bb76ff1Sjsg goto fail; 63975ca02815Sjsg 6398f005ef32Sjsg /* Either full modeset or fastset (or neither), never both */ 6399f005ef32Sjsg drm_WARN_ON(&dev_priv->drm, 6400f005ef32Sjsg intel_crtc_needs_modeset(new_crtc_state) && 6401f005ef32Sjsg intel_crtc_needs_fastset(new_crtc_state)); 6402f005ef32Sjsg 64035ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state) && 6404f005ef32Sjsg !intel_crtc_needs_fastset(new_crtc_state)) 6405c349dbc7Sjsg continue; 6406c349dbc7Sjsg 64071bb76ff1Sjsg intel_crtc_state_dump(new_crtc_state, state, 64085ca02815Sjsg intel_crtc_needs_modeset(new_crtc_state) ? 64091bb76ff1Sjsg "modeset" : "fastset"); 6410c349dbc7Sjsg } 6411c349dbc7Sjsg 6412c349dbc7Sjsg return 0; 6413c349dbc7Sjsg 6414c349dbc7Sjsg fail: 6415c349dbc7Sjsg if (ret == -EDEADLK) 6416c349dbc7Sjsg return ret; 6417c349dbc7Sjsg 6418c349dbc7Sjsg /* 6419c349dbc7Sjsg * FIXME would probably be nice to know which crtc specifically 6420c349dbc7Sjsg * caused the failure, in cases where we can pinpoint it. 6421c349dbc7Sjsg */ 6422c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6423c349dbc7Sjsg new_crtc_state, i) 64241bb76ff1Sjsg intel_crtc_state_dump(new_crtc_state, state, "failed"); 6425c349dbc7Sjsg 6426c349dbc7Sjsg return ret; 6427c349dbc7Sjsg } 6428c349dbc7Sjsg 6429c349dbc7Sjsg static int intel_atomic_prepare_commit(struct intel_atomic_state *state) 6430c349dbc7Sjsg { 6431ad8b1aafSjsg struct intel_crtc_state *crtc_state; 6432ad8b1aafSjsg struct intel_crtc *crtc; 6433ad8b1aafSjsg int i, ret; 6434ad8b1aafSjsg 6435ad8b1aafSjsg ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base); 6436ad8b1aafSjsg if (ret < 0) 6437ad8b1aafSjsg return ret; 6438ad8b1aafSjsg 6439ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 6440f005ef32Sjsg if (intel_crtc_needs_color_update(crtc_state)) 6441f005ef32Sjsg intel_color_prepare_commit(crtc_state); 6442ad8b1aafSjsg } 6443ad8b1aafSjsg 6444ad8b1aafSjsg return 0; 6445c349dbc7Sjsg } 6446c349dbc7Sjsg 6447c349dbc7Sjsg void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, 6448c349dbc7Sjsg struct intel_crtc_state *crtc_state) 6449c349dbc7Sjsg { 6450c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6451c349dbc7Sjsg 64525ca02815Sjsg if (DISPLAY_VER(dev_priv) != 2 || crtc_state->active_planes) 6453c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 6454c349dbc7Sjsg 6455c349dbc7Sjsg if (crtc_state->has_pch_encoder) { 6456c349dbc7Sjsg enum pipe pch_transcoder = 6457c349dbc7Sjsg intel_crtc_pch_transcoder(crtc); 6458c349dbc7Sjsg 6459c349dbc7Sjsg intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); 6460c349dbc7Sjsg } 6461c349dbc7Sjsg } 6462c349dbc7Sjsg 6463c349dbc7Sjsg static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, 6464c349dbc7Sjsg const struct intel_crtc_state *new_crtc_state) 6465c349dbc7Sjsg { 6466c349dbc7Sjsg struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 6467c349dbc7Sjsg 
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6468c349dbc7Sjsg 6469c349dbc7Sjsg /* 6470c349dbc7Sjsg * Update pipe size and adjust fitter if needed: the reason for this is 6471c349dbc7Sjsg * that in compute_mode_changes we check the native mode (not the pfit 6472c349dbc7Sjsg * mode) to see if we can flip rather than do a full mode set. In the 6473c349dbc7Sjsg * fastboot case, we'll flip, but if we don't update the pipesrc and 6474c349dbc7Sjsg * pfit state, we'll end up with a big fb scanned out into the wrong 6475c349dbc7Sjsg * sized surface. 6476c349dbc7Sjsg */ 6477c349dbc7Sjsg intel_set_pipe_src_size(new_crtc_state); 6478c349dbc7Sjsg 6479c349dbc7Sjsg /* on skylake this is done by detaching scalers */ 64805ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) { 6481c349dbc7Sjsg if (new_crtc_state->pch_pfit.enabled) 6482c349dbc7Sjsg skl_pfit_enable(new_crtc_state); 6483c349dbc7Sjsg } else if (HAS_PCH_SPLIT(dev_priv)) { 6484c349dbc7Sjsg if (new_crtc_state->pch_pfit.enabled) 6485c349dbc7Sjsg ilk_pfit_enable(new_crtc_state); 6486c349dbc7Sjsg else if (old_crtc_state->pch_pfit.enabled) 6487c349dbc7Sjsg ilk_pfit_disable(old_crtc_state); 6488c349dbc7Sjsg } 6489c349dbc7Sjsg 6490c349dbc7Sjsg /* 6491c349dbc7Sjsg * The register is supposedly single buffered so perhaps 6492c349dbc7Sjsg * not 100% correct to do this here. But SKL+ calculate 6493c349dbc7Sjsg * this based on the adjust pixel rate so pfit changes do 6494c349dbc7Sjsg * affect it and so it must be updated for fastsets. 6495c349dbc7Sjsg * HSW/BDW only really need this here for fastboot, after 6496c349dbc7Sjsg * that the value should not change without a full modeset. 6497c349dbc7Sjsg */ 64985ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 || 6499c349dbc7Sjsg IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 6500c349dbc7Sjsg hsw_set_linetime_wm(new_crtc_state); 6501c349dbc7Sjsg 65021b89dd6fSjsg if (new_crtc_state->update_m_n) 65031bb76ff1Sjsg intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, 65041bb76ff1Sjsg &new_crtc_state->dp_m_n); 6505c349dbc7Sjsg } 6506c349dbc7Sjsg 65075ca02815Sjsg static void commit_pipe_pre_planes(struct intel_atomic_state *state, 6508ad8b1aafSjsg struct intel_crtc *crtc) 6509c349dbc7Sjsg { 6510c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6511ad8b1aafSjsg const struct intel_crtc_state *old_crtc_state = 6512ad8b1aafSjsg intel_atomic_get_old_crtc_state(state, crtc); 6513ad8b1aafSjsg const struct intel_crtc_state *new_crtc_state = 6514ad8b1aafSjsg intel_atomic_get_new_crtc_state(state, crtc); 65155ca02815Sjsg bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6516c349dbc7Sjsg 6517c349dbc7Sjsg /* 6518c349dbc7Sjsg * During modesets pipe configuration was programmed as the 6519c349dbc7Sjsg * CRTC was enabled. 
6520c349dbc7Sjsg */ 6521c349dbc7Sjsg if (!modeset) { 6522f005ef32Sjsg if (intel_crtc_needs_color_update(new_crtc_state)) 65231bb76ff1Sjsg intel_color_commit_arm(new_crtc_state); 6524c349dbc7Sjsg 65255ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6526f005ef32Sjsg bdw_set_pipe_misc(new_crtc_state); 6527c349dbc7Sjsg 6528f005ef32Sjsg if (intel_crtc_needs_fastset(new_crtc_state)) 6529c349dbc7Sjsg intel_pipe_fastset(old_crtc_state, new_crtc_state); 6530c349dbc7Sjsg } 6531c349dbc7Sjsg 65321bb76ff1Sjsg intel_psr2_program_trans_man_trk_ctl(new_crtc_state); 65331bb76ff1Sjsg 65341bb76ff1Sjsg intel_atomic_update_watermarks(state, crtc); 6535c349dbc7Sjsg } 6536c349dbc7Sjsg 65375ca02815Sjsg static void commit_pipe_post_planes(struct intel_atomic_state *state, 65385ca02815Sjsg struct intel_crtc *crtc) 65395ca02815Sjsg { 65405ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6541f5f7381eSjsg const struct intel_crtc_state *old_crtc_state = 6542f5f7381eSjsg intel_atomic_get_old_crtc_state(state, crtc); 65435ca02815Sjsg const struct intel_crtc_state *new_crtc_state = 65445ca02815Sjsg intel_atomic_get_new_crtc_state(state, crtc); 65455ca02815Sjsg 65465ca02815Sjsg /* 65475ca02815Sjsg * Disable the scaler(s) after the plane(s) so that we don't 65485ca02815Sjsg * get a catastrophic underrun even if the two operations 65495ca02815Sjsg * end up happening in two different frames. 65505ca02815Sjsg */ 65515ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9 && 65525ca02815Sjsg !intel_crtc_needs_modeset(new_crtc_state)) 65535ca02815Sjsg skl_detach_scalers(new_crtc_state); 6554f5f7381eSjsg 6555f5f7381eSjsg if (vrr_enabling(old_crtc_state, new_crtc_state)) 6556f5f7381eSjsg intel_vrr_enable(new_crtc_state); 65575ca02815Sjsg } 65585ca02815Sjsg 6559ad8b1aafSjsg static void intel_enable_crtc(struct intel_atomic_state *state, 6560ad8b1aafSjsg struct intel_crtc *crtc) 6561c349dbc7Sjsg { 6562c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6563ad8b1aafSjsg const struct intel_crtc_state *new_crtc_state = 6564ad8b1aafSjsg intel_atomic_get_new_crtc_state(state, crtc); 6565c349dbc7Sjsg 65665ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) 6567ad8b1aafSjsg return; 6568ad8b1aafSjsg 6569f005ef32Sjsg /* VRR will be enable later, if required */ 6570f005ef32Sjsg intel_crtc_update_active_timings(new_crtc_state, false); 6571c349dbc7Sjsg 65721bb76ff1Sjsg dev_priv->display.funcs.display->crtc_enable(state, crtc); 6573c349dbc7Sjsg 65741bb76ff1Sjsg if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) 65755ca02815Sjsg return; 65765ca02815Sjsg 6577c349dbc7Sjsg /* vblanks work again, re-enable pipe CRC. 
*/ 6578c349dbc7Sjsg intel_crtc_enable_pipe_crc(crtc); 6579ad8b1aafSjsg } 6580ad8b1aafSjsg 6581ad8b1aafSjsg static void intel_update_crtc(struct intel_atomic_state *state, 6582ad8b1aafSjsg struct intel_crtc *crtc) 6583ad8b1aafSjsg { 65841bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 6585ad8b1aafSjsg const struct intel_crtc_state *old_crtc_state = 6586ad8b1aafSjsg intel_atomic_get_old_crtc_state(state, crtc); 6587ad8b1aafSjsg struct intel_crtc_state *new_crtc_state = 6588ad8b1aafSjsg intel_atomic_get_new_crtc_state(state, crtc); 65895ca02815Sjsg bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6590ad8b1aafSjsg 6591f005ef32Sjsg if (old_crtc_state->inherited || 6592f005ef32Sjsg intel_crtc_needs_modeset(new_crtc_state)) { 6593f005ef32Sjsg if (HAS_DPT(i915)) 6594f005ef32Sjsg intel_dpt_configure(crtc); 6595f005ef32Sjsg } 6596f005ef32Sjsg 6597ad8b1aafSjsg if (!modeset) { 6598c349dbc7Sjsg if (new_crtc_state->preload_luts && 6599f005ef32Sjsg intel_crtc_needs_color_update(new_crtc_state)) 6600c349dbc7Sjsg intel_color_load_luts(new_crtc_state); 6601c349dbc7Sjsg 6602c349dbc7Sjsg intel_pre_plane_update(state, crtc); 6603c349dbc7Sjsg 6604f005ef32Sjsg if (intel_crtc_needs_fastset(new_crtc_state)) 6605c349dbc7Sjsg intel_encoders_update_pipe(state, crtc); 66061bb76ff1Sjsg 66071bb76ff1Sjsg if (DISPLAY_VER(i915) >= 11 && 6608f005ef32Sjsg intel_crtc_needs_fastset(new_crtc_state)) 66091bb76ff1Sjsg icl_set_pipe_chicken(new_crtc_state); 6610c349dbc7Sjsg } 6611c349dbc7Sjsg 66121bb76ff1Sjsg intel_fbc_update(state, crtc); 66131bb76ff1Sjsg 6614f005ef32Sjsg drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 6615f005ef32Sjsg 66161bb76ff1Sjsg if (!modeset && 6617f005ef32Sjsg intel_crtc_needs_color_update(new_crtc_state)) 66181bb76ff1Sjsg intel_color_commit_noarm(new_crtc_state); 66191bb76ff1Sjsg 66201bb76ff1Sjsg intel_crtc_planes_update_noarm(state, crtc); 6621c349dbc7Sjsg 6622c349dbc7Sjsg /* Perform vblank evasion around commit operation */ 6623dafb1c99Sjsg intel_pipe_update_start(state, crtc); 6624c349dbc7Sjsg 66255ca02815Sjsg commit_pipe_pre_planes(state, crtc); 6626c349dbc7Sjsg 66271bb76ff1Sjsg intel_crtc_planes_update_arm(state, crtc); 6628c349dbc7Sjsg 66295ca02815Sjsg commit_pipe_post_planes(state, crtc); 66305ca02815Sjsg 6631dafb1c99Sjsg intel_pipe_update_end(state, crtc); 6632c349dbc7Sjsg 6633c349dbc7Sjsg /* 6634f5f7381eSjsg * VRR/Seamless M/N update may need to update frame timings. 6635f5f7381eSjsg * 6636f5f7381eSjsg * FIXME Should be synchronized with the start of vblank somehow... 6637f5f7381eSjsg */ 66381b89dd6fSjsg if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n) 6639f5f7381eSjsg intel_crtc_update_active_timings(new_crtc_state, 6640f5f7381eSjsg new_crtc_state->vrr.enable); 6641f5f7381eSjsg 6642f5f7381eSjsg /* 6643c349dbc7Sjsg * We usually enable FIFO underrun interrupts as part of the 6644c349dbc7Sjsg * CRTC enable sequence during modesets. But when we inherit a 6645c349dbc7Sjsg * valid pipe configuration from the BIOS we need to take care 6646c349dbc7Sjsg * of enabling them on the CRTC's first fastset. 
6647c349dbc7Sjsg */ 6648f005ef32Sjsg if (intel_crtc_needs_fastset(new_crtc_state) && !modeset && 6649ad8b1aafSjsg old_crtc_state->inherited) 6650c349dbc7Sjsg intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 6651c349dbc7Sjsg } 6652c349dbc7Sjsg 6653c349dbc7Sjsg static void intel_old_crtc_state_disables(struct intel_atomic_state *state, 6654c349dbc7Sjsg struct intel_crtc_state *old_crtc_state, 6655c349dbc7Sjsg struct intel_crtc_state *new_crtc_state, 6656c349dbc7Sjsg struct intel_crtc *crtc) 6657c349dbc7Sjsg { 6658c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6659c349dbc7Sjsg 66605ca02815Sjsg /* 6661c349dbc7Sjsg * We need to disable pipe CRC before disabling the pipe, 6662c349dbc7Sjsg * or we race against vblank off. 6663c349dbc7Sjsg */ 6664c349dbc7Sjsg intel_crtc_disable_pipe_crc(crtc); 6665c349dbc7Sjsg 66661bb76ff1Sjsg dev_priv->display.funcs.display->crtc_disable(state, crtc); 6667c349dbc7Sjsg crtc->active = false; 6668c349dbc7Sjsg intel_fbc_disable(crtc); 6669c349dbc7Sjsg 6670f005ef32Sjsg if (!new_crtc_state->hw.active) 66711bb76ff1Sjsg intel_initial_watermarks(state, crtc); 6672c349dbc7Sjsg } 6673c349dbc7Sjsg 6674c349dbc7Sjsg static void intel_commit_modeset_disables(struct intel_atomic_state *state) 6675c349dbc7Sjsg { 6676c349dbc7Sjsg struct intel_crtc_state *new_crtc_state, *old_crtc_state; 6677c349dbc7Sjsg struct intel_crtc *crtc; 6678c349dbc7Sjsg u32 handled = 0; 6679c349dbc7Sjsg int i; 6680c349dbc7Sjsg 66811bb76ff1Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 66821bb76ff1Sjsg new_crtc_state, i) { 66831bb76ff1Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) 66841bb76ff1Sjsg continue; 66851bb76ff1Sjsg 6686f005ef32Sjsg intel_pre_plane_update(state, crtc); 6687f005ef32Sjsg 66881bb76ff1Sjsg if (!old_crtc_state->hw.active) 66891bb76ff1Sjsg continue; 66901bb76ff1Sjsg 66911bb76ff1Sjsg intel_crtc_disable_planes(state, crtc); 66921bb76ff1Sjsg } 66931bb76ff1Sjsg 6694c349dbc7Sjsg /* Only disable port sync and MST slaves */ 6695c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6696c349dbc7Sjsg new_crtc_state, i) { 66971bb76ff1Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) 6698c349dbc7Sjsg continue; 6699c349dbc7Sjsg 6700c349dbc7Sjsg if (!old_crtc_state->hw.active) 6701c349dbc7Sjsg continue; 6702c349dbc7Sjsg 6703c349dbc7Sjsg /* In case of Transcoder port Sync master slave CRTCs can be 6704c349dbc7Sjsg * assigned in any order and we need to make sure that 6705c349dbc7Sjsg * slave CRTCs are disabled first and then master CRTC since 6706c349dbc7Sjsg * Slave vblanks are masked till Master Vblanks. 
6707c349dbc7Sjsg */ 6708c349dbc7Sjsg if (!is_trans_port_sync_slave(old_crtc_state) && 67091bb76ff1Sjsg !intel_dp_mst_is_slave_trans(old_crtc_state) && 67101bb76ff1Sjsg !intel_crtc_is_bigjoiner_slave(old_crtc_state)) 6711c349dbc7Sjsg continue; 6712c349dbc7Sjsg 6713c349dbc7Sjsg intel_old_crtc_state_disables(state, old_crtc_state, 6714c349dbc7Sjsg new_crtc_state, crtc); 6715c349dbc7Sjsg handled |= BIT(crtc->pipe); 6716c349dbc7Sjsg } 6717c349dbc7Sjsg 6718c349dbc7Sjsg /* Disable everything else left on */ 6719c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6720c349dbc7Sjsg new_crtc_state, i) { 67215ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state) || 67221bb76ff1Sjsg (handled & BIT(crtc->pipe))) 6723c349dbc7Sjsg continue; 6724c349dbc7Sjsg 67251bb76ff1Sjsg if (!old_crtc_state->hw.active) 67261bb76ff1Sjsg continue; 67275ca02815Sjsg 6728c349dbc7Sjsg intel_old_crtc_state_disables(state, old_crtc_state, 6729c349dbc7Sjsg new_crtc_state, crtc); 6730c349dbc7Sjsg } 6731c349dbc7Sjsg } 6732c349dbc7Sjsg 6733c349dbc7Sjsg static void intel_commit_modeset_enables(struct intel_atomic_state *state) 6734c349dbc7Sjsg { 6735ad8b1aafSjsg struct intel_crtc_state *new_crtc_state; 6736c349dbc7Sjsg struct intel_crtc *crtc; 6737c349dbc7Sjsg int i; 6738c349dbc7Sjsg 6739ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6740c349dbc7Sjsg if (!new_crtc_state->hw.active) 6741c349dbc7Sjsg continue; 6742c349dbc7Sjsg 6743ad8b1aafSjsg intel_enable_crtc(state, crtc); 6744ad8b1aafSjsg intel_update_crtc(state, crtc); 6745c349dbc7Sjsg } 6746c349dbc7Sjsg } 6747c349dbc7Sjsg 6748c349dbc7Sjsg static void skl_commit_modeset_enables(struct intel_atomic_state *state) 6749c349dbc7Sjsg { 6750c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev); 6751c349dbc7Sjsg struct intel_crtc *crtc; 6752c349dbc7Sjsg struct intel_crtc_state *old_crtc_state, *new_crtc_state; 6753c349dbc7Sjsg struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; 6754c349dbc7Sjsg u8 update_pipes = 0, modeset_pipes = 0; 6755c349dbc7Sjsg int i; 6756c349dbc7Sjsg 6757c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 6758c349dbc7Sjsg enum pipe pipe = crtc->pipe; 6759c349dbc7Sjsg 6760c349dbc7Sjsg if (!new_crtc_state->hw.active) 6761c349dbc7Sjsg continue; 6762c349dbc7Sjsg 6763c349dbc7Sjsg /* ignore allocations for crtc's that have been turned off. */ 67645ca02815Sjsg if (!intel_crtc_needs_modeset(new_crtc_state)) { 6765c349dbc7Sjsg entries[pipe] = old_crtc_state->wm.skl.ddb; 6766c349dbc7Sjsg update_pipes |= BIT(pipe); 6767c349dbc7Sjsg } else { 6768c349dbc7Sjsg modeset_pipes |= BIT(pipe); 6769c349dbc7Sjsg } 6770c349dbc7Sjsg } 6771c349dbc7Sjsg 6772c349dbc7Sjsg /* 6773c349dbc7Sjsg * Whenever the number of active pipes changes, we need to make sure we 6774c349dbc7Sjsg * update the pipes in the right order so that their ddb allocations 6775c349dbc7Sjsg * never overlap with each other between CRTC updates. Otherwise we'll 6776c349dbc7Sjsg * cause pipe underruns and other bad stuff. 6777c349dbc7Sjsg * 6778c349dbc7Sjsg * So first lets enable all pipes that do not need a fullmodeset as 6779c349dbc7Sjsg * those don't have any external dependency. 
6780c349dbc7Sjsg */ 6781c349dbc7Sjsg while (update_pipes) { 6782c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6783c349dbc7Sjsg new_crtc_state, i) { 6784c349dbc7Sjsg enum pipe pipe = crtc->pipe; 6785c349dbc7Sjsg 6786c349dbc7Sjsg if ((update_pipes & BIT(pipe)) == 0) 6787c349dbc7Sjsg continue; 6788c349dbc7Sjsg 6789c349dbc7Sjsg if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6790c349dbc7Sjsg entries, I915_MAX_PIPES, pipe)) 6791c349dbc7Sjsg continue; 6792c349dbc7Sjsg 6793c349dbc7Sjsg entries[pipe] = new_crtc_state->wm.skl.ddb; 6794c349dbc7Sjsg update_pipes &= ~BIT(pipe); 6795c349dbc7Sjsg 6796ad8b1aafSjsg intel_update_crtc(state, crtc); 6797c349dbc7Sjsg 6798c349dbc7Sjsg /* 6799c349dbc7Sjsg * If this is an already active pipe, its DDB changed, 6800c349dbc7Sjsg * and this isn't the last pipe that needs updating 6801c349dbc7Sjsg * then we need to wait for a vblank to pass for the 6802c349dbc7Sjsg * new ddb allocation to take effect. 6803c349dbc7Sjsg */ 6804c349dbc7Sjsg if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, 6805c349dbc7Sjsg &old_crtc_state->wm.skl.ddb) && 6806c349dbc7Sjsg (update_pipes | modeset_pipes)) 68071bb76ff1Sjsg intel_crtc_wait_for_next_vblank(crtc); 6808c349dbc7Sjsg } 6809c349dbc7Sjsg } 6810c349dbc7Sjsg 6811ad8b1aafSjsg update_pipes = modeset_pipes; 6812ad8b1aafSjsg 6813c349dbc7Sjsg /* 6814c349dbc7Sjsg * Enable all pipes that need a modeset and do not depend on other 6815c349dbc7Sjsg * pipes 6816c349dbc7Sjsg */ 6817ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6818c349dbc7Sjsg enum pipe pipe = crtc->pipe; 6819c349dbc7Sjsg 6820c349dbc7Sjsg if ((modeset_pipes & BIT(pipe)) == 0) 6821c349dbc7Sjsg continue; 6822c349dbc7Sjsg 6823c349dbc7Sjsg if (intel_dp_mst_is_slave_trans(new_crtc_state) || 68245ca02815Sjsg is_trans_port_sync_master(new_crtc_state) || 68251bb76ff1Sjsg intel_crtc_is_bigjoiner_master(new_crtc_state)) 6826c349dbc7Sjsg continue; 6827c349dbc7Sjsg 6828c349dbc7Sjsg modeset_pipes &= ~BIT(pipe); 6829c349dbc7Sjsg 6830ad8b1aafSjsg intel_enable_crtc(state, crtc); 6831c349dbc7Sjsg } 6832c349dbc7Sjsg 6833c349dbc7Sjsg /* 6834ad8b1aafSjsg * Then we enable all remaining pipes that depend on other 68355ca02815Sjsg * pipes: MST slaves and port sync masters, big joiner master 6836c349dbc7Sjsg */ 6837ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6838c349dbc7Sjsg enum pipe pipe = crtc->pipe; 6839c349dbc7Sjsg 6840c349dbc7Sjsg if ((modeset_pipes & BIT(pipe)) == 0) 6841c349dbc7Sjsg continue; 6842c349dbc7Sjsg 6843ad8b1aafSjsg modeset_pipes &= ~BIT(pipe); 6844ad8b1aafSjsg 6845ad8b1aafSjsg intel_enable_crtc(state, crtc); 6846ad8b1aafSjsg } 6847ad8b1aafSjsg 6848ad8b1aafSjsg /* 6849ad8b1aafSjsg * Finally we do the plane updates/etc. for all pipes that got enabled. 
6850ad8b1aafSjsg */ 6851ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6852ad8b1aafSjsg enum pipe pipe = crtc->pipe; 6853ad8b1aafSjsg 6854ad8b1aafSjsg if ((update_pipes & BIT(pipe)) == 0) 6855ad8b1aafSjsg continue; 6856ad8b1aafSjsg 6857c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6858c349dbc7Sjsg entries, I915_MAX_PIPES, pipe)); 6859c349dbc7Sjsg 6860c349dbc7Sjsg entries[pipe] = new_crtc_state->wm.skl.ddb; 6861ad8b1aafSjsg update_pipes &= ~BIT(pipe); 6862c349dbc7Sjsg 6863ad8b1aafSjsg intel_update_crtc(state, crtc); 6864c349dbc7Sjsg } 6865c349dbc7Sjsg 6866c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, modeset_pipes); 6867ad8b1aafSjsg drm_WARN_ON(&dev_priv->drm, update_pipes); 6868c349dbc7Sjsg } 6869c349dbc7Sjsg 6870c349dbc7Sjsg static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 6871c349dbc7Sjsg { 6872c349dbc7Sjsg struct intel_atomic_state *state, *next; 6873c349dbc7Sjsg struct llist_node *freed; 6874c349dbc7Sjsg 68751bb76ff1Sjsg freed = llist_del_all(&dev_priv->display.atomic_helper.free_list); 6876c349dbc7Sjsg llist_for_each_entry_safe(state, next, freed, freed) 6877c349dbc7Sjsg drm_atomic_state_put(&state->base); 6878c349dbc7Sjsg } 6879c349dbc7Sjsg 6880f005ef32Sjsg void intel_atomic_helper_free_state_worker(struct work_struct *work) 6881c349dbc7Sjsg { 6882c349dbc7Sjsg struct drm_i915_private *dev_priv = 68831bb76ff1Sjsg container_of(work, typeof(*dev_priv), display.atomic_helper.free_work); 6884c349dbc7Sjsg 6885c349dbc7Sjsg intel_atomic_helper_free_state(dev_priv); 6886c349dbc7Sjsg } 6887c349dbc7Sjsg 6888c349dbc7Sjsg static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 6889c349dbc7Sjsg { 6890c349dbc7Sjsg struct wait_queue_entry wait_fence, wait_reset; 6891c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 6892c349dbc7Sjsg 6893c349dbc7Sjsg init_wait_entry(&wait_fence, 0); 6894c349dbc7Sjsg init_wait_entry(&wait_reset, 0); 6895c349dbc7Sjsg for (;;) { 6896c349dbc7Sjsg prepare_to_wait(&intel_state->commit_ready.wait, 6897c349dbc7Sjsg &wait_fence, TASK_UNINTERRUPTIBLE); 68981bb76ff1Sjsg prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 6899c349dbc7Sjsg I915_RESET_MODESET), 6900c349dbc7Sjsg &wait_reset, TASK_UNINTERRUPTIBLE); 6901c349dbc7Sjsg 6902c349dbc7Sjsg 6903c349dbc7Sjsg if (i915_sw_fence_done(&intel_state->commit_ready) || 69041bb76ff1Sjsg test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) 6905c349dbc7Sjsg break; 6906c349dbc7Sjsg 6907c349dbc7Sjsg schedule(); 6908c349dbc7Sjsg } 6909c349dbc7Sjsg finish_wait(&intel_state->commit_ready.wait, &wait_fence); 69101bb76ff1Sjsg finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 6911c349dbc7Sjsg I915_RESET_MODESET), 6912c349dbc7Sjsg &wait_reset); 6913c349dbc7Sjsg } 6914c349dbc7Sjsg 6915c349dbc7Sjsg static void intel_atomic_cleanup_work(struct work_struct *work) 6916c349dbc7Sjsg { 6917ad8b1aafSjsg struct intel_atomic_state *state = 6918ad8b1aafSjsg container_of(work, struct intel_atomic_state, base.commit_work); 6919ad8b1aafSjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 6920f005ef32Sjsg struct intel_crtc_state *old_crtc_state; 6921f005ef32Sjsg struct intel_crtc *crtc; 6922f005ef32Sjsg int i; 6923c349dbc7Sjsg 6924f005ef32Sjsg for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i) 6925f005ef32Sjsg intel_color_cleanup_commit(old_crtc_state); 6926f005ef32Sjsg 6927ad8b1aafSjsg drm_atomic_helper_cleanup_planes(&i915->drm, &state->base); 
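	/* Cleanup is finished: signal commit_cleanup_done and drop the reference this commit work held on the atomic state. */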
6928ad8b1aafSjsg drm_atomic_helper_commit_cleanup_done(&state->base); 6929ad8b1aafSjsg drm_atomic_state_put(&state->base); 6930c349dbc7Sjsg 6931c349dbc7Sjsg intel_atomic_helper_free_state(i915); 6932c349dbc7Sjsg } 6933c349dbc7Sjsg 69345ca02815Sjsg static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state) 69355ca02815Sjsg { 69365ca02815Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 69375ca02815Sjsg struct intel_plane *plane; 69385ca02815Sjsg struct intel_plane_state *plane_state; 69395ca02815Sjsg int i; 69405ca02815Sjsg 69415ca02815Sjsg for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 69425ca02815Sjsg struct drm_framebuffer *fb = plane_state->hw.fb; 69431bb76ff1Sjsg int cc_plane; 69445ca02815Sjsg int ret; 69455ca02815Sjsg 69461bb76ff1Sjsg if (!fb) 69471bb76ff1Sjsg continue; 69481bb76ff1Sjsg 69491bb76ff1Sjsg cc_plane = intel_fb_rc_ccs_cc_plane(fb); 69501bb76ff1Sjsg if (cc_plane < 0) 69515ca02815Sjsg continue; 69525ca02815Sjsg 69535ca02815Sjsg /* 69545ca02815Sjsg * The layout of the fast clear color value expected by HW 69551bb76ff1Sjsg * (the DRM ABI requiring this value to be located in fb at 69561bb76ff1Sjsg * offset 0 of cc plane, plane #2 previous generations or 69571bb76ff1Sjsg * plane #1 for flat ccs): 69585ca02815Sjsg * - 4 x 4 bytes per-channel value 69595ca02815Sjsg * (in surface type specific float/int format provided by the fb user) 69605ca02815Sjsg * - 8 bytes native color value used by the display 69615ca02815Sjsg * (converted/written by GPU during a fast clear operation using the 69625ca02815Sjsg * above per-channel values) 69635ca02815Sjsg * 69645ca02815Sjsg * The commit's FB prepare hook already ensured that FB obj is pinned and the 69655ca02815Sjsg * caller made sure that the object is synced wrt. the related color clear value 69665ca02815Sjsg * GPU write on it. 69675ca02815Sjsg */ 69685ca02815Sjsg ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 69691bb76ff1Sjsg fb->offsets[cc_plane] + 16, 69705ca02815Sjsg &plane_state->ccval, 69715ca02815Sjsg sizeof(plane_state->ccval)); 69725ca02815Sjsg /* The above could only fail if the FB obj has an unexpected backing store type. */ 69735ca02815Sjsg drm_WARN_ON(&i915->drm, ret); 69745ca02815Sjsg } 69755ca02815Sjsg } 69765ca02815Sjsg 6977c349dbc7Sjsg static void intel_atomic_commit_tail(struct intel_atomic_state *state) 6978c349dbc7Sjsg { 6979c349dbc7Sjsg struct drm_device *dev = state->base.dev; 6980c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 6981c349dbc7Sjsg struct intel_crtc_state *new_crtc_state, *old_crtc_state; 6982c349dbc7Sjsg struct intel_crtc *crtc; 69831bb76ff1Sjsg struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; 6984c349dbc7Sjsg intel_wakeref_t wakeref = 0; 6985c349dbc7Sjsg int i; 6986c349dbc7Sjsg 6987c349dbc7Sjsg intel_atomic_commit_fence_wait(state); 6988c349dbc7Sjsg 6989c349dbc7Sjsg drm_atomic_helper_wait_for_dependencies(&state->base); 69901bb76ff1Sjsg drm_dp_mst_atomic_wait_for_dependencies(&state->base); 6991c349dbc7Sjsg 6992f005ef32Sjsg /* 6993f005ef32Sjsg * During full modesets we write a lot of registers, wait 6994f005ef32Sjsg * for PLLs, etc. Doing that while DC states are enabled 6995f005ef32Sjsg * is not a good idea. 6996f005ef32Sjsg * 6997f005ef32Sjsg * During fastsets and other updates we also need to 6998f005ef32Sjsg * disable DC states due to the following scenario: 6999f005ef32Sjsg * 1. DC5 exit and PSR exit happen 7000f005ef32Sjsg * 2. Some or all _noarm() registers are written 7001f005ef32Sjsg * 3. 
Due to some long delay PSR is re-entered 7002f005ef32Sjsg * 4. DC5 entry -> DMC saves the already written new 7003f005ef32Sjsg * _noarm() registers and the old not yet written 7004f005ef32Sjsg * _arm() registers 7005f005ef32Sjsg * 5. DC5 exit -> DMC restores a mixture of old and 7006f005ef32Sjsg * new register values and arms the update 7007f005ef32Sjsg * 6. PSR exit -> hardware latches a mixture of old and 7008f005ef32Sjsg * new register values -> corrupted frame, or worse 7009f005ef32Sjsg * 7. New _arm() registers are finally written 7010f005ef32Sjsg * 8. Hardware finally latches a complete set of new 7011f005ef32Sjsg * register values, and subsequent frames will be OK again 7012f005ef32Sjsg * 7013f005ef32Sjsg * Also note that due to the pipe CSC hardware issues on 7014f005ef32Sjsg * SKL/GLK DC states must remain off until the pipe CSC 7015f005ef32Sjsg * state readout has happened. Otherwise we risk corrupting 7016f005ef32Sjsg * the CSC latched register values with the readout (see 7017f005ef32Sjsg * skl_read_csc() and skl_color_commit_noarm()). 7018f005ef32Sjsg */ 7019f005ef32Sjsg wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); 7020c349dbc7Sjsg 70215ca02815Sjsg intel_atomic_prepare_plane_clear_colors(state); 70225ca02815Sjsg 7023c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7024c349dbc7Sjsg new_crtc_state, i) { 70255ca02815Sjsg if (intel_crtc_needs_modeset(new_crtc_state) || 7026f005ef32Sjsg intel_crtc_needs_fastset(new_crtc_state)) 70271bb76ff1Sjsg intel_modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]); 7028c349dbc7Sjsg } 7029c349dbc7Sjsg 7030c349dbc7Sjsg intel_commit_modeset_disables(state); 7031c349dbc7Sjsg 7032c349dbc7Sjsg /* FIXME: Eventually get rid of our crtc->config pointer */ 7033c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7034c349dbc7Sjsg crtc->config = new_crtc_state; 7035c349dbc7Sjsg 7036f005ef32Sjsg /* 7037f005ef32Sjsg * In XE_LPD+ Pmdemand combines many parameters such as voltage index, 7038f005ef32Sjsg * plls, cdclk frequency, QGV point selection parameter etc. Voltage 7039f005ef32Sjsg * index, cdclk/ddiclk frequencies are supposed to be configured before 7040f005ef32Sjsg * the cdclk config is set. 7041f005ef32Sjsg */ 7042f005ef32Sjsg intel_pmdemand_pre_plane_update(state); 7043f005ef32Sjsg 7044c349dbc7Sjsg if (state->modeset) { 7045c349dbc7Sjsg drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); 7046c349dbc7Sjsg 7047c349dbc7Sjsg intel_set_cdclk_pre_plane_update(state); 7048c349dbc7Sjsg 7049c349dbc7Sjsg intel_modeset_verify_disabled(dev_priv, state); 7050c349dbc7Sjsg } 7051c349dbc7Sjsg 7052ad8b1aafSjsg intel_sagv_pre_plane_update(state); 7053ad8b1aafSjsg 7054c349dbc7Sjsg /* Complete the events for pipes that have now been disabled */ 7055c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 70565ca02815Sjsg bool modeset = intel_crtc_needs_modeset(new_crtc_state); 7057c349dbc7Sjsg 7058c349dbc7Sjsg /* Complete events for now disable pipes here. 
*/ 7059c349dbc7Sjsg if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { 7060c349dbc7Sjsg spin_lock_irq(&dev->event_lock); 7061c349dbc7Sjsg drm_crtc_send_vblank_event(&crtc->base, 7062c349dbc7Sjsg new_crtc_state->uapi.event); 7063c349dbc7Sjsg spin_unlock_irq(&dev->event_lock); 7064c349dbc7Sjsg 7065c349dbc7Sjsg new_crtc_state->uapi.event = NULL; 7066c349dbc7Sjsg } 7067c349dbc7Sjsg } 7068c349dbc7Sjsg 7069c349dbc7Sjsg intel_encoders_update_prepare(state); 7070c349dbc7Sjsg 7071ad8b1aafSjsg intel_dbuf_pre_plane_update(state); 70721bb76ff1Sjsg intel_mbus_dbox_update(state); 7073c349dbc7Sjsg 70745ca02815Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 70751bb76ff1Sjsg if (new_crtc_state->do_async_flip) 70765ca02815Sjsg intel_crtc_enable_flip_done(state, crtc); 70775ca02815Sjsg } 70785ca02815Sjsg 7079c349dbc7Sjsg /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 70801bb76ff1Sjsg dev_priv->display.funcs.display->commit_modeset_enables(state); 7081c349dbc7Sjsg 70821bb76ff1Sjsg if (state->modeset) 7083c349dbc7Sjsg intel_set_cdclk_post_plane_update(state); 70841bb76ff1Sjsg 70851bb76ff1Sjsg intel_wait_for_vblank_workers(state); 7086c349dbc7Sjsg 7087c349dbc7Sjsg /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 7088c349dbc7Sjsg * already, but still need the state for the delayed optimization. To 7089c349dbc7Sjsg * fix this: 7090c349dbc7Sjsg * - wrap the optimization/post_plane_update stuff into a per-crtc work. 7091c349dbc7Sjsg * - schedule that vblank worker _before_ calling hw_done 7092c349dbc7Sjsg * - at the start of commit_tail, cancel it _synchronously 7093c349dbc7Sjsg * - switch over to the vblank wait helper in the core after that since 7094c349dbc7Sjsg * we don't need our special handling any more. 7095c349dbc7Sjsg */ 7096c349dbc7Sjsg drm_atomic_helper_wait_for_flip_done(dev, &state->base); 7097c349dbc7Sjsg 7098c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 70991bb76ff1Sjsg if (new_crtc_state->do_async_flip) 71005ca02815Sjsg intel_crtc_disable_flip_done(state, crtc); 7101c349dbc7Sjsg } 7102c349dbc7Sjsg 7103c349dbc7Sjsg /* 7104c349dbc7Sjsg * Now that the vblank has passed, we can go ahead and program the 7105c349dbc7Sjsg * optimal watermarks on platforms that need two-step watermark 7106c349dbc7Sjsg * programming. 7107c349dbc7Sjsg * 7108c349dbc7Sjsg * TODO: Move this (and other cleanup) to an async worker eventually. 7109c349dbc7Sjsg */ 7110c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7111c349dbc7Sjsg new_crtc_state, i) { 7112c349dbc7Sjsg /* 7113c349dbc7Sjsg * Gen2 reports pipe underruns whenever all planes are disabled. 7114c349dbc7Sjsg * So re-enable underrun reporting after some planes get enabled. 7115c349dbc7Sjsg * 7116c349dbc7Sjsg * We do this before .optimize_watermarks() so that we have a 7117c349dbc7Sjsg * chance of catching underruns with the intermediate watermarks 7118c349dbc7Sjsg * vs. the new plane configuration. 
7119c349dbc7Sjsg */ 71205ca02815Sjsg if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) 7121c349dbc7Sjsg intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 7122c349dbc7Sjsg 71231bb76ff1Sjsg intel_optimize_watermarks(state, crtc); 7124c349dbc7Sjsg } 7125c349dbc7Sjsg 7126ad8b1aafSjsg intel_dbuf_post_plane_update(state); 71271bb76ff1Sjsg intel_psr_post_plane_update(state); 7128c349dbc7Sjsg 7129c349dbc7Sjsg for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7130c349dbc7Sjsg intel_post_plane_update(state, crtc); 7131c349dbc7Sjsg 71321bb76ff1Sjsg intel_modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]); 7133c349dbc7Sjsg 7134c349dbc7Sjsg intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 7135ad8b1aafSjsg 7136f005ef32Sjsg /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */ 7137f005ef32Sjsg hsw_ips_post_update(state, crtc); 7138f005ef32Sjsg 7139f005ef32Sjsg /* 7140f005ef32Sjsg * Activate DRRS after state readout to avoid 7141f005ef32Sjsg * dp_m_n vs. dp_m2_n2 confusion on BDW+. 7142f005ef32Sjsg */ 7143f005ef32Sjsg intel_drrs_activate(new_crtc_state); 7144f005ef32Sjsg 7145ad8b1aafSjsg /* 7146ad8b1aafSjsg * DSB cleanup is done in cleanup_work aligning with framebuffer 7147ad8b1aafSjsg * cleanup. So copy and reset the dsb structure to sync with 7148ad8b1aafSjsg * commit_done and later do dsb cleanup in cleanup_work. 7149f005ef32Sjsg * 7150f005ef32Sjsg * FIXME get rid of this funny new->old swapping 7151ad8b1aafSjsg */ 7152ad8b1aafSjsg old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb); 7153c349dbc7Sjsg } 7154c349dbc7Sjsg 7155c349dbc7Sjsg /* Underruns don't always raise interrupts, so check manually */ 7156c349dbc7Sjsg intel_check_cpu_fifo_underruns(dev_priv); 7157c349dbc7Sjsg intel_check_pch_fifo_underruns(dev_priv); 7158c349dbc7Sjsg 7159c349dbc7Sjsg if (state->modeset) 7160c349dbc7Sjsg intel_verify_planes(state); 7161c349dbc7Sjsg 7162ad8b1aafSjsg intel_sagv_post_plane_update(state); 7163f005ef32Sjsg intel_pmdemand_post_plane_update(state); 7164c349dbc7Sjsg 7165c349dbc7Sjsg drm_atomic_helper_commit_hw_done(&state->base); 7166c349dbc7Sjsg 7167c349dbc7Sjsg if (state->modeset) { 7168c349dbc7Sjsg /* As one of the primary mmio accessors, KMS has a high 7169c349dbc7Sjsg * likelihood of triggering bugs in unclaimed access. After we 7170c349dbc7Sjsg * finish modesetting, see if an error has been flagged, and if 7171c349dbc7Sjsg * so enable debugging for the next modeset - and hope we catch 7172c349dbc7Sjsg * the culprit. 7173c349dbc7Sjsg */ 7174c349dbc7Sjsg intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7175c349dbc7Sjsg } 7176f005ef32Sjsg /* 7177f005ef32Sjsg * Delay re-enabling DC states by 17 ms to avoid the off->on->off 7178f005ef32Sjsg * toggling overhead at and above 60 FPS. 7179f005ef32Sjsg */ 7180f005ef32Sjsg intel_display_power_put_async_delay(dev_priv, POWER_DOMAIN_DC_OFF, wakeref, 17); 7181c349dbc7Sjsg intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7182c349dbc7Sjsg 7183c349dbc7Sjsg /* 7184c349dbc7Sjsg * Defer the cleanup of the old state to a separate worker to not 7185c349dbc7Sjsg * impede the current task (userspace for blocking modesets) that 7186c349dbc7Sjsg * are executed inline. For out-of-line asynchronous modesets/flips, 7187c349dbc7Sjsg * deferring to a new worker seems overkill, but we would place a 7188c349dbc7Sjsg * schedule point (cond_resched()) here anyway to keep latencies 7189c349dbc7Sjsg * down. 
7190c349dbc7Sjsg */ 7191c349dbc7Sjsg INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); 7192c349dbc7Sjsg queue_work(system_highpri_wq, &state->base.commit_work); 7193c349dbc7Sjsg } 7194c349dbc7Sjsg 7195c349dbc7Sjsg static void intel_atomic_commit_work(struct work_struct *work) 7196c349dbc7Sjsg { 7197c349dbc7Sjsg struct intel_atomic_state *state = 7198c349dbc7Sjsg container_of(work, struct intel_atomic_state, base.commit_work); 7199c349dbc7Sjsg 7200c349dbc7Sjsg intel_atomic_commit_tail(state); 7201c349dbc7Sjsg } 7202c349dbc7Sjsg 72031bb76ff1Sjsg static int 7204c349dbc7Sjsg intel_atomic_commit_ready(struct i915_sw_fence *fence, 7205c349dbc7Sjsg enum i915_sw_fence_notify notify) 7206c349dbc7Sjsg { 7207c349dbc7Sjsg struct intel_atomic_state *state = 7208c349dbc7Sjsg container_of(fence, struct intel_atomic_state, commit_ready); 7209c349dbc7Sjsg 7210c349dbc7Sjsg switch (notify) { 7211c349dbc7Sjsg case FENCE_COMPLETE: 7212c349dbc7Sjsg /* we do blocking waits in the worker, nothing to do here */ 7213c349dbc7Sjsg break; 7214c349dbc7Sjsg case FENCE_FREE: 7215c349dbc7Sjsg { 7216f005ef32Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev); 7217c349dbc7Sjsg struct intel_atomic_helper *helper = 7218f005ef32Sjsg &i915->display.atomic_helper; 7219c349dbc7Sjsg 7220c349dbc7Sjsg if (llist_add(&state->freed, &helper->free_list)) 7221f005ef32Sjsg queue_work(i915->unordered_wq, &helper->free_work); 7222c349dbc7Sjsg break; 7223c349dbc7Sjsg } 7224c349dbc7Sjsg } 7225c349dbc7Sjsg 7226c349dbc7Sjsg return NOTIFY_DONE; 7227c349dbc7Sjsg } 7228c349dbc7Sjsg 7229c349dbc7Sjsg static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7230c349dbc7Sjsg { 7231c349dbc7Sjsg struct intel_plane_state *old_plane_state, *new_plane_state; 7232c349dbc7Sjsg struct intel_plane *plane; 7233c349dbc7Sjsg int i; 7234c349dbc7Sjsg 7235c349dbc7Sjsg for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7236c349dbc7Sjsg new_plane_state, i) 7237c349dbc7Sjsg intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 7238c349dbc7Sjsg to_intel_frontbuffer(new_plane_state->hw.fb), 7239c349dbc7Sjsg plane->frontbuffer_bit); 7240c349dbc7Sjsg } 7241c349dbc7Sjsg 7242f005ef32Sjsg int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, 7243c349dbc7Sjsg bool nonblock) 7244c349dbc7Sjsg { 7245c349dbc7Sjsg struct intel_atomic_state *state = to_intel_atomic_state(_state); 7246c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 7247c349dbc7Sjsg int ret = 0; 7248c349dbc7Sjsg 7249c349dbc7Sjsg state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 7250c349dbc7Sjsg 7251c349dbc7Sjsg drm_atomic_state_get(&state->base); 7252c349dbc7Sjsg i915_sw_fence_init(&state->commit_ready, 7253c349dbc7Sjsg intel_atomic_commit_ready); 7254c349dbc7Sjsg 7255c349dbc7Sjsg /* 7256c349dbc7Sjsg * The intel_legacy_cursor_update() fast path takes care 7257c349dbc7Sjsg * of avoiding the vblank waits for simple cursor 7258c349dbc7Sjsg * movement and flips. For cursor on/off and size changes, 7259c349dbc7Sjsg * we want to perform the vblank waits so that watermark 7260c349dbc7Sjsg * updates happen during the correct frames. Gen9+ have 7261c349dbc7Sjsg * double buffered watermarks and so shouldn't need this. 
7262c349dbc7Sjsg * 7263c349dbc7Sjsg * Unset state->legacy_cursor_update before the call to 7264c349dbc7Sjsg * drm_atomic_helper_setup_commit() because otherwise 7265c349dbc7Sjsg * drm_atomic_helper_wait_for_flip_done() is a noop and 7266c349dbc7Sjsg * we get FIFO underruns because we didn't wait 7267c349dbc7Sjsg * for vblank. 7268c349dbc7Sjsg * 7269c349dbc7Sjsg * FIXME doing watermarks and fb cleanup from a vblank worker 7270c349dbc7Sjsg * (assuming we had any) would solve these problems. 7271c349dbc7Sjsg */ 72725ca02815Sjsg if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) { 7273c349dbc7Sjsg struct intel_crtc_state *new_crtc_state; 7274c349dbc7Sjsg struct intel_crtc *crtc; 7275c349dbc7Sjsg int i; 7276c349dbc7Sjsg 7277c349dbc7Sjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7278c349dbc7Sjsg if (new_crtc_state->wm.need_postvbl_update || 7279c349dbc7Sjsg new_crtc_state->update_wm_post) 7280c349dbc7Sjsg state->base.legacy_cursor_update = false; 7281c349dbc7Sjsg } 7282c349dbc7Sjsg 7283c349dbc7Sjsg ret = intel_atomic_prepare_commit(state); 7284c349dbc7Sjsg if (ret) { 7285c349dbc7Sjsg drm_dbg_atomic(&dev_priv->drm, 7286c349dbc7Sjsg "Preparing state failed with %i\n", ret); 7287c349dbc7Sjsg i915_sw_fence_commit(&state->commit_ready); 7288c349dbc7Sjsg intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7289c349dbc7Sjsg return ret; 7290c349dbc7Sjsg } 7291c349dbc7Sjsg 7292c349dbc7Sjsg ret = drm_atomic_helper_setup_commit(&state->base, nonblock); 7293c349dbc7Sjsg if (!ret) 7294c349dbc7Sjsg ret = drm_atomic_helper_swap_state(&state->base, true); 7295c349dbc7Sjsg if (!ret) 7296c349dbc7Sjsg intel_atomic_swap_global_state(state); 7297c349dbc7Sjsg 7298c349dbc7Sjsg if (ret) { 7299ad8b1aafSjsg struct intel_crtc_state *new_crtc_state; 7300ad8b1aafSjsg struct intel_crtc *crtc; 7301ad8b1aafSjsg int i; 7302ad8b1aafSjsg 7303c349dbc7Sjsg i915_sw_fence_commit(&state->commit_ready); 7304c349dbc7Sjsg 7305ad8b1aafSjsg for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7306f005ef32Sjsg intel_color_cleanup_commit(new_crtc_state); 7307ad8b1aafSjsg 7308f005ef32Sjsg drm_atomic_helper_unprepare_planes(dev, &state->base); 7309c349dbc7Sjsg intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7310c349dbc7Sjsg return ret; 7311c349dbc7Sjsg } 7312c349dbc7Sjsg intel_shared_dpll_swap_state(state); 7313c349dbc7Sjsg intel_atomic_track_fbs(state); 7314c349dbc7Sjsg 7315c349dbc7Sjsg drm_atomic_state_get(&state->base); 7316c349dbc7Sjsg INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 7317c349dbc7Sjsg 7318c349dbc7Sjsg i915_sw_fence_commit(&state->commit_ready); 7319c349dbc7Sjsg if (nonblock && state->modeset) { 73201bb76ff1Sjsg queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); 7321c349dbc7Sjsg } else if (nonblock) { 73221bb76ff1Sjsg queue_work(dev_priv->display.wq.flip, &state->base.commit_work); 7323c349dbc7Sjsg } else { 7324c349dbc7Sjsg if (state->modeset) 73251bb76ff1Sjsg flush_workqueue(dev_priv->display.wq.modeset); 7326c349dbc7Sjsg intel_atomic_commit_tail(state); 7327c349dbc7Sjsg } 7328c349dbc7Sjsg 7329c349dbc7Sjsg return 0; 7330c349dbc7Sjsg } 7331c349dbc7Sjsg 7332c349dbc7Sjsg /** 7333c349dbc7Sjsg * intel_plane_destroy - destroy a plane 7334c349dbc7Sjsg * @plane: plane to destroy 7335c349dbc7Sjsg * 7336c349dbc7Sjsg * Common destruction function for all types of planes (primary, cursor, 7337c349dbc7Sjsg * sprite). 
7338c349dbc7Sjsg */ 7339c349dbc7Sjsg void intel_plane_destroy(struct drm_plane *plane) 7340c349dbc7Sjsg { 7341c349dbc7Sjsg drm_plane_cleanup(plane); 7342c349dbc7Sjsg kfree(to_intel_plane(plane)); 7343c349dbc7Sjsg } 7344c349dbc7Sjsg 7345c349dbc7Sjsg int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 7346c349dbc7Sjsg struct drm_file *file) 7347c349dbc7Sjsg { 7348c349dbc7Sjsg struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 7349c349dbc7Sjsg struct drm_crtc *drmmode_crtc; 7350c349dbc7Sjsg struct intel_crtc *crtc; 7351c349dbc7Sjsg 7352c349dbc7Sjsg drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 7353c349dbc7Sjsg if (!drmmode_crtc) 7354c349dbc7Sjsg return -ENOENT; 7355c349dbc7Sjsg 7356c349dbc7Sjsg crtc = to_intel_crtc(drmmode_crtc); 7357c349dbc7Sjsg pipe_from_crtc_id->pipe = crtc->pipe; 7358c349dbc7Sjsg 7359c349dbc7Sjsg return 0; 7360c349dbc7Sjsg } 7361c349dbc7Sjsg 7362c349dbc7Sjsg static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) 7363c349dbc7Sjsg { 7364c349dbc7Sjsg struct drm_device *dev = encoder->base.dev; 7365c349dbc7Sjsg struct intel_encoder *source_encoder; 7366c349dbc7Sjsg u32 possible_clones = 0; 7367c349dbc7Sjsg 7368c349dbc7Sjsg for_each_intel_encoder(dev, source_encoder) { 7369c349dbc7Sjsg if (encoders_cloneable(encoder, source_encoder)) 7370c349dbc7Sjsg possible_clones |= drm_encoder_mask(&source_encoder->base); 7371c349dbc7Sjsg } 7372c349dbc7Sjsg 7373c349dbc7Sjsg return possible_clones; 7374c349dbc7Sjsg } 7375c349dbc7Sjsg 7376c349dbc7Sjsg static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) 7377c349dbc7Sjsg { 7378c349dbc7Sjsg struct drm_device *dev = encoder->base.dev; 7379c349dbc7Sjsg struct intel_crtc *crtc; 7380c349dbc7Sjsg u32 possible_crtcs = 0; 7381c349dbc7Sjsg 73821bb76ff1Sjsg for_each_intel_crtc_in_pipe_mask(dev, crtc, encoder->pipe_mask) 7383c349dbc7Sjsg possible_crtcs |= drm_crtc_mask(&crtc->base); 7384c349dbc7Sjsg 7385c349dbc7Sjsg return possible_crtcs; 7386c349dbc7Sjsg } 7387c349dbc7Sjsg 7388c349dbc7Sjsg static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) 7389c349dbc7Sjsg { 7390c349dbc7Sjsg if (!IS_MOBILE(dev_priv)) 7391c349dbc7Sjsg return false; 7392c349dbc7Sjsg 7393c349dbc7Sjsg if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) 7394c349dbc7Sjsg return false; 7395c349dbc7Sjsg 73965ca02815Sjsg if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) 7397c349dbc7Sjsg return false; 7398c349dbc7Sjsg 7399c349dbc7Sjsg return true; 7400c349dbc7Sjsg } 7401c349dbc7Sjsg 7402c349dbc7Sjsg static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) 7403c349dbc7Sjsg { 74045ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) 7405c349dbc7Sjsg return false; 7406c349dbc7Sjsg 7407f005ef32Sjsg if (IS_HASWELL_ULT(dev_priv) || IS_BROADWELL_ULT(dev_priv)) 7408c349dbc7Sjsg return false; 7409c349dbc7Sjsg 7410c349dbc7Sjsg if (HAS_PCH_LPT_H(dev_priv) && 7411c349dbc7Sjsg intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 7412c349dbc7Sjsg return false; 7413c349dbc7Sjsg 7414c349dbc7Sjsg /* DDI E can't be used if DDI A requires 4 lanes */ 7415c349dbc7Sjsg if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 7416c349dbc7Sjsg return false; 7417c349dbc7Sjsg 74181bb76ff1Sjsg if (!dev_priv->display.vbt.int_crt_support) 7419c349dbc7Sjsg return false; 7420c349dbc7Sjsg 7421c349dbc7Sjsg return true; 7422c349dbc7Sjsg } 7423c349dbc7Sjsg 7424f005ef32Sjsg bool assert_port_valid(struct drm_i915_private *i915, enum port port) 7425f005ef32Sjsg { 
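	/* Warn and return false if the platform's runtime display port mask does not include this port. */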
7426f005ef32Sjsg return !drm_WARN(&i915->drm, !(DISPLAY_RUNTIME_INFO(i915)->port_mask & BIT(port)), 7427f005ef32Sjsg "Platform does not support port %c\n", port_name(port)); 7428f005ef32Sjsg } 7429f005ef32Sjsg 7430f005ef32Sjsg void intel_setup_outputs(struct drm_i915_private *dev_priv) 7431c349dbc7Sjsg { 7432c349dbc7Sjsg struct intel_encoder *encoder; 7433c349dbc7Sjsg bool dpd_is_edp = false; 7434c349dbc7Sjsg 74355ca02815Sjsg intel_pps_unlock_regs_wa(dev_priv); 7436c349dbc7Sjsg 7437ad8b1aafSjsg if (!HAS_DISPLAY(dev_priv)) 7438c349dbc7Sjsg return; 7439c349dbc7Sjsg 7440f005ef32Sjsg if (HAS_DDI(dev_priv)) { 7441c349dbc7Sjsg if (intel_ddi_crt_present(dev_priv)) 7442c349dbc7Sjsg intel_crt_init(dev_priv); 7443c349dbc7Sjsg 7444f005ef32Sjsg intel_bios_for_each_encoder(dev_priv, intel_ddi_init); 7445c349dbc7Sjsg 7446f005ef32Sjsg if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 7447f005ef32Sjsg vlv_dsi_init(dev_priv); 7448c349dbc7Sjsg } else if (HAS_PCH_SPLIT(dev_priv)) { 7449c349dbc7Sjsg int found; 7450c349dbc7Sjsg 7451c349dbc7Sjsg /* 7452c349dbc7Sjsg * intel_edp_init_connector() depends on this completing first, 7453c349dbc7Sjsg * to prevent the registration of both eDP and LVDS and the 7454c349dbc7Sjsg * incorrect sharing of the PPS. 7455c349dbc7Sjsg */ 7456c349dbc7Sjsg intel_lvds_init(dev_priv); 7457c349dbc7Sjsg intel_crt_init(dev_priv); 7458c349dbc7Sjsg 7459c349dbc7Sjsg dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 7460c349dbc7Sjsg 7461c349dbc7Sjsg if (ilk_has_edp_a(dev_priv)) 74625ca02815Sjsg g4x_dp_init(dev_priv, DP_A, PORT_A); 7463c349dbc7Sjsg 7464c349dbc7Sjsg if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { 7465c349dbc7Sjsg /* PCH SDVOB multiplex with HDMIB */ 7466c349dbc7Sjsg found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 7467c349dbc7Sjsg if (!found) 74685ca02815Sjsg g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 7469c349dbc7Sjsg if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) 74705ca02815Sjsg g4x_dp_init(dev_priv, PCH_DP_B, PORT_B); 7471c349dbc7Sjsg } 7472c349dbc7Sjsg 7473c349dbc7Sjsg if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) 74745ca02815Sjsg g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 7475c349dbc7Sjsg 7476c349dbc7Sjsg if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) 74775ca02815Sjsg g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 7478c349dbc7Sjsg 7479c349dbc7Sjsg if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) 74805ca02815Sjsg g4x_dp_init(dev_priv, PCH_DP_C, PORT_C); 7481c349dbc7Sjsg 7482c349dbc7Sjsg if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) 74835ca02815Sjsg g4x_dp_init(dev_priv, PCH_DP_D, PORT_D); 7484c349dbc7Sjsg } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 7485c349dbc7Sjsg bool has_edp, has_port; 7486c349dbc7Sjsg 74871bb76ff1Sjsg if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) 7488c349dbc7Sjsg intel_crt_init(dev_priv); 7489c349dbc7Sjsg 7490c349dbc7Sjsg /* 7491c349dbc7Sjsg * The DP_DETECTED bit is the latched state of the DDC 7492c349dbc7Sjsg * SDA pin at boot. However since eDP doesn't require DDC 7493c349dbc7Sjsg * (no way to plug in a DP->HDMI dongle) the DDC pins for 7494c349dbc7Sjsg * eDP ports may have been muxed to an alternate function. 7495c349dbc7Sjsg * Thus we can't rely on the DP_DETECTED bit alone to detect 7496c349dbc7Sjsg * eDP ports. Consult the VBT as well as DP_DETECTED to 7497c349dbc7Sjsg * detect eDP ports. 7498c349dbc7Sjsg * 7499c349dbc7Sjsg * Sadly the straps seem to be missing sometimes even for HDMI 7500c349dbc7Sjsg * ports (eg. 
on Voyo V3 - CHT x7-Z8700), so check both strap 7501c349dbc7Sjsg * and VBT for the presence of the port. Additionally we can't 7502c349dbc7Sjsg * trust the port type the VBT declares as we've seen at least 7503c349dbc7Sjsg * HDMI ports that the VBT claim are DP or eDP. 7504c349dbc7Sjsg */ 7505c349dbc7Sjsg has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 7506c349dbc7Sjsg has_port = intel_bios_is_port_present(dev_priv, PORT_B); 7507c349dbc7Sjsg if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) 75085ca02815Sjsg has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B); 7509c349dbc7Sjsg if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 75105ca02815Sjsg g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 7511c349dbc7Sjsg 7512c349dbc7Sjsg has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 7513c349dbc7Sjsg has_port = intel_bios_is_port_present(dev_priv, PORT_C); 7514c349dbc7Sjsg if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) 75155ca02815Sjsg has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C); 7516c349dbc7Sjsg if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 75175ca02815Sjsg g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 7518c349dbc7Sjsg 7519c349dbc7Sjsg if (IS_CHERRYVIEW(dev_priv)) { 7520c349dbc7Sjsg /* 7521c349dbc7Sjsg * eDP not supported on port D, 7522c349dbc7Sjsg * so no need to worry about it 7523c349dbc7Sjsg */ 7524c349dbc7Sjsg has_port = intel_bios_is_port_present(dev_priv, PORT_D); 7525c349dbc7Sjsg if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) 75265ca02815Sjsg g4x_dp_init(dev_priv, CHV_DP_D, PORT_D); 7527c349dbc7Sjsg if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) 75285ca02815Sjsg g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 7529c349dbc7Sjsg } 7530c349dbc7Sjsg 7531c349dbc7Sjsg vlv_dsi_init(dev_priv); 7532c349dbc7Sjsg } else if (IS_PINEVIEW(dev_priv)) { 7533c349dbc7Sjsg intel_lvds_init(dev_priv); 7534c349dbc7Sjsg intel_crt_init(dev_priv); 75355ca02815Sjsg } else if (IS_DISPLAY_VER(dev_priv, 3, 4)) { 7536c349dbc7Sjsg bool found = false; 7537c349dbc7Sjsg 7538c349dbc7Sjsg if (IS_MOBILE(dev_priv)) 7539c349dbc7Sjsg intel_lvds_init(dev_priv); 7540c349dbc7Sjsg 7541c349dbc7Sjsg intel_crt_init(dev_priv); 7542c349dbc7Sjsg 7543c349dbc7Sjsg if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7544c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); 7545c349dbc7Sjsg found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 7546c349dbc7Sjsg if (!found && IS_G4X(dev_priv)) { 7547c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 7548c349dbc7Sjsg "probing HDMI on SDVOB\n"); 75495ca02815Sjsg g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 7550c349dbc7Sjsg } 7551c349dbc7Sjsg 7552c349dbc7Sjsg if (!found && IS_G4X(dev_priv)) 75535ca02815Sjsg g4x_dp_init(dev_priv, DP_B, PORT_B); 7554c349dbc7Sjsg } 7555c349dbc7Sjsg 7556c349dbc7Sjsg /* Before G4X SDVOC doesn't have its own detect register */ 7557c349dbc7Sjsg 7558c349dbc7Sjsg if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { 7559c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); 7560c349dbc7Sjsg found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 7561c349dbc7Sjsg } 7562c349dbc7Sjsg 7563c349dbc7Sjsg if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { 7564c349dbc7Sjsg 7565c349dbc7Sjsg if (IS_G4X(dev_priv)) { 7566c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, 7567c349dbc7Sjsg "probing HDMI on SDVOC\n"); 75685ca02815Sjsg g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 7569c349dbc7Sjsg } 7570c349dbc7Sjsg if 
(IS_G4X(dev_priv)) 75715ca02815Sjsg g4x_dp_init(dev_priv, DP_C, PORT_C); 7572c349dbc7Sjsg } 7573c349dbc7Sjsg 7574c349dbc7Sjsg if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) 75755ca02815Sjsg g4x_dp_init(dev_priv, DP_D, PORT_D); 7576c349dbc7Sjsg 7577c349dbc7Sjsg if (SUPPORTS_TV(dev_priv)) 7578c349dbc7Sjsg intel_tv_init(dev_priv); 75795ca02815Sjsg } else if (DISPLAY_VER(dev_priv) == 2) { 7580c349dbc7Sjsg if (IS_I85X(dev_priv)) 7581c349dbc7Sjsg intel_lvds_init(dev_priv); 7582c349dbc7Sjsg 7583c349dbc7Sjsg intel_crt_init(dev_priv); 7584c349dbc7Sjsg intel_dvo_init(dev_priv); 7585c349dbc7Sjsg } 7586c349dbc7Sjsg 7587c349dbc7Sjsg for_each_intel_encoder(&dev_priv->drm, encoder) { 7588c349dbc7Sjsg encoder->base.possible_crtcs = 7589c349dbc7Sjsg intel_encoder_possible_crtcs(encoder); 7590c349dbc7Sjsg encoder->base.possible_clones = 7591c349dbc7Sjsg intel_encoder_possible_clones(encoder); 7592c349dbc7Sjsg } 7593c349dbc7Sjsg 7594c349dbc7Sjsg intel_init_pch_refclk(dev_priv); 7595c349dbc7Sjsg 7596c349dbc7Sjsg drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 7597c349dbc7Sjsg } 7598c349dbc7Sjsg 75991bb76ff1Sjsg static int max_dotclock(struct drm_i915_private *i915) 7600c349dbc7Sjsg { 76011bb76ff1Sjsg int max_dotclock = i915->max_dotclk_freq; 7602c349dbc7Sjsg 76031bb76ff1Sjsg /* icl+ might use bigjoiner */ 76041bb76ff1Sjsg if (DISPLAY_VER(i915) >= 11) 76051bb76ff1Sjsg max_dotclock *= 2; 76065ca02815Sjsg 76071bb76ff1Sjsg return max_dotclock; 7608c349dbc7Sjsg } 7609c349dbc7Sjsg 7610f005ef32Sjsg enum drm_mode_status intel_mode_valid(struct drm_device *dev, 7611c349dbc7Sjsg const struct drm_display_mode *mode) 7612c349dbc7Sjsg { 7613c349dbc7Sjsg struct drm_i915_private *dev_priv = to_i915(dev); 7614c349dbc7Sjsg int hdisplay_max, htotal_max; 7615c349dbc7Sjsg int vdisplay_max, vtotal_max; 7616c349dbc7Sjsg 7617c349dbc7Sjsg /* 7618c349dbc7Sjsg * Can't reject DBLSCAN here because Xorg ddxen can add piles 7619c349dbc7Sjsg * of DBLSCAN modes to the output's mode list when they detect 7620c349dbc7Sjsg * the scaling mode property on the connector. And they don't 7621c349dbc7Sjsg * ask the kernel to validate those modes in any way until 7622c349dbc7Sjsg * modeset time at which point the client gets a protocol error. 7623c349dbc7Sjsg * So in order to not upset those clients we silently ignore the 7624c349dbc7Sjsg * DBLSCAN flag on such connectors. For other connectors we will 7625c349dbc7Sjsg * reject modes with the DBLSCAN flag in encoder->compute_config(). 7626c349dbc7Sjsg * And we always reject DBLSCAN modes in connector->mode_valid() 7627c349dbc7Sjsg * as we never want such modes on the connector's mode list. 7628c349dbc7Sjsg */ 7629c349dbc7Sjsg 7630c349dbc7Sjsg if (mode->vscan > 1) 7631c349dbc7Sjsg return MODE_NO_VSCAN; 7632c349dbc7Sjsg 7633c349dbc7Sjsg if (mode->flags & DRM_MODE_FLAG_HSKEW) 7634c349dbc7Sjsg return MODE_H_ILLEGAL; 7635c349dbc7Sjsg 7636c349dbc7Sjsg if (mode->flags & (DRM_MODE_FLAG_CSYNC | 7637c349dbc7Sjsg DRM_MODE_FLAG_NCSYNC | 7638c349dbc7Sjsg DRM_MODE_FLAG_PCSYNC)) 7639c349dbc7Sjsg return MODE_HSYNC; 7640c349dbc7Sjsg 7641c349dbc7Sjsg if (mode->flags & (DRM_MODE_FLAG_BCAST | 7642c349dbc7Sjsg DRM_MODE_FLAG_PIXMUX | 7643c349dbc7Sjsg DRM_MODE_FLAG_CLKDIV2)) 7644c349dbc7Sjsg return MODE_BAD; 7645c349dbc7Sjsg 76461bb76ff1Sjsg /* 76471bb76ff1Sjsg * Reject clearly excessive dotclocks early to 76481bb76ff1Sjsg * avoid having to worry about huge integers later. 
76491bb76ff1Sjsg */ 76501bb76ff1Sjsg if (mode->clock > max_dotclock(dev_priv)) 76511bb76ff1Sjsg return MODE_CLOCK_HIGH; 76521bb76ff1Sjsg 7653c349dbc7Sjsg /* Transcoder timing limits */ 76545ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 11) { 7655c349dbc7Sjsg hdisplay_max = 16384; 7656c349dbc7Sjsg vdisplay_max = 8192; 7657c349dbc7Sjsg htotal_max = 16384; 7658c349dbc7Sjsg vtotal_max = 8192; 76595ca02815Sjsg } else if (DISPLAY_VER(dev_priv) >= 9 || 7660c349dbc7Sjsg IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { 7661c349dbc7Sjsg hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ 7662c349dbc7Sjsg vdisplay_max = 4096; 7663c349dbc7Sjsg htotal_max = 8192; 7664c349dbc7Sjsg vtotal_max = 8192; 76655ca02815Sjsg } else if (DISPLAY_VER(dev_priv) >= 3) { 7666c349dbc7Sjsg hdisplay_max = 4096; 7667c349dbc7Sjsg vdisplay_max = 4096; 7668c349dbc7Sjsg htotal_max = 8192; 7669c349dbc7Sjsg vtotal_max = 8192; 7670c349dbc7Sjsg } else { 7671c349dbc7Sjsg hdisplay_max = 2048; 7672c349dbc7Sjsg vdisplay_max = 2048; 7673c349dbc7Sjsg htotal_max = 4096; 7674c349dbc7Sjsg vtotal_max = 4096; 7675c349dbc7Sjsg } 7676c349dbc7Sjsg 7677c349dbc7Sjsg if (mode->hdisplay > hdisplay_max || 7678c349dbc7Sjsg mode->hsync_start > htotal_max || 7679c349dbc7Sjsg mode->hsync_end > htotal_max || 7680c349dbc7Sjsg mode->htotal > htotal_max) 7681c349dbc7Sjsg return MODE_H_ILLEGAL; 7682c349dbc7Sjsg 7683c349dbc7Sjsg if (mode->vdisplay > vdisplay_max || 7684c349dbc7Sjsg mode->vsync_start > vtotal_max || 7685c349dbc7Sjsg mode->vsync_end > vtotal_max || 7686c349dbc7Sjsg mode->vtotal > vtotal_max) 7687c349dbc7Sjsg return MODE_V_ILLEGAL; 7688c349dbc7Sjsg 76892bd53da4Sjsg return MODE_OK; 76902bd53da4Sjsg } 76912bd53da4Sjsg 76922bd53da4Sjsg enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv, 76932bd53da4Sjsg const struct drm_display_mode *mode) 76942bd53da4Sjsg { 76952bd53da4Sjsg /* 76962bd53da4Sjsg * Additional transcoder timing limits, 76972bd53da4Sjsg * excluding BXT/GLK DSI transcoders. 76982bd53da4Sjsg */ 76995ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 5) { 7700c349dbc7Sjsg if (mode->hdisplay < 64 || 7701c349dbc7Sjsg mode->htotal - mode->hdisplay < 32) 7702c349dbc7Sjsg return MODE_H_ILLEGAL; 7703c349dbc7Sjsg 7704c349dbc7Sjsg if (mode->vtotal - mode->vdisplay < 5) 7705c349dbc7Sjsg return MODE_V_ILLEGAL; 7706c349dbc7Sjsg } else { 7707c349dbc7Sjsg if (mode->htotal - mode->hdisplay < 32) 7708c349dbc7Sjsg return MODE_H_ILLEGAL; 7709c349dbc7Sjsg 7710c349dbc7Sjsg if (mode->vtotal - mode->vdisplay < 3) 7711c349dbc7Sjsg return MODE_V_ILLEGAL; 7712c349dbc7Sjsg } 7713c349dbc7Sjsg 77141bb76ff1Sjsg /* 77151bb76ff1Sjsg * Cantiga+ cannot handle modes with a hsync front porch of 0. 77161bb76ff1Sjsg * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 77171bb76ff1Sjsg */ 77181bb76ff1Sjsg if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && 77191bb76ff1Sjsg mode->hsync_start == mode->hdisplay) 77201bb76ff1Sjsg return MODE_H_ILLEGAL; 77211bb76ff1Sjsg 7722c349dbc7Sjsg return MODE_OK; 7723c349dbc7Sjsg } 7724c349dbc7Sjsg 7725c349dbc7Sjsg enum drm_mode_status 7726c349dbc7Sjsg intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, 77275ca02815Sjsg const struct drm_display_mode *mode, 77285ca02815Sjsg bool bigjoiner) 7729c349dbc7Sjsg { 7730c349dbc7Sjsg int plane_width_max, plane_height_max; 7731c349dbc7Sjsg 7732c349dbc7Sjsg /* 7733c349dbc7Sjsg * intel_mode_valid() should be 7734c349dbc7Sjsg * sufficient on older platforms. 
7735c349dbc7Sjsg */ 77365ca02815Sjsg if (DISPLAY_VER(dev_priv) < 9) 7737c349dbc7Sjsg return MODE_OK; 7738c349dbc7Sjsg 7739c349dbc7Sjsg /* 7740c349dbc7Sjsg * Most people will probably want a fullscreen 7741c349dbc7Sjsg * plane so let's not advertize modes that are 7742c349dbc7Sjsg * too big for that. 7743c349dbc7Sjsg */ 77445ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 11) { 77455ca02815Sjsg plane_width_max = 5120 << bigjoiner; 7746c349dbc7Sjsg plane_height_max = 4320; 7747c349dbc7Sjsg } else { 7748c349dbc7Sjsg plane_width_max = 5120; 7749c349dbc7Sjsg plane_height_max = 4096; 7750c349dbc7Sjsg } 7751c349dbc7Sjsg 7752c349dbc7Sjsg if (mode->hdisplay > plane_width_max) 7753c349dbc7Sjsg return MODE_H_ILLEGAL; 7754c349dbc7Sjsg 7755c349dbc7Sjsg if (mode->vdisplay > plane_height_max) 7756c349dbc7Sjsg return MODE_V_ILLEGAL; 7757c349dbc7Sjsg 7758c349dbc7Sjsg return MODE_OK; 7759c349dbc7Sjsg } 7760c349dbc7Sjsg 77611bb76ff1Sjsg static const struct intel_display_funcs skl_display_funcs = { 77621bb76ff1Sjsg .get_pipe_config = hsw_get_pipe_config, 77631bb76ff1Sjsg .crtc_enable = hsw_crtc_enable, 77641bb76ff1Sjsg .crtc_disable = hsw_crtc_disable, 77651bb76ff1Sjsg .commit_modeset_enables = skl_commit_modeset_enables, 77661bb76ff1Sjsg .get_initial_plane_config = skl_get_initial_plane_config, 7767*34c95817Sjsg .fixup_initial_plane_config = skl_fixup_initial_plane_config, 77681bb76ff1Sjsg }; 77691bb76ff1Sjsg 77701bb76ff1Sjsg static const struct intel_display_funcs ddi_display_funcs = { 77711bb76ff1Sjsg .get_pipe_config = hsw_get_pipe_config, 77721bb76ff1Sjsg .crtc_enable = hsw_crtc_enable, 77731bb76ff1Sjsg .crtc_disable = hsw_crtc_disable, 77741bb76ff1Sjsg .commit_modeset_enables = intel_commit_modeset_enables, 77751bb76ff1Sjsg .get_initial_plane_config = i9xx_get_initial_plane_config, 7776*34c95817Sjsg .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 77771bb76ff1Sjsg }; 77781bb76ff1Sjsg 77791bb76ff1Sjsg static const struct intel_display_funcs pch_split_display_funcs = { 77801bb76ff1Sjsg .get_pipe_config = ilk_get_pipe_config, 77811bb76ff1Sjsg .crtc_enable = ilk_crtc_enable, 77821bb76ff1Sjsg .crtc_disable = ilk_crtc_disable, 77831bb76ff1Sjsg .commit_modeset_enables = intel_commit_modeset_enables, 77841bb76ff1Sjsg .get_initial_plane_config = i9xx_get_initial_plane_config, 7785*34c95817Sjsg .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 77861bb76ff1Sjsg }; 77871bb76ff1Sjsg 77881bb76ff1Sjsg static const struct intel_display_funcs vlv_display_funcs = { 77891bb76ff1Sjsg .get_pipe_config = i9xx_get_pipe_config, 77901bb76ff1Sjsg .crtc_enable = valleyview_crtc_enable, 77911bb76ff1Sjsg .crtc_disable = i9xx_crtc_disable, 77921bb76ff1Sjsg .commit_modeset_enables = intel_commit_modeset_enables, 77931bb76ff1Sjsg .get_initial_plane_config = i9xx_get_initial_plane_config, 7794*34c95817Sjsg .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 77951bb76ff1Sjsg }; 77961bb76ff1Sjsg 77971bb76ff1Sjsg static const struct intel_display_funcs i9xx_display_funcs = { 77981bb76ff1Sjsg .get_pipe_config = i9xx_get_pipe_config, 77991bb76ff1Sjsg .crtc_enable = i9xx_crtc_enable, 78001bb76ff1Sjsg .crtc_disable = i9xx_crtc_disable, 78011bb76ff1Sjsg .commit_modeset_enables = intel_commit_modeset_enables, 78021bb76ff1Sjsg .get_initial_plane_config = i9xx_get_initial_plane_config, 7803*34c95817Sjsg .fixup_initial_plane_config = i9xx_fixup_initial_plane_config, 78041bb76ff1Sjsg }; 78051bb76ff1Sjsg 7806c349dbc7Sjsg /** 7807c349dbc7Sjsg * intel_init_display_hooks - initialize the display modesetting hooks 
7808c349dbc7Sjsg * @dev_priv: device private 7809c349dbc7Sjsg */ 7810c349dbc7Sjsg void intel_init_display_hooks(struct drm_i915_private *dev_priv) 7811c349dbc7Sjsg { 78125ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 9) { 78131bb76ff1Sjsg dev_priv->display.funcs.display = &skl_display_funcs; 7814c349dbc7Sjsg } else if (HAS_DDI(dev_priv)) { 78151bb76ff1Sjsg dev_priv->display.funcs.display = &ddi_display_funcs; 7816c349dbc7Sjsg } else if (HAS_PCH_SPLIT(dev_priv)) { 78171bb76ff1Sjsg dev_priv->display.funcs.display = &pch_split_display_funcs; 78185ca02815Sjsg } else if (IS_CHERRYVIEW(dev_priv) || 78195ca02815Sjsg IS_VALLEYVIEW(dev_priv)) { 78201bb76ff1Sjsg dev_priv->display.funcs.display = &vlv_display_funcs; 7821c349dbc7Sjsg } else { 78221bb76ff1Sjsg dev_priv->display.funcs.display = &i9xx_display_funcs; 7823c349dbc7Sjsg } 7824c349dbc7Sjsg } 7825c349dbc7Sjsg 7826f005ef32Sjsg int intel_initial_commit(struct drm_device *dev) 7827c349dbc7Sjsg { 7828c349dbc7Sjsg struct drm_atomic_state *state = NULL; 7829c349dbc7Sjsg struct drm_modeset_acquire_ctx ctx; 7830c349dbc7Sjsg struct intel_crtc *crtc; 7831c349dbc7Sjsg int ret = 0; 7832c349dbc7Sjsg 7833c349dbc7Sjsg state = drm_atomic_state_alloc(dev); 7834c349dbc7Sjsg if (!state) 7835c349dbc7Sjsg return -ENOMEM; 7836c349dbc7Sjsg 7837c349dbc7Sjsg drm_modeset_acquire_init(&ctx, 0); 7838c349dbc7Sjsg 7839c349dbc7Sjsg state->acquire_ctx = &ctx; 7840f005ef32Sjsg to_intel_atomic_state(state)->internal = true; 7841c349dbc7Sjsg 7842f005ef32Sjsg retry: 7843c349dbc7Sjsg for_each_intel_crtc(dev, crtc) { 7844c349dbc7Sjsg struct intel_crtc_state *crtc_state = 7845c349dbc7Sjsg intel_atomic_get_crtc_state(state, crtc); 7846c349dbc7Sjsg 7847c349dbc7Sjsg if (IS_ERR(crtc_state)) { 7848c349dbc7Sjsg ret = PTR_ERR(crtc_state); 7849c349dbc7Sjsg goto out; 7850c349dbc7Sjsg } 7851c349dbc7Sjsg 7852c349dbc7Sjsg if (crtc_state->hw.active) { 78535ca02815Sjsg struct intel_encoder *encoder; 78545ca02815Sjsg 7855c349dbc7Sjsg ret = drm_atomic_add_affected_planes(state, &crtc->base); 7856c349dbc7Sjsg if (ret) 7857c349dbc7Sjsg goto out; 7858c349dbc7Sjsg 7859c349dbc7Sjsg /* 7860c349dbc7Sjsg * FIXME hack to force a LUT update to avoid the 7861c349dbc7Sjsg * plane update forcing the pipe gamma on without 7862c349dbc7Sjsg * having a proper LUT loaded. Remove once we 7863c349dbc7Sjsg * have readout for pipe gamma enable. 
int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			struct intel_encoder *encoder;

			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;

			for_each_intel_encoder_mask(dev, encoder,
						    crtc_state->uapi.encoder_mask) {
				if (encoder->initial_fastset_check &&
				    !encoder->initial_fastset_check(encoder, crtc_state)) {
					ret = drm_atomic_add_affected_connectors(state,
										 &crtc->base);
					if (ret)
						goto out;
				}
			}
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

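/*
 * i830_enable_pipe()/i830_disable_pipe() below serve the i830 "force
 * quirk" paths (see the drm_dbg_kms messages): the pipe is programmed
 * with a fixed 640x480@60 VGA-style mode rather than a user mode.  With
 * the i9xx DPLL formula and the 48 MHz reference used here, the
 * hardcoded dividers work out as follows, which is why the code warns
 * if i9xx_calc_dpll_params() does not return 25154:
 *
 *	m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 20 + 9      = 109
 *	vco = 48000 * m / (n + 2)     = 48000 * 109 / 4 = 1308000 kHz
 *	dot = vco / (p1 * p2)         = 1308000 / 52    ~ 25154 kHz
 *
 * i.e. close to the nominal 25.175 MHz VGA pixel clock.
 */
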
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	enum transcoder cpu_transcoder = (enum transcoder)pipe;
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	drm_WARN_ON(&dev_priv->drm,
		    i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(640 - 1) | HTOTAL(800 - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(640 - 1) | HBLANK_END(800 - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(656 - 1) | HSYNC_END(752 - 1));
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(480 - 1) | VTOTAL(525 - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(480 - 1) | VBLANK_END(525 - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(490 - 1) | VSYNC_END(492 - 1));
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_moving(crtc);
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);

	intel_de_write(dev_priv, TRANSCONF(pipe), 0);
	intel_de_posting_read(dev_priv, TRANSCONF(pipe));

	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}

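/*
 * intel_hpd_poll_fini() below flushes any work that hotplug detection
 * may still have queued before teardown: the per-connector modeset
 * retry work and, for connectors with an HDCP shim, the HDCP check and
 * property work.  The __linux__/OpenBSD split only differs in which
 * member is checked to tell whether the work item was ever initialized
 * (work_struct::func vs. the embedded task's t_func).
 */
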
void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
#ifdef __linux__
		if (connector->modeset_retry_work.func)
#else
		if (connector->modeset_retry_work.task.t_func)
#endif
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
}
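
/*
 * Illustrative use of intel_scanout_needs_vtd_wa() (the real call sites
 * live outside this section): buffer setup code checks it to decide
 * whether scanout surfaces need extra care while the IOMMU is active,
 * e.g.
 *
 *	if (intel_scanout_needs_vtd_wa(i915))
 *		alignment = max(alignment, SZ_256K);
 *
 * where the SZ_256K bump is a hypothetical example, not the driver's
 * actual policy.
 */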