/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
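 *
 * As a rough, illustrative sketch of how these HW structs relate (not a
 * literal code path; error handling is omitted and "init_data" is only a
 * placeholder for a DM-provided struct dc_init_data):
 *
 *   struct dc *dc = dc_create(&init_data);
 *   struct dc_link *link = dc->links[0];      (one link per connector)
 *   struct dc_sink *sink = link->local_sink;  (NULL until a display is detected)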
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct
dc_link *link; 235 236 link_init_params.ctx = dc->ctx; 237 link_init_params.connector_index = i; 238 link_init_params.link_index = dc->link_count; 239 link_init_params.dc = dc; 240 link_init_params.is_dpia_link = true; 241 242 link = dc->link_srv->create_link(&link_init_params); 243 if (link) { 244 dc->links[dc->link_count] = link; 245 link->dc = dc; 246 ++dc->link_count; 247 } 248 } 249 250 for (i = 0; i < num_virtual_links; i++) { 251 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 252 struct encoder_init_data enc_init = {0}; 253 254 if (link == NULL) { 255 BREAK_TO_DEBUGGER(); 256 goto failed_alloc; 257 } 258 259 link->link_index = dc->link_count; 260 dc->links[dc->link_count] = link; 261 dc->link_count++; 262 263 link->ctx = dc->ctx; 264 link->dc = dc; 265 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 266 link->link_id.type = OBJECT_TYPE_CONNECTOR; 267 link->link_id.id = CONNECTOR_ID_VIRTUAL; 268 link->link_id.enum_id = ENUM_ID_1; 269 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 270 271 if (!link->link_enc) { 272 BREAK_TO_DEBUGGER(); 273 goto failed_alloc; 274 } 275 276 link->link_status.dpcd_caps = &link->dpcd_caps; 277 278 enc_init.ctx = dc->ctx; 279 enc_init.channel = CHANNEL_ID_UNKNOWN; 280 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 281 enc_init.transmitter = TRANSMITTER_UNKNOWN; 282 enc_init.connector = link->link_id; 283 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 284 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 285 enc_init.encoder.enum_id = ENUM_ID_1; 286 virtual_link_encoder_construct(link->link_enc, &enc_init); 287 } 288 289 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 290 291 return true; 292 293 failed_alloc: 294 return false; 295 } 296 297 /* Create additional DIG link encoder objects if fewer than the platform 298 * supports were created during link construction. This can happen if the 299 * number of physical connectors is less than the number of DIGs. 300 */ 301 static bool create_link_encoders(struct dc *dc) 302 { 303 bool res = true; 304 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 305 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 306 int i; 307 308 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 309 * link encoders and physical display endpoints and does not require 310 * additional link encoder objects. 311 */ 312 if (num_usb4_dpia == 0) 313 return res; 314 315 /* Create as many link encoder objects as the platform supports. DPIA 316 * endpoints can be programmably mapped to any DIG. 317 */ 318 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { 319 for (i = 0; i < num_dig_link_enc; i++) { 320 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 321 322 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { 323 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, 324 (enum engine_id)(ENGINE_ID_DIGA + i)); 325 if (link_enc) { 326 dc->res_pool->link_encoders[i] = link_enc; 327 dc->res_pool->dig_link_enc_count++; 328 } else { 329 res = false; 330 } 331 } 332 } 333 } 334 335 return res; 336 } 337 338 /* Destroy any additional DIG link encoder objects created by 339 * create_link_encoders(). 340 * NB: Must only be called after destroy_links(). 
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR for this stream
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
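 *
 * Illustrative use only (a sketch: it assumes DRR was previously enabled on
 * @stream via dc_stream_adjust_vmin_vmax(), and the local variable name is
 * arbitrary):
 *
 *   uint32_t last_vtotal = 0;
 *
 *   if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *       DC_LOG_DC("last used DRR VTOTAL: %u\n", last_vtotal);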
442 */ 443 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 444 struct dc_stream_state *stream, 445 uint32_t *refresh_rate) 446 { 447 bool status = false; 448 449 int i = 0; 450 451 for (i = 0; i < MAX_PIPES; i++) { 452 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 453 454 if (pipe->stream == stream && pipe->stream_res.tg) { 455 /* Only execute if a function pointer has been defined for 456 * the DC version in question 457 */ 458 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { 459 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); 460 461 status = true; 462 463 break; 464 } 465 } 466 } 467 468 return status; 469 } 470 471 bool dc_stream_get_crtc_position(struct dc *dc, 472 struct dc_stream_state **streams, int num_streams, 473 unsigned int *v_pos, unsigned int *nom_v_pos) 474 { 475 /* TODO: Support multiple streams */ 476 const struct dc_stream_state *stream = streams[0]; 477 int i; 478 bool ret = false; 479 struct crtc_position position; 480 481 for (i = 0; i < MAX_PIPES; i++) { 482 struct pipe_ctx *pipe = 483 &dc->current_state->res_ctx.pipe_ctx[i]; 484 485 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 486 dc->hwss.get_position(&pipe, 1, &position); 487 488 *v_pos = position.vertical_count; 489 *nom_v_pos = position.nominal_vcount; 490 ret = true; 491 } 492 } 493 return ret; 494 } 495 496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 497 static inline void 498 dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, 499 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 500 { 501 union dmub_rb_cmd cmd = {0}; 502 503 cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; 504 cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; 505 506 if (is_stop) { 507 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 508 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE; 509 } else { 510 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 511 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; 512 cmd.secure_display.roi_info.x_start = rect->x; 513 cmd.secure_display.roi_info.y_start = rect->y; 514 cmd.secure_display.roi_info.x_end = rect->x + rect->width; 515 cmd.secure_display.roi_info.y_end = rect->y + rect->height; 516 } 517 518 dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 519 } 520 521 static inline void 522 dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, 523 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 524 { 525 if (is_stop) 526 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); 527 else 528 dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); 529 } 530 531 bool 532 dc_stream_forward_crc_window(struct dc_stream_state *stream, 533 struct rect *rect, bool is_stop) 534 { 535 struct dmcu *dmcu; 536 struct dc_dmub_srv *dmub_srv; 537 struct otg_phy_mux mux_mapping; 538 struct pipe_ctx *pipe; 539 int i; 540 struct dc *dc = stream->ctx->dc; 541 542 for (i = 0; i < MAX_PIPES; i++) { 543 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 544 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 545 break; 546 } 547 548 /* Stream not found */ 549 if (i == MAX_PIPES) 550 return false; 551 552 mux_mapping.phy_output_num = stream->link->link_enc_hw_inst; 553 mux_mapping.otg_output_num = pipe->stream_res.tg->inst; 554 555 dmcu = dc->res_pool->dmcu; 556 dmub_srv = dc->ctx->dmub_srv; 557 558 /* forward to dmub */ 559 if (dmub_srv) 560 
dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); 561 /* forward to dmcu */ 562 else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) 563 dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); 564 else 565 return false; 566 567 return true; 568 } 569 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 570 571 /** 572 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 573 * @dc: DC Object 574 * @stream: The stream to configure CRC on. 575 * @enable: Enable CRC if true, disable otherwise. 576 * @crc_window: CRC window (x/y start/end) information 577 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 578 * once. 579 * 580 * By default, only CRC0 is configured, and the entire frame is used to 581 * calculate the CRC. 582 * 583 * Return: %false if the stream is not found or CRC capture is not supported; 584 * %true if the stream has been configured. 585 */ 586 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 587 struct crc_params *crc_window, bool enable, bool continuous) 588 { 589 struct pipe_ctx *pipe; 590 struct crc_params param; 591 struct timing_generator *tg; 592 593 pipe = resource_get_otg_master_for_stream( 594 &dc->current_state->res_ctx, stream); 595 596 /* Stream not found */ 597 if (pipe == NULL) 598 return false; 599 600 /* By default, capture the full frame */ 601 param.windowa_x_start = 0; 602 param.windowa_y_start = 0; 603 param.windowa_x_end = pipe->stream->timing.h_addressable; 604 param.windowa_y_end = pipe->stream->timing.v_addressable; 605 param.windowb_x_start = 0; 606 param.windowb_y_start = 0; 607 param.windowb_x_end = pipe->stream->timing.h_addressable; 608 param.windowb_y_end = pipe->stream->timing.v_addressable; 609 610 if (crc_window) { 611 param.windowa_x_start = crc_window->windowa_x_start; 612 param.windowa_y_start = crc_window->windowa_y_start; 613 param.windowa_x_end = crc_window->windowa_x_end; 614 param.windowa_y_end = crc_window->windowa_y_end; 615 param.windowb_x_start = crc_window->windowb_x_start; 616 param.windowb_y_start = crc_window->windowb_y_start; 617 param.windowb_x_end = crc_window->windowb_x_end; 618 param.windowb_y_end = crc_window->windowb_y_end; 619 } 620 621 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 622 param.odm_mode = pipe->next_odm_pipe ? 1:0; 623 624 /* Default to the union of both windows */ 625 param.selection = UNION_WINDOW_A_B; 626 param.continuous_mode = continuous; 627 param.enable = enable; 628 629 tg = pipe->stream_res.tg; 630 631 /* Only call if supported */ 632 if (tg->funcs->configure_crc) 633 return tg->funcs->configure_crc(tg, ¶m); 634 DC_LOG_WARNING("CRC capture not supported."); 635 return false; 636 } 637 638 /** 639 * dc_stream_get_crc() - Get CRC values for the given stream. 640 * 641 * @dc: DC object. 642 * @stream: The DC stream state of the stream to get CRCs from. 643 * @r_cr: CRC value for the red component. 644 * @g_y: CRC value for the green component. 645 * @b_cb: CRC value for the blue component. 646 * 647 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 648 * 649 * Return: 650 * %false if stream is not found, or if CRCs are not enabled. 
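 *
 * Illustrative capture sequence only (a sketch, not the only valid order;
 * passing NULL as the window captures the full frame, per
 * dc_stream_configure_crc() above):
 *
 *   uint32_t r_cr, g_y, b_cb;
 *
 *   dc_stream_configure_crc(dc, stream, NULL, true, true);
 *   ...
 *   if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *       DC_LOG_DC("CRC: %u %u %u\n", r_cr, g_y, b_cb);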
651 */ 652 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 653 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 654 { 655 int i; 656 struct pipe_ctx *pipe; 657 struct timing_generator *tg; 658 659 for (i = 0; i < MAX_PIPES; i++) { 660 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 661 if (pipe->stream == stream) 662 break; 663 } 664 /* Stream not found */ 665 if (i == MAX_PIPES) 666 return false; 667 668 tg = pipe->stream_res.tg; 669 670 if (tg->funcs->get_crc) 671 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 672 DC_LOG_WARNING("CRC capture not supported."); 673 return false; 674 } 675 676 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 677 enum dc_dynamic_expansion option) 678 { 679 /* OPP FMT dyn expansion updates*/ 680 int i; 681 struct pipe_ctx *pipe_ctx; 682 683 for (i = 0; i < MAX_PIPES; i++) { 684 if (dc->current_state->res_ctx.pipe_ctx[i].stream 685 == stream) { 686 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 687 pipe_ctx->stream_res.opp->dyn_expansion = option; 688 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 689 pipe_ctx->stream_res.opp, 690 COLOR_SPACE_YCBCR601, 691 stream->timing.display_color_depth, 692 stream->signal); 693 } 694 } 695 } 696 697 void dc_stream_set_dither_option(struct dc_stream_state *stream, 698 enum dc_dither_option option) 699 { 700 struct bit_depth_reduction_params params; 701 struct dc_link *link = stream->link; 702 struct pipe_ctx *pipes = NULL; 703 int i; 704 705 for (i = 0; i < MAX_PIPES; i++) { 706 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 707 stream) { 708 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 709 break; 710 } 711 } 712 713 if (!pipes) 714 return; 715 if (option > DITHER_OPTION_MAX) 716 return; 717 718 stream->dither_option = option; 719 720 memset(¶ms, 0, sizeof(params)); 721 resource_build_bit_depth_reduction_params(stream, ¶ms); 722 stream->bit_depth_params = params; 723 724 if (pipes->plane_res.xfm && 725 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 726 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 727 pipes->plane_res.xfm, 728 pipes->plane_res.scl_data.lb_params.depth, 729 &stream->bit_depth_params); 730 } 731 732 pipes->stream_res.opp->funcs-> 733 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 734 } 735 736 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 737 { 738 int i; 739 bool ret = false; 740 struct pipe_ctx *pipes; 741 742 for (i = 0; i < MAX_PIPES; i++) { 743 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 744 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 745 dc->hwss.program_gamut_remap(pipes); 746 ret = true; 747 } 748 } 749 750 return ret; 751 } 752 753 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 754 { 755 int i; 756 bool ret = false; 757 struct pipe_ctx *pipes; 758 759 for (i = 0; i < MAX_PIPES; i++) { 760 if (dc->current_state->res_ctx.pipe_ctx[i].stream 761 == stream) { 762 763 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 764 dc->hwss.program_output_csc(dc, 765 pipes, 766 stream->output_color_space, 767 stream->csc_color_matrix.matrix, 768 pipes->stream_res.opp->inst); 769 ret = true; 770 } 771 } 772 773 return ret; 774 } 775 776 void dc_stream_set_static_screen_params(struct dc *dc, 777 struct dc_stream_state **streams, 778 int num_streams, 779 const struct dc_static_screen_params *params) 780 { 781 int i, j; 782 struct pipe_ctx *pipes_affected[MAX_PIPES]; 783 int 
num_pipes_affected = 0; 784 785 for (i = 0; i < num_streams; i++) { 786 struct dc_stream_state *stream = streams[i]; 787 788 for (j = 0; j < MAX_PIPES; j++) { 789 if (dc->current_state->res_ctx.pipe_ctx[j].stream 790 == stream) { 791 pipes_affected[num_pipes_affected++] = 792 &dc->current_state->res_ctx.pipe_ctx[j]; 793 } 794 } 795 } 796 797 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 798 } 799 800 static void dc_destruct(struct dc *dc) 801 { 802 // reset link encoder assignment table on destruct 803 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 804 link_enc_cfg_init(dc, dc->current_state); 805 806 if (dc->current_state) { 807 dc_release_state(dc->current_state); 808 dc->current_state = NULL; 809 } 810 811 destroy_links(dc); 812 813 destroy_link_encoders(dc); 814 815 if (dc->clk_mgr) { 816 dc_destroy_clk_mgr(dc->clk_mgr); 817 dc->clk_mgr = NULL; 818 } 819 820 dc_destroy_resource_pool(dc); 821 822 if (dc->link_srv) 823 link_destroy_link_service(&dc->link_srv); 824 825 if (dc->ctx->gpio_service) 826 dal_gpio_service_destroy(&dc->ctx->gpio_service); 827 828 if (dc->ctx->created_bios) 829 dal_bios_parser_destroy(&dc->ctx->dc_bios); 830 831 dc_perf_trace_destroy(&dc->ctx->perf_trace); 832 833 kfree(dc->ctx); 834 dc->ctx = NULL; 835 836 kfree(dc->bw_vbios); 837 dc->bw_vbios = NULL; 838 839 kfree(dc->bw_dceip); 840 dc->bw_dceip = NULL; 841 842 kfree(dc->dcn_soc); 843 dc->dcn_soc = NULL; 844 845 kfree(dc->dcn_ip); 846 dc->dcn_ip = NULL; 847 848 kfree(dc->vm_helper); 849 dc->vm_helper = NULL; 850 851 } 852 853 static bool dc_construct_ctx(struct dc *dc, 854 const struct dc_init_data *init_params) 855 { 856 struct dc_context *dc_ctx; 857 858 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 859 if (!dc_ctx) 860 return false; 861 862 dc_ctx->cgs_device = init_params->cgs_device; 863 dc_ctx->driver_context = init_params->driver; 864 dc_ctx->dc = dc; 865 dc_ctx->asic_id = init_params->asic_id; 866 dc_ctx->dc_sink_id_count = 0; 867 dc_ctx->dc_stream_id_count = 0; 868 dc_ctx->dce_environment = init_params->dce_environment; 869 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; 870 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; 871 872 /* Create logger */ 873 874 dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); 875 876 dc_ctx->perf_trace = dc_perf_trace_create(); 877 if (!dc_ctx->perf_trace) { 878 kfree(dc_ctx); 879 ASSERT_CRITICAL(false); 880 return false; 881 } 882 883 dc->ctx = dc_ctx; 884 885 dc->link_srv = link_create_link_service(); 886 if (!dc->link_srv) 887 return false; 888 889 return true; 890 } 891 892 static bool dc_construct(struct dc *dc, 893 const struct dc_init_data *init_params) 894 { 895 struct dc_context *dc_ctx; 896 struct bw_calcs_dceip *dc_dceip; 897 struct bw_calcs_vbios *dc_vbios; 898 struct dcn_soc_bounding_box *dcn_soc; 899 struct dcn_ip_params *dcn_ip; 900 901 dc->config = init_params->flags; 902 903 // Allocate memory for the vm_helper 904 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 905 if (!dc->vm_helper) { 906 dm_error("%s: failed to create dc->vm_helper\n", __func__); 907 goto fail; 908 } 909 910 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 911 912 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 913 if (!dc_dceip) { 914 dm_error("%s: failed to create dceip\n", __func__); 915 goto fail; 916 } 917 918 dc->bw_dceip = dc_dceip; 919 920 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 921 if (!dc_vbios) { 922 dm_error("%s: failed to create 
vbios\n", __func__); 923 goto fail; 924 } 925 926 dc->bw_vbios = dc_vbios; 927 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 928 if (!dcn_soc) { 929 dm_error("%s: failed to create dcn_soc\n", __func__); 930 goto fail; 931 } 932 933 dc->dcn_soc = dcn_soc; 934 935 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 936 if (!dcn_ip) { 937 dm_error("%s: failed to create dcn_ip\n", __func__); 938 goto fail; 939 } 940 941 dc->dcn_ip = dcn_ip; 942 943 if (!dc_construct_ctx(dc, init_params)) { 944 dm_error("%s: failed to create ctx\n", __func__); 945 goto fail; 946 } 947 948 dc_ctx = dc->ctx; 949 950 /* Resource should construct all asic specific resources. 951 * This should be the only place where we need to parse the asic id 952 */ 953 if (init_params->vbios_override) 954 dc_ctx->dc_bios = init_params->vbios_override; 955 else { 956 /* Create BIOS parser */ 957 struct bp_init_data bp_init_data; 958 959 bp_init_data.ctx = dc_ctx; 960 bp_init_data.bios = init_params->asic_id.atombios_base_address; 961 962 dc_ctx->dc_bios = dal_bios_parser_create( 963 &bp_init_data, dc_ctx->dce_version); 964 965 if (!dc_ctx->dc_bios) { 966 ASSERT_CRITICAL(false); 967 goto fail; 968 } 969 970 dc_ctx->created_bios = true; 971 } 972 973 dc->vendor_signature = init_params->vendor_signature; 974 975 /* Create GPIO service */ 976 dc_ctx->gpio_service = dal_gpio_service_create( 977 dc_ctx->dce_version, 978 dc_ctx->dce_environment, 979 dc_ctx); 980 981 if (!dc_ctx->gpio_service) { 982 ASSERT_CRITICAL(false); 983 goto fail; 984 } 985 986 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 987 if (!dc->res_pool) 988 goto fail; 989 990 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 991 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 992 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 993 if (dc->caps.max_optimizable_video_width == 0) 994 dc->caps.max_optimizable_video_width = 5120; 995 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 996 if (!dc->clk_mgr) 997 goto fail; 998 #ifdef CONFIG_DRM_AMD_DC_FP 999 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 1000 1001 if (dc->res_pool->funcs->update_bw_bounding_box) { 1002 DC_FP_START(); 1003 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 1004 DC_FP_END(); 1005 } 1006 #endif 1007 1008 /* Creation of current_state must occur after dc->dml 1009 * is initialized in dc_create_resource_pool because 1010 * on creation it copies the contents of dc->dml 1011 */ 1012 1013 dc->current_state = dc_create_state(dc); 1014 1015 if (!dc->current_state) { 1016 dm_error("%s: failed to create validate ctx\n", __func__); 1017 goto fail; 1018 } 1019 1020 if (!create_links(dc, init_params->num_virtual_links)) 1021 goto fail; 1022 1023 /* Create additional DIG link encoder objects if fewer than the platform 1024 * supports were created during link construction. 
1025 */ 1026 if (!create_link_encoders(dc)) 1027 goto fail; 1028 1029 dc_resource_state_construct(dc, dc->current_state); 1030 1031 return true; 1032 1033 fail: 1034 return false; 1035 } 1036 1037 static void disable_all_writeback_pipes_for_stream( 1038 const struct dc *dc, 1039 struct dc_stream_state *stream, 1040 struct dc_state *context) 1041 { 1042 int i; 1043 1044 for (i = 0; i < stream->num_wb_info; i++) 1045 stream->writeback_info[i].wb_enabled = false; 1046 } 1047 1048 static void apply_ctx_interdependent_lock(struct dc *dc, 1049 struct dc_state *context, 1050 struct dc_stream_state *stream, 1051 bool lock) 1052 { 1053 int i; 1054 1055 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1056 if (dc->hwss.interdependent_update_lock) 1057 dc->hwss.interdependent_update_lock(dc, context, lock); 1058 else { 1059 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1060 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1061 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1062 1063 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1064 if (stream == pipe_ctx->stream) { 1065 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && 1066 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1067 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1068 } 1069 } 1070 } 1071 } 1072 1073 static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 1074 { 1075 if (dc->ctx->dce_version >= DCN_VERSION_1_0) { 1076 memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); 1077 1078 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) 1079 get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1080 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) 1081 get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1082 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) 1083 get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1084 else { 1085 if (dc->ctx->dce_version < DCN_VERSION_2_0) 1086 color_space_to_black_color( 1087 dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); 1088 } 1089 if (dc->ctx->dce_version >= DCN_VERSION_2_0) { 1090 if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) 1091 get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1092 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) 1093 get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1094 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) 1095 get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1096 } 1097 } 1098 } 1099 1100 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1101 { 1102 int i, j; 1103 struct dc_state *dangling_context = dc_create_state(dc); 1104 struct dc_state *current_ctx; 1105 struct pipe_ctx *pipe; 1106 struct timing_generator *tg; 1107 1108 if (dangling_context == NULL) 1109 return; 1110 1111 dc_resource_state_copy_construct(dc->current_state, dangling_context); 1112 1113 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1114 struct dc_stream_state *old_stream = 1115 dc->current_state->res_ctx.pipe_ctx[i].stream; 1116 bool should_disable = true; 1117 bool pipe_split_change = false; 1118 1119 if ((context->res_ctx.pipe_ctx[i].top_pipe) && 1120 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) 
1121 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != 1122 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; 1123 else 1124 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != 1125 dc->current_state->res_ctx.pipe_ctx[i].top_pipe; 1126 1127 for (j = 0; j < context->stream_count; j++) { 1128 if (old_stream == context->streams[j]) { 1129 should_disable = false; 1130 break; 1131 } 1132 } 1133 if (!should_disable && pipe_split_change && 1134 dc->current_state->stream_count != context->stream_count) 1135 should_disable = true; 1136 1137 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && 1138 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { 1139 struct pipe_ctx *old_pipe, *new_pipe; 1140 1141 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1142 new_pipe = &context->res_ctx.pipe_ctx[i]; 1143 1144 if (old_pipe->plane_state && !new_pipe->plane_state) 1145 should_disable = true; 1146 } 1147 1148 if (should_disable && old_stream) { 1149 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1150 tg = pipe->stream_res.tg; 1151 /* When disabling plane for a phantom pipe, we must turn on the 1152 * phantom OTG so the disable programming gets the double buffer 1153 * update. Otherwise the pipe will be left in a partially disabled 1154 * state that can result in underflow or hang when enabling it 1155 * again for different use. 1156 */ 1157 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1158 if (tg->funcs->enable_crtc) { 1159 int main_pipe_width, main_pipe_height; 1160 1161 main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; 1162 main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; 1163 if (dc->hwss.blank_phantom) 1164 dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); 1165 tg->funcs->enable_crtc(tg); 1166 } 1167 } 1168 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1169 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1170 1171 if (pipe->stream && pipe->plane_state) 1172 dc_update_viusal_confirm_color(dc, context, pipe); 1173 1174 if (dc->hwss.apply_ctx_for_surface) { 1175 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 1176 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 1177 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 1178 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1179 } 1180 if (dc->hwss.program_front_end_for_ctx) { 1181 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 1182 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 1183 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 1184 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1185 } 1186 /* We need to put the phantom OTG back into it's default (disabled) state or we 1187 * can get corruption when transition from one SubVP config to a different one. 1188 * The OTG is set to disable on falling edge of VUPDATE so the plane disable 1189 * will still get it's double buffer update. 
1190 */ 1191 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1192 if (tg->funcs->disable_phantom_crtc) 1193 tg->funcs->disable_phantom_crtc(tg); 1194 } 1195 } 1196 } 1197 1198 current_ctx = dc->current_state; 1199 dc->current_state = dangling_context; 1200 dc_release_state(current_ctx); 1201 } 1202 1203 static void disable_vbios_mode_if_required( 1204 struct dc *dc, 1205 struct dc_state *context) 1206 { 1207 unsigned int i, j; 1208 1209 /* check if timing_changed, disable stream*/ 1210 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1211 struct dc_stream_state *stream = NULL; 1212 struct dc_link *link = NULL; 1213 struct pipe_ctx *pipe = NULL; 1214 1215 pipe = &context->res_ctx.pipe_ctx[i]; 1216 stream = pipe->stream; 1217 if (stream == NULL) 1218 continue; 1219 1220 if (stream->apply_seamless_boot_optimization) 1221 continue; 1222 1223 // only looking for first odm pipe 1224 if (pipe->prev_odm_pipe) 1225 continue; 1226 1227 if (stream->link->local_sink && 1228 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1229 link = stream->link; 1230 } 1231 1232 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1233 unsigned int enc_inst, tg_inst = 0; 1234 unsigned int pix_clk_100hz; 1235 1236 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1237 if (enc_inst != ENGINE_ID_UNKNOWN) { 1238 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 1239 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 1240 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 1241 dc->res_pool->stream_enc[j]); 1242 break; 1243 } 1244 } 1245 1246 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1247 dc->res_pool->dp_clock_source, 1248 tg_inst, &pix_clk_100hz); 1249 1250 if (link->link_status.link_active) { 1251 uint32_t requested_pix_clk_100hz = 1252 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 1253 1254 if (pix_clk_100hz != requested_pix_clk_100hz) { 1255 dc->link_srv->set_dpms_off(pipe); 1256 pipe->stream->dpms_off = false; 1257 } 1258 } 1259 } 1260 } 1261 } 1262 } 1263 1264 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 1265 { 1266 int i; 1267 PERF_TRACE(); 1268 for (i = 0; i < MAX_PIPES; i++) { 1269 int count = 0; 1270 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1271 1272 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1273 continue; 1274 1275 /* Timeout 100 ms */ 1276 while (count < 100000) { 1277 /* Must set to false to start with, due to OR in update function */ 1278 pipe->plane_state->status.is_flip_pending = false; 1279 dc->hwss.update_pending_status(pipe); 1280 if (!pipe->plane_state->status.is_flip_pending) 1281 break; 1282 udelay(1); 1283 count++; 1284 } 1285 ASSERT(!pipe->plane_state->status.is_flip_pending); 1286 } 1287 PERF_TRACE(); 1288 } 1289 1290 /* Public functions */ 1291 1292 struct dc *dc_create(const struct dc_init_data *init_params) 1293 { 1294 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1295 unsigned int full_pipe_count; 1296 1297 if (!dc) 1298 return NULL; 1299 1300 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 1301 dc->caps.linear_pitch_alignment = 64; 1302 if (!dc_construct_ctx(dc, init_params)) 1303 goto destruct_dc; 1304 } else { 1305 if (!dc_construct(dc, init_params)) 1306 goto destruct_dc; 1307 1308 full_pipe_count = dc->res_pool->pipe_count; 1309 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 1310 full_pipe_count--; 1311 dc->caps.max_streams = min( 1312 full_pipe_count, 1313 
dc->res_pool->stream_enc_count); 1314 1315 dc->caps.max_links = dc->link_count; 1316 dc->caps.max_audios = dc->res_pool->audio_count; 1317 dc->caps.linear_pitch_alignment = 64; 1318 1319 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 1320 1321 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; 1322 1323 if (dc->res_pool->dmcu != NULL) 1324 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 1325 } 1326 1327 dc->dcn_reg_offsets = init_params->dcn_reg_offsets; 1328 dc->nbio_reg_offsets = init_params->nbio_reg_offsets; 1329 1330 /* Populate versioning information */ 1331 dc->versions.dc_ver = DC_VER; 1332 1333 dc->build_id = DC_BUILD_ID; 1334 1335 DC_LOG_DC("Display Core initialized\n"); 1336 1337 1338 1339 return dc; 1340 1341 destruct_dc: 1342 dc_destruct(dc); 1343 kfree(dc); 1344 return NULL; 1345 } 1346 1347 static void detect_edp_presence(struct dc *dc) 1348 { 1349 struct dc_link *edp_links[MAX_NUM_EDP]; 1350 struct dc_link *edp_link = NULL; 1351 enum dc_connection_type type; 1352 int i; 1353 int edp_num; 1354 1355 dc_get_edp_links(dc, edp_links, &edp_num); 1356 if (!edp_num) 1357 return; 1358 1359 for (i = 0; i < edp_num; i++) { 1360 edp_link = edp_links[i]; 1361 if (dc->config.edp_not_connected) { 1362 edp_link->edp_sink_present = false; 1363 } else { 1364 dc_link_detect_connection_type(edp_link, &type); 1365 edp_link->edp_sink_present = (type != dc_connection_none); 1366 } 1367 } 1368 } 1369 1370 void dc_hardware_init(struct dc *dc) 1371 { 1372 1373 detect_edp_presence(dc); 1374 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1375 dc->hwss.init_hw(dc); 1376 } 1377 1378 void dc_init_callbacks(struct dc *dc, 1379 const struct dc_callback_init *init_params) 1380 { 1381 dc->ctx->cp_psp = init_params->cp_psp; 1382 } 1383 1384 void dc_deinit_callbacks(struct dc *dc) 1385 { 1386 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1387 } 1388 1389 void dc_destroy(struct dc **dc) 1390 { 1391 dc_destruct(*dc); 1392 kfree(*dc); 1393 *dc = NULL; 1394 } 1395 1396 static void enable_timing_multisync( 1397 struct dc *dc, 1398 struct dc_state *ctx) 1399 { 1400 int i, multisync_count = 0; 1401 int pipe_count = dc->res_pool->pipe_count; 1402 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1403 1404 for (i = 0; i < pipe_count; i++) { 1405 if (!ctx->res_ctx.pipe_ctx[i].stream || 1406 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1407 continue; 1408 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1409 continue; 1410 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1411 multisync_count++; 1412 } 1413 1414 if (multisync_count > 0) { 1415 dc->hwss.enable_per_frame_crtc_position_reset( 1416 dc, multisync_count, multisync_pipes); 1417 } 1418 } 1419 1420 static void program_timing_sync( 1421 struct dc *dc, 1422 struct dc_state *ctx) 1423 { 1424 int i, j, k; 1425 int group_index = 0; 1426 int num_group = 0; 1427 int pipe_count = dc->res_pool->pipe_count; 1428 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1429 1430 for (i = 0; i < pipe_count; i++) { 1431 if (!ctx->res_ctx.pipe_ctx[i].stream 1432 || ctx->res_ctx.pipe_ctx[i].top_pipe 1433 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) 1434 continue; 1435 1436 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1437 } 1438 1439 for (i = 0; i < pipe_count; i++) { 1440 int group_size = 1; 1441 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; 1442 struct pipe_ctx *pipe_set[MAX_PIPES]; 1443 1444 if (!unsynced_pipes[i]) 
1445 continue; 1446 1447 pipe_set[0] = unsynced_pipes[i]; 1448 unsynced_pipes[i] = NULL; 1449 1450 /* Add tg to the set, search rest of the tg's for ones with 1451 * same timing, add all tgs with same timing to the group 1452 */ 1453 for (j = i + 1; j < pipe_count; j++) { 1454 if (!unsynced_pipes[j]) 1455 continue; 1456 if (sync_type != TIMING_SYNCHRONIZABLE && 1457 dc->hwss.enable_vblanks_synchronization && 1458 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && 1459 resource_are_vblanks_synchronizable( 1460 unsynced_pipes[j]->stream, 1461 pipe_set[0]->stream)) { 1462 sync_type = VBLANK_SYNCHRONIZABLE; 1463 pipe_set[group_size] = unsynced_pipes[j]; 1464 unsynced_pipes[j] = NULL; 1465 group_size++; 1466 } else 1467 if (sync_type != VBLANK_SYNCHRONIZABLE && 1468 resource_are_streams_timing_synchronizable( 1469 unsynced_pipes[j]->stream, 1470 pipe_set[0]->stream)) { 1471 sync_type = TIMING_SYNCHRONIZABLE; 1472 pipe_set[group_size] = unsynced_pipes[j]; 1473 unsynced_pipes[j] = NULL; 1474 group_size++; 1475 } 1476 } 1477 1478 /* set first unblanked pipe as master */ 1479 for (j = 0; j < group_size; j++) { 1480 bool is_blanked; 1481 1482 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1483 is_blanked = 1484 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1485 else 1486 is_blanked = 1487 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1488 if (!is_blanked) { 1489 if (j == 0) 1490 break; 1491 1492 swap(pipe_set[0], pipe_set[j]); 1493 break; 1494 } 1495 } 1496 1497 for (k = 0; k < group_size; k++) { 1498 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1499 1500 status->timing_sync_info.group_id = num_group; 1501 status->timing_sync_info.group_size = group_size; 1502 if (k == 0) 1503 status->timing_sync_info.master = true; 1504 else 1505 status->timing_sync_info.master = false; 1506 1507 } 1508 1509 /* remove any other pipes that are already been synced */ 1510 if (dc->config.use_pipe_ctx_sync_logic) { 1511 /* check pipe's syncd to decide which pipe to be removed */ 1512 for (j = 1; j < group_size; j++) { 1513 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1514 group_size--; 1515 pipe_set[j] = pipe_set[group_size]; 1516 j--; 1517 } else 1518 /* link slave pipe's syncd with master pipe */ 1519 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1520 } 1521 } else { 1522 for (j = j + 1; j < group_size; j++) { 1523 bool is_blanked; 1524 1525 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1526 is_blanked = 1527 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1528 else 1529 is_blanked = 1530 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1531 if (!is_blanked) { 1532 group_size--; 1533 pipe_set[j] = pipe_set[group_size]; 1534 j--; 1535 } 1536 } 1537 } 1538 1539 if (group_size > 1) { 1540 if (sync_type == TIMING_SYNCHRONIZABLE) { 1541 dc->hwss.enable_timing_synchronization( 1542 dc, group_index, group_size, pipe_set); 1543 } else 1544 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1545 dc->hwss.enable_vblanks_synchronization( 1546 dc, group_index, group_size, pipe_set); 1547 } 1548 group_index++; 1549 } 1550 num_group++; 1551 } 1552 } 1553 1554 static bool streams_changed(struct dc *dc, 1555 struct dc_stream_state *streams[], 1556 uint8_t stream_count) 1557 { 1558 uint8_t i; 1559 1560 if (stream_count != dc->current_state->stream_count) 1561 return true; 1562 1563 for (i = 0; i < 
dc->current_state->stream_count; i++) { 1564 if (dc->current_state->streams[i] != streams[i]) 1565 return true; 1566 if (!streams[i]->link->link_state_valid) 1567 return true; 1568 } 1569 1570 return false; 1571 } 1572 1573 bool dc_validate_boot_timing(const struct dc *dc, 1574 const struct dc_sink *sink, 1575 struct dc_crtc_timing *crtc_timing) 1576 { 1577 struct timing_generator *tg; 1578 struct stream_encoder *se = NULL; 1579 1580 struct dc_crtc_timing hw_crtc_timing = {0}; 1581 1582 struct dc_link *link = sink->link; 1583 unsigned int i, enc_inst, tg_inst = 0; 1584 1585 /* Support seamless boot on EDP displays only */ 1586 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1587 return false; 1588 } 1589 1590 if (dc->debug.force_odm_combine) 1591 return false; 1592 1593 /* Check for enabled DIG to identify enabled display */ 1594 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) 1595 return false; 1596 1597 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1598 1599 if (enc_inst == ENGINE_ID_UNKNOWN) 1600 return false; 1601 1602 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1603 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1604 1605 se = dc->res_pool->stream_enc[i]; 1606 1607 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1608 dc->res_pool->stream_enc[i]); 1609 break; 1610 } 1611 } 1612 1613 // tg_inst not found 1614 if (i == dc->res_pool->stream_enc_count) 1615 return false; 1616 1617 if (tg_inst >= dc->res_pool->timing_generator_count) 1618 return false; 1619 1620 if (tg_inst != link->link_enc->preferred_engine) 1621 return false; 1622 1623 tg = dc->res_pool->timing_generators[tg_inst]; 1624 1625 if (!tg->funcs->get_hw_timing) 1626 return false; 1627 1628 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) 1629 return false; 1630 1631 if (crtc_timing->h_total != hw_crtc_timing.h_total) 1632 return false; 1633 1634 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) 1635 return false; 1636 1637 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) 1638 return false; 1639 1640 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) 1641 return false; 1642 1643 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) 1644 return false; 1645 1646 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) 1647 return false; 1648 1649 if (crtc_timing->v_total != hw_crtc_timing.v_total) 1650 return false; 1651 1652 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) 1653 return false; 1654 1655 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) 1656 return false; 1657 1658 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) 1659 return false; 1660 1661 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) 1662 return false; 1663 1664 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1665 return false; 1666 1667 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1668 if (crtc_timing->flags.DSC) 1669 return false; 1670 1671 if (dc_is_dp_signal(link->connector_signal)) { 1672 unsigned int pix_clk_100hz; 1673 uint32_t numOdmPipes = 1; 1674 uint32_t id_src[4] = {0}; 1675 1676 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1677 dc->res_pool->dp_clock_source, 1678 tg_inst, &pix_clk_100hz); 1679 1680 if (tg->funcs->get_optc_source) 1681 tg->funcs->get_optc_source(tg, 1682 &numOdmPipes, &id_src[0], &id_src[1]); 1683 1684 if (numOdmPipes == 2) 1685 pix_clk_100hz *= 2; 1686 if (numOdmPipes == 4) 1687 
pix_clk_100hz *= 4; 1688 1689 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1690 // slightly due to rounding issues in 10 kHz units. 1691 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) 1692 return false; 1693 1694 if (!se || !se->funcs->dp_get_pixel_format) 1695 return false; 1696 1697 if (!se->funcs->dp_get_pixel_format( 1698 se, 1699 &hw_crtc_timing.pixel_encoding, 1700 &hw_crtc_timing.display_color_depth)) 1701 return false; 1702 1703 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) 1704 return false; 1705 1706 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) 1707 return false; 1708 } 1709 1710 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1711 return false; 1712 } 1713 1714 if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) 1715 return false; 1716 1717 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { 1718 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1719 return false; 1720 } 1721 1722 return true; 1723 } 1724 1725 static inline bool should_update_pipe_for_stream( 1726 struct dc_state *context, 1727 struct pipe_ctx *pipe_ctx, 1728 struct dc_stream_state *stream) 1729 { 1730 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1731 } 1732 1733 static inline bool should_update_pipe_for_plane( 1734 struct dc_state *context, 1735 struct pipe_ctx *pipe_ctx, 1736 struct dc_plane_state *plane_state) 1737 { 1738 return (pipe_ctx->plane_state == plane_state); 1739 } 1740 1741 void dc_enable_stereo( 1742 struct dc *dc, 1743 struct dc_state *context, 1744 struct dc_stream_state *streams[], 1745 uint8_t stream_count) 1746 { 1747 int i, j; 1748 struct pipe_ctx *pipe; 1749 1750 for (i = 0; i < MAX_PIPES; i++) { 1751 if (context != NULL) { 1752 pipe = &context->res_ctx.pipe_ctx[i]; 1753 } else { 1754 context = dc->current_state; 1755 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1756 } 1757 1758 for (j = 0; pipe && j < stream_count; j++) { 1759 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1760 dc->hwss.setup_stereo) 1761 dc->hwss.setup_stereo(pipe, dc); 1762 } 1763 } 1764 } 1765 1766 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1767 { 1768 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1769 enable_timing_multisync(dc, context); 1770 program_timing_sync(dc, context); 1771 } 1772 } 1773 1774 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1775 { 1776 int i; 1777 unsigned int stream_mask = 0; 1778 1779 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1780 if (context->res_ctx.pipe_ctx[i].stream) 1781 stream_mask |= 1 << i; 1782 } 1783 1784 return stream_mask; 1785 } 1786 1787 void dc_z10_restore(const struct dc *dc) 1788 { 1789 if (dc->hwss.z10_restore) 1790 dc->hwss.z10_restore(dc); 1791 } 1792 1793 void dc_z10_save_init(struct dc *dc) 1794 { 1795 if (dc->hwss.z10_save_init) 1796 dc->hwss.z10_save_init(dc); 1797 } 1798 1799 /** 1800 * dc_commit_state_no_check - Apply context to the hardware 1801 * 1802 * @dc: DC object with the current status to be updated 1803 * @context: New state that will become the current status at the end of this function 1804 * 1805 * Applies given context to the hardware and copy it into current context. 1806 * It's up to the user to release the src context afterwards. 
1807 * 1808 * Return: an enum dc_status result code for the operation 1809 */ 1810 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1811 { 1812 struct dc_bios *dcb = dc->ctx->dc_bios; 1813 enum dc_status result = DC_ERROR_UNEXPECTED; 1814 struct pipe_ctx *pipe; 1815 int i, k, l; 1816 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1817 struct dc_state *old_state; 1818 bool subvp_prev_use = false; 1819 1820 dc_z10_restore(dc); 1821 dc_allow_idle_optimizations(dc, false); 1822 1823 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1824 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1825 1826 /* Check old context for SubVP */ 1827 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 1828 if (subvp_prev_use) 1829 break; 1830 } 1831 1832 for (i = 0; i < context->stream_count; i++) 1833 dc_streams[i] = context->streams[i]; 1834 1835 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1836 disable_vbios_mode_if_required(dc, context); 1837 dc->hwss.enable_accelerated_mode(dc, context); 1838 } 1839 1840 if (context->stream_count > get_seamless_boot_stream_count(context) || 1841 context->stream_count == 0) 1842 dc->hwss.prepare_bandwidth(dc, context); 1843 1844 /* When SubVP is active, all HW programming must be done while 1845 * SubVP lock is acquired 1846 */ 1847 if (dc->hwss.subvp_pipe_control_lock) 1848 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1849 1850 if (dc->hwss.update_dsc_pg) 1851 dc->hwss.update_dsc_pg(dc, context, false); 1852 1853 disable_dangling_plane(dc, context); 1854 /* re-program planes for existing stream, in case we need to 1855 * free up plane resource for later use 1856 */ 1857 if (dc->hwss.apply_ctx_for_surface) { 1858 for (i = 0; i < context->stream_count; i++) { 1859 if (context->streams[i]->mode_changed) 1860 continue; 1861 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1862 dc->hwss.apply_ctx_for_surface( 1863 dc, context->streams[i], 1864 context->stream_status[i].plane_count, 1865 context); /* use new pipe config in new context */ 1866 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1867 dc->hwss.post_unlock_program_front_end(dc, context); 1868 } 1869 } 1870 1871 /* Program hardware */ 1872 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1873 pipe = &context->res_ctx.pipe_ctx[i]; 1874 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1875 } 1876 1877 result = dc->hwss.apply_ctx_to_hw(dc, context); 1878 1879 if (result != DC_OK) { 1880 /* Application of dc_state to hardware stopped. 
*/ 1881 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 1882 return result; 1883 } 1884 1885 dc_trigger_sync(dc, context); 1886 1887 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ 1888 for (i = 0; i < context->stream_count; i++) { 1889 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; 1890 1891 context->streams[i]->update_flags.raw = 0xFFFFFFFF; 1892 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; 1893 } 1894 1895 /* Program all planes within new context*/ 1896 if (dc->hwss.program_front_end_for_ctx) { 1897 dc->hwss.interdependent_update_lock(dc, context, true); 1898 dc->hwss.program_front_end_for_ctx(dc, context); 1899 dc->hwss.interdependent_update_lock(dc, context, false); 1900 dc->hwss.post_unlock_program_front_end(dc, context); 1901 } 1902 1903 if (dc->hwss.commit_subvp_config) 1904 dc->hwss.commit_subvp_config(dc, context); 1905 if (dc->hwss.subvp_pipe_control_lock) 1906 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 1907 1908 for (i = 0; i < context->stream_count; i++) { 1909 const struct dc_link *link = context->streams[i]->link; 1910 1911 if (!context->streams[i]->mode_changed) 1912 continue; 1913 1914 if (dc->hwss.apply_ctx_for_surface) { 1915 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1916 dc->hwss.apply_ctx_for_surface( 1917 dc, context->streams[i], 1918 context->stream_status[i].plane_count, 1919 context); 1920 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1921 dc->hwss.post_unlock_program_front_end(dc, context); 1922 } 1923 1924 /* 1925 * enable stereo 1926 * TODO rework dc_enable_stereo call to work with validation sets? 1927 */ 1928 for (k = 0; k < MAX_PIPES; k++) { 1929 pipe = &context->res_ctx.pipe_ctx[k]; 1930 1931 for (l = 0 ; pipe && l < context->stream_count; l++) { 1932 if (context->streams[l] && 1933 context->streams[l] == pipe->stream && 1934 dc->hwss.setup_stereo) 1935 dc->hwss.setup_stereo(pipe, dc); 1936 } 1937 } 1938 1939 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 1940 context->streams[i]->timing.h_addressable, 1941 context->streams[i]->timing.v_addressable, 1942 context->streams[i]->timing.h_total, 1943 context->streams[i]->timing.v_total, 1944 context->streams[i]->timing.pix_clk_100hz / 10); 1945 } 1946 1947 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1948 1949 if (context->stream_count > get_seamless_boot_stream_count(context) || 1950 context->stream_count == 0) { 1951 /* Must wait for no flips to be pending before doing optimize bw */ 1952 wait_for_no_pipes_pending(dc, context); 1953 /* pplib is notified if disp_num changed */ 1954 dc->hwss.optimize_bandwidth(dc, context); 1955 /* Need to do otg sync again as otg could be out of sync due to otg 1956 * workaround applied during clock update 1957 */ 1958 dc_trigger_sync(dc, context); 1959 } 1960 1961 if (dc->hwss.update_dsc_pg) 1962 dc->hwss.update_dsc_pg(dc, context, true); 1963 1964 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1965 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1966 else 1967 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1968 1969 context->stream_mask = get_stream_mask(dc, context); 1970 1971 if (context->stream_mask != dc->current_state->stream_mask) 1972 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 1973 1974 for (i = 0; i < context->stream_count; i++) 1975 context->streams[i]->mode_changed = false; 1976 1977 /* Clear 
update flags that were set earlier to avoid redundant programming */ 1978 for (i = 0; i < context->stream_count; i++) { 1979 context->streams[i]->update_flags.raw = 0x0; 1980 } 1981 1982 old_state = dc->current_state; 1983 dc->current_state = context; 1984 1985 dc_release_state(old_state); 1986 1987 dc_retain_state(dc->current_state); 1988 1989 return result; 1990 } 1991 1992 static bool commit_minimal_transition_state(struct dc *dc, 1993 struct dc_state *transition_base_context); 1994 1995 /** 1996 * dc_commit_streams - Commit current stream state 1997 * 1998 * @dc: DC object with the commit state to be configured in the hardware 1999 * @streams: Array with a list of stream state 2000 * @stream_count: Total of streams 2001 * 2002 * Function responsible for commit streams change to the hardware. 2003 * 2004 * Return: 2005 * Return DC_OK if everything work as expected, otherwise, return a dc_status 2006 * code. 2007 */ 2008 enum dc_status dc_commit_streams(struct dc *dc, 2009 struct dc_stream_state *streams[], 2010 uint8_t stream_count) 2011 { 2012 int i, j; 2013 struct dc_state *context; 2014 enum dc_status res = DC_OK; 2015 struct dc_validation_set set[MAX_STREAMS] = {0}; 2016 struct pipe_ctx *pipe; 2017 bool handle_exit_odm2to1 = false; 2018 2019 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2020 return res; 2021 2022 if (!streams_changed(dc, streams, stream_count)) 2023 return res; 2024 2025 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 2026 2027 for (i = 0; i < stream_count; i++) { 2028 struct dc_stream_state *stream = streams[i]; 2029 struct dc_stream_status *status = dc_stream_get_status(stream); 2030 2031 dc_stream_log(dc, stream); 2032 2033 set[i].stream = stream; 2034 2035 if (status) { 2036 set[i].plane_count = status->plane_count; 2037 for (j = 0; j < status->plane_count; j++) 2038 set[i].plane_states[j] = status->plane_states[j]; 2039 } 2040 } 2041 2042 /* ODM Combine 2:1 power optimization is only applied for single stream 2043 * scenario, it uses extra pipes than needed to reduce power consumption 2044 * We need to switch off this feature to make room for new streams. 
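 * The feature is switched off by first committing a minimal transition
 * state (handle_exit_odm2to1 below), which lets the extra ODM pipes be
 * released before the additional streams are added.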
2045 */ 2046 if (stream_count > dc->current_state->stream_count && 2047 dc->current_state->stream_count == 1) { 2048 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2049 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2050 if (pipe->next_odm_pipe) 2051 handle_exit_odm2to1 = true; 2052 } 2053 } 2054 2055 if (handle_exit_odm2to1) 2056 res = commit_minimal_transition_state(dc, dc->current_state); 2057 2058 context = dc_create_state(dc); 2059 if (!context) 2060 goto context_alloc_fail; 2061 2062 dc_resource_state_copy_construct_current(dc, context); 2063 2064 res = dc_validate_with_context(dc, set, stream_count, context, false); 2065 if (res != DC_OK) { 2066 BREAK_TO_DEBUGGER(); 2067 goto fail; 2068 } 2069 2070 res = dc_commit_state_no_check(dc, context); 2071 2072 for (i = 0; i < stream_count; i++) { 2073 for (j = 0; j < context->stream_count; j++) { 2074 if (streams[i]->stream_id == context->streams[j]->stream_id) 2075 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2076 2077 if (dc_is_embedded_signal(streams[i]->signal)) { 2078 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 2079 2080 if (dc->hwss.is_abm_supported) 2081 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 2082 else 2083 status->is_abm_supported = true; 2084 } 2085 } 2086 } 2087 2088 fail: 2089 dc_release_state(context); 2090 2091 context_alloc_fail: 2092 2093 DC_LOG_DC("%s Finished.\n", __func__); 2094 2095 return res; 2096 } 2097 2098 bool dc_acquire_release_mpc_3dlut( 2099 struct dc *dc, bool acquire, 2100 struct dc_stream_state *stream, 2101 struct dc_3dlut **lut, 2102 struct dc_transfer_func **shaper) 2103 { 2104 int pipe_idx; 2105 bool ret = false; 2106 bool found_pipe_idx = false; 2107 const struct resource_pool *pool = dc->res_pool; 2108 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2109 int mpcc_id = 0; 2110 2111 if (pool && res_ctx) { 2112 if (acquire) { 2113 /*find pipe idx for the given stream*/ 2114 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2115 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2116 found_pipe_idx = true; 2117 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2118 break; 2119 } 2120 } 2121 } else 2122 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2123 2124 if (found_pipe_idx) { 2125 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2126 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2127 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2128 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2129 } 2130 } 2131 return ret; 2132 } 2133 2134 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2135 { 2136 int i; 2137 struct pipe_ctx *pipe; 2138 2139 for (i = 0; i < MAX_PIPES; i++) { 2140 pipe = &context->res_ctx.pipe_ctx[i]; 2141 2142 // Don't check flip pending on phantom pipes 2143 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2144 continue; 2145 2146 /* Must set to false to start with, due to OR in update function */ 2147 pipe->plane_state->status.is_flip_pending = false; 2148 dc->hwss.update_pending_status(pipe); 2149 if (pipe->plane_state->status.is_flip_pending) 2150 return true; 2151 } 2152 return false; 2153 } 2154 2155 /* Perform updates here which need to be deferred until next vupdate 2156 * 2157 * i.e. 
blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2158 * but forcing lut memory to shutdown state is immediate. This causes 2159 * single frame corruption as lut gets disabled mid-frame unless shutdown 2160 * is deferred until after entering bypass. 2161 */ 2162 static void process_deferred_updates(struct dc *dc) 2163 { 2164 int i = 0; 2165 2166 if (dc->debug.enable_mem_low_power.bits.cm) { 2167 ASSERT(dc->dcn_ip->max_num_dpp); 2168 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2169 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2170 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2171 } 2172 } 2173 2174 void dc_post_update_surfaces_to_stream(struct dc *dc) 2175 { 2176 int i; 2177 struct dc_state *context = dc->current_state; 2178 2179 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2180 return; 2181 2182 post_surface_trace(dc); 2183 2184 /* 2185 * Only relevant for DCN behavior where we can guarantee the optimization 2186 * is safe to apply - retain the legacy behavior for DCE. 2187 */ 2188 2189 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2190 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2191 else { 2192 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2193 2194 if (is_flip_pending_in_pipes(dc, context)) 2195 return; 2196 2197 for (i = 0; i < dc->res_pool->pipe_count; i++) 2198 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2199 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2200 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2201 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2202 } 2203 2204 process_deferred_updates(dc); 2205 2206 dc->hwss.optimize_bandwidth(dc, context); 2207 2208 if (dc->hwss.update_dsc_pg) 2209 dc->hwss.update_dsc_pg(dc, context, true); 2210 } 2211 2212 dc->optimized_required = false; 2213 dc->wm_optimized_required = false; 2214 } 2215 2216 static void init_state(struct dc *dc, struct dc_state *context) 2217 { 2218 /* Each context must have their own instance of VBA and in order to 2219 * initialize and obtain IP and SOC the base DML instance from DC is 2220 * initially copied into every context 2221 */ 2222 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2223 } 2224 2225 struct dc_state *dc_create_state(struct dc *dc) 2226 { 2227 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2228 GFP_KERNEL); 2229 2230 if (!context) 2231 return NULL; 2232 2233 init_state(dc, context); 2234 2235 kref_init(&context->refcount); 2236 2237 return context; 2238 } 2239 2240 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2241 { 2242 int i, j; 2243 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2244 2245 if (!new_ctx) 2246 return NULL; 2247 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2248 2249 for (i = 0; i < MAX_PIPES; i++) { 2250 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2251 2252 if (cur_pipe->top_pipe) 2253 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2254 2255 if (cur_pipe->bottom_pipe) 2256 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2257 2258 if (cur_pipe->prev_odm_pipe) 2259 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2260 2261 if (cur_pipe->next_odm_pipe) 2262 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2263 2264 } 2265 2266 for (i = 0; i < new_ctx->stream_count; i++) { 2267 dc_stream_retain(new_ctx->streams[i]); 2268 for 
(j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2269 dc_plane_state_retain( 2270 new_ctx->stream_status[i].plane_states[j]); 2271 } 2272 2273 kref_init(&new_ctx->refcount); 2274 2275 return new_ctx; 2276 } 2277 2278 void dc_retain_state(struct dc_state *context) 2279 { 2280 kref_get(&context->refcount); 2281 } 2282 2283 static void dc_state_free(struct kref *kref) 2284 { 2285 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2286 dc_resource_state_destruct(context); 2287 kvfree(context); 2288 } 2289 2290 void dc_release_state(struct dc_state *context) 2291 { 2292 kref_put(&context->refcount, dc_state_free); 2293 } 2294 2295 bool dc_set_generic_gpio_for_stereo(bool enable, 2296 struct gpio_service *gpio_service) 2297 { 2298 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2299 struct gpio_pin_info pin_info; 2300 struct gpio *generic; 2301 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2302 GFP_KERNEL); 2303 2304 if (!config) 2305 return false; 2306 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2307 2308 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2309 kfree(config); 2310 return false; 2311 } else { 2312 generic = dal_gpio_service_create_generic_mux( 2313 gpio_service, 2314 pin_info.offset, 2315 pin_info.mask); 2316 } 2317 2318 if (!generic) { 2319 kfree(config); 2320 return false; 2321 } 2322 2323 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2324 2325 config->enable_output_from_mux = enable; 2326 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2327 2328 if (gpio_result == GPIO_RESULT_OK) 2329 gpio_result = dal_mux_setup_config(generic, config); 2330 2331 if (gpio_result == GPIO_RESULT_OK) { 2332 dal_gpio_close(generic); 2333 dal_gpio_destroy_generic_mux(&generic); 2334 kfree(config); 2335 return true; 2336 } else { 2337 dal_gpio_close(generic); 2338 dal_gpio_destroy_generic_mux(&generic); 2339 kfree(config); 2340 return false; 2341 } 2342 } 2343 2344 static bool is_surface_in_context( 2345 const struct dc_state *context, 2346 const struct dc_plane_state *plane_state) 2347 { 2348 int j; 2349 2350 for (j = 0; j < MAX_PIPES; j++) { 2351 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2352 2353 if (plane_state == pipe_ctx->plane_state) { 2354 return true; 2355 } 2356 } 2357 2358 return false; 2359 } 2360 2361 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2362 { 2363 union surface_update_flags *update_flags = &u->surface->update_flags; 2364 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2365 2366 if (!u->plane_info) 2367 return UPDATE_TYPE_FAST; 2368 2369 if (u->plane_info->color_space != u->surface->color_space) { 2370 update_flags->bits.color_space_change = 1; 2371 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2372 } 2373 2374 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2375 update_flags->bits.horizontal_mirror_change = 1; 2376 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2377 } 2378 2379 if (u->plane_info->rotation != u->surface->rotation) { 2380 update_flags->bits.rotation_change = 1; 2381 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2382 } 2383 2384 if (u->plane_info->format != u->surface->format) { 2385 update_flags->bits.pixel_format_change = 1; 2386 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2387 } 2388 2389 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2390 
update_flags->bits.stereo_format_change = 1; 2391 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2392 } 2393 2394 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2395 update_flags->bits.per_pixel_alpha_change = 1; 2396 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2397 } 2398 2399 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2400 update_flags->bits.global_alpha_change = 1; 2401 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2402 } 2403 2404 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2405 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2406 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2407 /* During DCC on/off, stutter period is calculated before 2408 * DCC has fully transitioned. This results in incorrect 2409 * stutter period calculation. Triggering a full update will 2410 * recalculate stutter period. 2411 */ 2412 update_flags->bits.dcc_change = 1; 2413 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2414 } 2415 2416 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2417 resource_pixel_format_to_bpp(u->surface->format)) { 2418 /* different bytes per element will require full bandwidth 2419 * and DML calculation 2420 */ 2421 update_flags->bits.bpp_change = 1; 2422 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2423 } 2424 2425 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2426 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2427 update_flags->bits.plane_size_change = 1; 2428 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2429 } 2430 2431 2432 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2433 sizeof(union dc_tiling_info)) != 0) { 2434 update_flags->bits.swizzle_change = 1; 2435 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2436 2437 /* todo: below are HW dependent, we should add a hook to 2438 * DCE/N resource and validated there. 2439 */ 2440 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2441 /* swizzled mode requires RQ to be setup properly, 2442 * thus need to run DML to calculate RQ settings 2443 */ 2444 update_flags->bits.bandwidth_change = 1; 2445 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2446 } 2447 } 2448 2449 /* This should be UPDATE_TYPE_FAST if nothing has changed. 
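 * Update types are ordered FAST < MED < FULL and elevate_update_type()
 * only ever raises the level, so the value returned here reflects the
 * most expensive change detected above.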
*/ 2450 return update_type; 2451 } 2452 2453 static enum surface_update_type get_scaling_info_update_type( 2454 const struct dc *dc, 2455 const struct dc_surface_update *u) 2456 { 2457 union surface_update_flags *update_flags = &u->surface->update_flags; 2458 2459 if (!u->scaling_info) 2460 return UPDATE_TYPE_FAST; 2461 2462 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2463 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2464 || u->scaling_info->scaling_quality.integer_scaling != 2465 u->surface->scaling_quality.integer_scaling 2466 ) { 2467 update_flags->bits.scaling_change = 1; 2468 2469 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2470 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2471 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2472 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2473 /* Making dst rect smaller requires a bandwidth change */ 2474 update_flags->bits.bandwidth_change = 1; 2475 } 2476 2477 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2478 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2479 2480 update_flags->bits.scaling_change = 1; 2481 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2482 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2483 /* Making src rect bigger requires a bandwidth change */ 2484 update_flags->bits.clock_change = 1; 2485 } 2486 2487 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && 2488 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || 2489 u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) 2490 /* Changing clip size of a large surface may result in MPC slice count change */ 2491 update_flags->bits.bandwidth_change = 1; 2492 2493 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2494 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2495 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2496 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2497 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2498 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2499 update_flags->bits.position_change = 1; 2500 2501 if (update_flags->bits.clock_change 2502 || update_flags->bits.bandwidth_change 2503 || update_flags->bits.scaling_change) 2504 return UPDATE_TYPE_FULL; 2505 2506 if (update_flags->bits.position_change) 2507 return UPDATE_TYPE_MED; 2508 2509 return UPDATE_TYPE_FAST; 2510 } 2511 2512 static enum surface_update_type det_surface_update(const struct dc *dc, 2513 const struct dc_surface_update *u) 2514 { 2515 const struct dc_state *context = dc->current_state; 2516 enum surface_update_type type; 2517 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2518 union surface_update_flags *update_flags = &u->surface->update_flags; 2519 2520 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2521 update_flags->raw = 0xFFFFFFFF; 2522 return UPDATE_TYPE_FULL; 2523 } 2524 2525 update_flags->raw = 0; // Reset all flags 2526 2527 type = get_plane_info_update_type(u); 2528 elevate_update_type(&overall_type, type); 2529 2530 type = get_scaling_info_update_type(dc, u); 2531 elevate_update_type(&overall_type, type); 2532 2533 if (u->flip_addr) { 2534 update_flags->bits.addr_update = 1; 2535 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2536 update_flags->bits.tmz_changed = 1; 2537 
elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2538 } 2539 } 2540 if (u->in_transfer_func) 2541 update_flags->bits.in_transfer_func_change = 1; 2542 2543 if (u->input_csc_color_matrix) 2544 update_flags->bits.input_csc_change = 1; 2545 2546 if (u->coeff_reduction_factor) 2547 update_flags->bits.coeff_reduction_change = 1; 2548 2549 if (u->gamut_remap_matrix) 2550 update_flags->bits.gamut_remap_change = 1; 2551 2552 if (u->gamma) { 2553 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2554 2555 if (u->plane_info) 2556 format = u->plane_info->format; 2557 else if (u->surface) 2558 format = u->surface->format; 2559 2560 if (dce_use_lut(format)) 2561 update_flags->bits.gamma_change = 1; 2562 } 2563 2564 if (u->lut3d_func || u->func_shaper) 2565 update_flags->bits.lut_3d = 1; 2566 2567 if (u->hdr_mult.value) 2568 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2569 update_flags->bits.hdr_mult = 1; 2570 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2571 } 2572 2573 if (update_flags->bits.in_transfer_func_change) { 2574 type = UPDATE_TYPE_MED; 2575 elevate_update_type(&overall_type, type); 2576 } 2577 2578 if (update_flags->bits.lut_3d) { 2579 type = UPDATE_TYPE_FULL; 2580 elevate_update_type(&overall_type, type); 2581 } 2582 2583 if (dc->debug.enable_legacy_fast_update && 2584 (update_flags->bits.gamma_change || 2585 update_flags->bits.gamut_remap_change || 2586 update_flags->bits.input_csc_change || 2587 update_flags->bits.coeff_reduction_change)) { 2588 type = UPDATE_TYPE_FULL; 2589 elevate_update_type(&overall_type, type); 2590 } 2591 return overall_type; 2592 } 2593 2594 static enum surface_update_type check_update_surfaces_for_stream( 2595 struct dc *dc, 2596 struct dc_surface_update *updates, 2597 int surface_count, 2598 struct dc_stream_update *stream_update, 2599 const struct dc_stream_status *stream_status) 2600 { 2601 int i; 2602 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2603 2604 if (dc->idle_optimizations_allowed) 2605 overall_type = UPDATE_TYPE_FULL; 2606 2607 if (stream_status == NULL || stream_status->plane_count != surface_count) 2608 overall_type = UPDATE_TYPE_FULL; 2609 2610 if (stream_update && stream_update->pending_test_pattern) { 2611 overall_type = UPDATE_TYPE_FULL; 2612 } 2613 2614 /* some stream updates require passive update */ 2615 if (stream_update) { 2616 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2617 2618 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2619 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2620 stream_update->integer_scaling_update) 2621 su_flags->bits.scaling = 1; 2622 2623 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2624 su_flags->bits.out_tf = 1; 2625 2626 if (stream_update->abm_level) 2627 su_flags->bits.abm_level = 1; 2628 2629 if (stream_update->dpms_off) 2630 su_flags->bits.dpms_off = 1; 2631 2632 if (stream_update->gamut_remap) 2633 su_flags->bits.gamut_remap = 1; 2634 2635 if (stream_update->wb_update) 2636 su_flags->bits.wb_update = 1; 2637 2638 if (stream_update->dsc_config) 2639 su_flags->bits.dsc_changed = 1; 2640 2641 if (stream_update->mst_bw_update) 2642 su_flags->bits.mst_bw = 1; 2643 2644 if (stream_update->stream && stream_update->stream->freesync_on_desktop && 2645 (stream_update->vrr_infopacket || stream_update->allow_freesync || 2646 stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) 2647 su_flags->bits.fams_changed = 1; 2648 2649 if 
(su_flags->raw != 0) 2650 overall_type = UPDATE_TYPE_FULL; 2651 2652 if (stream_update->output_csc_transform || stream_update->output_color_space) 2653 su_flags->bits.out_csc = 1; 2654 2655 /* Output transfer function changes do not require bandwidth recalculation, 2656 * so don't trigger a full update 2657 */ 2658 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2659 su_flags->bits.out_tf = 1; 2660 } 2661 2662 for (i = 0 ; i < surface_count; i++) { 2663 enum surface_update_type type = 2664 det_surface_update(dc, &updates[i]); 2665 2666 elevate_update_type(&overall_type, type); 2667 } 2668 2669 return overall_type; 2670 } 2671 2672 /* 2673 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2674 * 2675 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2676 */ 2677 enum surface_update_type dc_check_update_surfaces_for_stream( 2678 struct dc *dc, 2679 struct dc_surface_update *updates, 2680 int surface_count, 2681 struct dc_stream_update *stream_update, 2682 const struct dc_stream_status *stream_status) 2683 { 2684 int i; 2685 enum surface_update_type type; 2686 2687 if (stream_update) 2688 stream_update->stream->update_flags.raw = 0; 2689 for (i = 0; i < surface_count; i++) 2690 updates[i].surface->update_flags.raw = 0; 2691 2692 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2693 if (type == UPDATE_TYPE_FULL) { 2694 if (stream_update) { 2695 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2696 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2697 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2698 } 2699 for (i = 0; i < surface_count; i++) 2700 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2701 } 2702 2703 if (type == UPDATE_TYPE_FAST) { 2704 // If there's an available clock comparator, we use that. 2705 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2706 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2707 dc->optimized_required = true; 2708 // Else we fallback to mem compare. 
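		// The memcmp below deliberately stops at
		// offsetof(struct dc_clocks, prev_p_state_change_support), so only the
		// fields laid out before that member (the clock values themselves) take
		// part in the comparison and trailing bookkeeping fields are ignored.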
2709 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2710 dc->optimized_required = true; 2711 } 2712 2713 dc->optimized_required |= dc->wm_optimized_required; 2714 } 2715 2716 return type; 2717 } 2718 2719 static struct dc_stream_status *stream_get_status( 2720 struct dc_state *ctx, 2721 struct dc_stream_state *stream) 2722 { 2723 uint8_t i; 2724 2725 for (i = 0; i < ctx->stream_count; i++) { 2726 if (stream == ctx->streams[i]) { 2727 return &ctx->stream_status[i]; 2728 } 2729 } 2730 2731 return NULL; 2732 } 2733 2734 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2735 2736 static void copy_surface_update_to_plane( 2737 struct dc_plane_state *surface, 2738 struct dc_surface_update *srf_update) 2739 { 2740 if (srf_update->flip_addr) { 2741 surface->address = srf_update->flip_addr->address; 2742 surface->flip_immediate = 2743 srf_update->flip_addr->flip_immediate; 2744 surface->time.time_elapsed_in_us[surface->time.index] = 2745 srf_update->flip_addr->flip_timestamp_in_us - 2746 surface->time.prev_update_time_in_us; 2747 surface->time.prev_update_time_in_us = 2748 srf_update->flip_addr->flip_timestamp_in_us; 2749 surface->time.index++; 2750 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2751 surface->time.index = 0; 2752 2753 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2754 } 2755 2756 if (srf_update->scaling_info) { 2757 surface->scaling_quality = 2758 srf_update->scaling_info->scaling_quality; 2759 surface->dst_rect = 2760 srf_update->scaling_info->dst_rect; 2761 surface->src_rect = 2762 srf_update->scaling_info->src_rect; 2763 surface->clip_rect = 2764 srf_update->scaling_info->clip_rect; 2765 } 2766 2767 if (srf_update->plane_info) { 2768 surface->color_space = 2769 srf_update->plane_info->color_space; 2770 surface->format = 2771 srf_update->plane_info->format; 2772 surface->plane_size = 2773 srf_update->plane_info->plane_size; 2774 surface->rotation = 2775 srf_update->plane_info->rotation; 2776 surface->horizontal_mirror = 2777 srf_update->plane_info->horizontal_mirror; 2778 surface->stereo_format = 2779 srf_update->plane_info->stereo_format; 2780 surface->tiling_info = 2781 srf_update->plane_info->tiling_info; 2782 surface->visible = 2783 srf_update->plane_info->visible; 2784 surface->per_pixel_alpha = 2785 srf_update->plane_info->per_pixel_alpha; 2786 surface->global_alpha = 2787 srf_update->plane_info->global_alpha; 2788 surface->global_alpha_value = 2789 srf_update->plane_info->global_alpha_value; 2790 surface->dcc = 2791 srf_update->plane_info->dcc; 2792 surface->layer_index = 2793 srf_update->plane_info->layer_index; 2794 } 2795 2796 if (srf_update->gamma && 2797 (surface->gamma_correction != 2798 srf_update->gamma)) { 2799 memcpy(&surface->gamma_correction->entries, 2800 &srf_update->gamma->entries, 2801 sizeof(struct dc_gamma_entries)); 2802 surface->gamma_correction->is_identity = 2803 srf_update->gamma->is_identity; 2804 surface->gamma_correction->num_entries = 2805 srf_update->gamma->num_entries; 2806 surface->gamma_correction->type = 2807 srf_update->gamma->type; 2808 } 2809 2810 if (srf_update->in_transfer_func && 2811 (surface->in_transfer_func != 2812 srf_update->in_transfer_func)) { 2813 surface->in_transfer_func->sdr_ref_white_level = 2814 srf_update->in_transfer_func->sdr_ref_white_level; 2815 surface->in_transfer_func->tf = 2816 srf_update->in_transfer_func->tf; 2817 surface->in_transfer_func->type = 
2818 srf_update->in_transfer_func->type; 2819 memcpy(&surface->in_transfer_func->tf_pts, 2820 &srf_update->in_transfer_func->tf_pts, 2821 sizeof(struct dc_transfer_func_distributed_points)); 2822 } 2823 2824 if (srf_update->func_shaper && 2825 (surface->in_shaper_func != 2826 srf_update->func_shaper)) 2827 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2828 sizeof(*surface->in_shaper_func)); 2829 2830 if (srf_update->lut3d_func && 2831 (surface->lut3d_func != 2832 srf_update->lut3d_func)) 2833 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2834 sizeof(*surface->lut3d_func)); 2835 2836 if (srf_update->hdr_mult.value) 2837 surface->hdr_mult = 2838 srf_update->hdr_mult; 2839 2840 if (srf_update->blend_tf && 2841 (surface->blend_tf != 2842 srf_update->blend_tf)) 2843 memcpy(surface->blend_tf, srf_update->blend_tf, 2844 sizeof(*surface->blend_tf)); 2845 2846 if (srf_update->input_csc_color_matrix) 2847 surface->input_csc_color_matrix = 2848 *srf_update->input_csc_color_matrix; 2849 2850 if (srf_update->coeff_reduction_factor) 2851 surface->coeff_reduction_factor = 2852 *srf_update->coeff_reduction_factor; 2853 2854 if (srf_update->gamut_remap_matrix) 2855 surface->gamut_remap_matrix = 2856 *srf_update->gamut_remap_matrix; 2857 } 2858 2859 static void copy_stream_update_to_stream(struct dc *dc, 2860 struct dc_state *context, 2861 struct dc_stream_state *stream, 2862 struct dc_stream_update *update) 2863 { 2864 struct dc_context *dc_ctx = dc->ctx; 2865 2866 if (update == NULL || stream == NULL) 2867 return; 2868 2869 if (update->src.height && update->src.width) 2870 stream->src = update->src; 2871 2872 if (update->dst.height && update->dst.width) 2873 stream->dst = update->dst; 2874 2875 if (update->out_transfer_func && 2876 stream->out_transfer_func != update->out_transfer_func) { 2877 stream->out_transfer_func->sdr_ref_white_level = 2878 update->out_transfer_func->sdr_ref_white_level; 2879 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2880 stream->out_transfer_func->type = 2881 update->out_transfer_func->type; 2882 memcpy(&stream->out_transfer_func->tf_pts, 2883 &update->out_transfer_func->tf_pts, 2884 sizeof(struct dc_transfer_func_distributed_points)); 2885 } 2886 2887 if (update->hdr_static_metadata) 2888 stream->hdr_static_metadata = *update->hdr_static_metadata; 2889 2890 if (update->abm_level) 2891 stream->abm_level = *update->abm_level; 2892 2893 if (update->periodic_interrupt) 2894 stream->periodic_interrupt = *update->periodic_interrupt; 2895 2896 if (update->gamut_remap) 2897 stream->gamut_remap_matrix = *update->gamut_remap; 2898 2899 /* Note: this being updated after mode set is currently not a use case 2900 * however if it arises OCSC would need to be reprogrammed at the 2901 * minimum 2902 */ 2903 if (update->output_color_space) 2904 stream->output_color_space = *update->output_color_space; 2905 2906 if (update->output_csc_transform) 2907 stream->csc_color_matrix = *update->output_csc_transform; 2908 2909 if (update->vrr_infopacket) 2910 stream->vrr_infopacket = *update->vrr_infopacket; 2911 2912 if (update->allow_freesync) 2913 stream->allow_freesync = *update->allow_freesync; 2914 2915 if (update->vrr_active_variable) 2916 stream->vrr_active_variable = *update->vrr_active_variable; 2917 2918 if (update->vrr_active_fixed) 2919 stream->vrr_active_fixed = *update->vrr_active_fixed; 2920 2921 if (update->crtc_timing_adjust) 2922 stream->adjust = *update->crtc_timing_adjust; 2923 2924 if (update->dpms_off) 2925 stream->dpms_off = *update->dpms_off; 
2926 2927 if (update->hfvsif_infopacket) 2928 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2929 2930 if (update->vtem_infopacket) 2931 stream->vtem_infopacket = *update->vtem_infopacket; 2932 2933 if (update->vsc_infopacket) 2934 stream->vsc_infopacket = *update->vsc_infopacket; 2935 2936 if (update->vsp_infopacket) 2937 stream->vsp_infopacket = *update->vsp_infopacket; 2938 2939 if (update->adaptive_sync_infopacket) 2940 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; 2941 2942 if (update->dither_option) 2943 stream->dither_option = *update->dither_option; 2944 2945 if (update->pending_test_pattern) 2946 stream->test_pattern = *update->pending_test_pattern; 2947 /* update current stream with writeback info */ 2948 if (update->wb_update) { 2949 int i; 2950 2951 stream->num_wb_info = update->wb_update->num_wb_info; 2952 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2953 for (i = 0; i < stream->num_wb_info; i++) 2954 stream->writeback_info[i] = 2955 update->wb_update->writeback_info[i]; 2956 } 2957 if (update->dsc_config) { 2958 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2959 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2960 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2961 update->dsc_config->num_slices_v != 0); 2962 2963 /* Use temporarry context for validating new DSC config */ 2964 struct dc_state *dsc_validate_context = dc_create_state(dc); 2965 2966 if (dsc_validate_context) { 2967 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2968 2969 stream->timing.dsc_cfg = *update->dsc_config; 2970 stream->timing.flags.DSC = enable_dsc; 2971 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2972 stream->timing.dsc_cfg = old_dsc_cfg; 2973 stream->timing.flags.DSC = old_dsc_enabled; 2974 update->dsc_config = NULL; 2975 } 2976 2977 dc_release_state(dsc_validate_context); 2978 } else { 2979 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2980 update->dsc_config = NULL; 2981 } 2982 } 2983 } 2984 2985 static bool update_planes_and_stream_state(struct dc *dc, 2986 struct dc_surface_update *srf_updates, int surface_count, 2987 struct dc_stream_state *stream, 2988 struct dc_stream_update *stream_update, 2989 enum surface_update_type *new_update_type, 2990 struct dc_state **new_context) 2991 { 2992 struct dc_state *context; 2993 int i, j; 2994 enum surface_update_type update_type; 2995 const struct dc_stream_status *stream_status; 2996 struct dc_context *dc_ctx = dc->ctx; 2997 2998 stream_status = dc_stream_get_status(stream); 2999 3000 if (!stream_status) { 3001 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3002 ASSERT(false); 3003 3004 return false; /* Cannot commit surface to stream that is not committed */ 3005 } 3006 3007 context = dc->current_state; 3008 3009 update_type = dc_check_update_surfaces_for_stream( 3010 dc, srf_updates, surface_count, stream_update, stream_status); 3011 3012 /* update current stream with the new updates */ 3013 copy_stream_update_to_stream(dc, context, stream, stream_update); 3014 3015 /* do not perform surface update if surface has invalid dimensions 3016 * (all zero) and no scaling_info is provided 3017 */ 3018 if (surface_count > 0) { 3019 for (i = 0; i < surface_count; i++) { 3020 if ((srf_updates[i].surface->src_rect.width == 0 || 3021 srf_updates[i].surface->src_rect.height == 0 || 3022 srf_updates[i].surface->dst_rect.width == 0 || 3023 srf_updates[i].surface->dst_rect.height == 0) && 
3024 (!srf_updates[i].scaling_info || 3025 srf_updates[i].scaling_info->src_rect.width == 0 || 3026 srf_updates[i].scaling_info->src_rect.height == 0 || 3027 srf_updates[i].scaling_info->dst_rect.width == 0 || 3028 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3029 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3030 return false; 3031 } 3032 } 3033 } 3034 3035 if (update_type >= update_surface_trace_level) 3036 update_surface_trace(dc, srf_updates, surface_count); 3037 3038 if (update_type >= UPDATE_TYPE_FULL) { 3039 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3040 3041 for (i = 0; i < surface_count; i++) 3042 new_planes[i] = srf_updates[i].surface; 3043 3044 /* initialize scratch memory for building context */ 3045 context = dc_create_state(dc); 3046 if (context == NULL) { 3047 DC_ERROR("Failed to allocate new validate context!\n"); 3048 return false; 3049 } 3050 3051 dc_resource_state_copy_construct( 3052 dc->current_state, context); 3053 3054 /* For each full update, remove all existing phantom pipes first. 3055 * Ensures that we have enough pipes for newly added MPO planes 3056 */ 3057 if (dc->res_pool->funcs->remove_phantom_pipes) 3058 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3059 3060 /*remove old surfaces from context */ 3061 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3062 3063 BREAK_TO_DEBUGGER(); 3064 goto fail; 3065 } 3066 3067 /* add surface to context */ 3068 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3069 3070 BREAK_TO_DEBUGGER(); 3071 goto fail; 3072 } 3073 } 3074 3075 /* save update parameters into surface */ 3076 for (i = 0; i < surface_count; i++) { 3077 struct dc_plane_state *surface = srf_updates[i].surface; 3078 3079 copy_surface_update_to_plane(surface, &srf_updates[i]); 3080 3081 if (update_type >= UPDATE_TYPE_MED) { 3082 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3083 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3084 3085 if (pipe_ctx->plane_state != surface) 3086 continue; 3087 3088 resource_build_scaling_params(pipe_ctx); 3089 } 3090 } 3091 } 3092 3093 if (update_type == UPDATE_TYPE_FULL) { 3094 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3095 /* For phantom pipes we remove and create a new set of phantom pipes 3096 * for each full update (because we don't know if we'll need phantom 3097 * pipes until after the first round of validation). However, if validation 3098 * fails we need to keep the existing phantom pipes (because we don't update 3099 * the dc->current_state). 3100 * 3101 * The phantom stream/plane refcount is decremented for validation because 3102 * we assume it'll be removed (the free comes when the dc_state is freed), 3103 * but if validation fails we have to increment back the refcount so it's 3104 * consistent. 
3105 */ 3106 if (dc->res_pool->funcs->retain_phantom_pipes) 3107 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); 3108 BREAK_TO_DEBUGGER(); 3109 goto fail; 3110 } 3111 } 3112 3113 *new_context = context; 3114 *new_update_type = update_type; 3115 3116 return true; 3117 3118 fail: 3119 dc_release_state(context); 3120 3121 return false; 3122 3123 } 3124 3125 static void commit_planes_do_stream_update(struct dc *dc, 3126 struct dc_stream_state *stream, 3127 struct dc_stream_update *stream_update, 3128 enum surface_update_type update_type, 3129 struct dc_state *context) 3130 { 3131 int j; 3132 3133 // Stream updates 3134 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3135 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3136 3137 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3138 3139 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3140 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3141 3142 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3143 stream_update->vrr_infopacket || 3144 stream_update->vsc_infopacket || 3145 stream_update->vsp_infopacket || 3146 stream_update->hfvsif_infopacket || 3147 stream_update->adaptive_sync_infopacket || 3148 stream_update->vtem_infopacket) { 3149 resource_build_info_frame(pipe_ctx); 3150 dc->hwss.update_info_frame(pipe_ctx); 3151 3152 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3153 dc->link_srv->dp_trace_source_sequence( 3154 pipe_ctx->stream->link, 3155 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3156 } 3157 3158 if (stream_update->hdr_static_metadata && 3159 stream->use_dynamic_meta && 3160 dc->hwss.set_dmdata_attributes && 3161 pipe_ctx->stream->dmdata_address.quad_part != 0) 3162 dc->hwss.set_dmdata_attributes(pipe_ctx); 3163 3164 if (stream_update->gamut_remap) 3165 dc_stream_set_gamut_remap(dc, stream); 3166 3167 if (stream_update->output_csc_transform) 3168 dc_stream_program_csc_matrix(dc, stream); 3169 3170 if (stream_update->dither_option) { 3171 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3172 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3173 &pipe_ctx->stream->bit_depth_params); 3174 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3175 &stream->bit_depth_params, 3176 &stream->clamping); 3177 while (odm_pipe) { 3178 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3179 &stream->bit_depth_params, 3180 &stream->clamping); 3181 odm_pipe = odm_pipe->next_odm_pipe; 3182 } 3183 } 3184 3185 3186 /* Full fe update*/ 3187 if (update_type == UPDATE_TYPE_FAST) 3188 continue; 3189 3190 if (stream_update->dsc_config) 3191 dc->link_srv->update_dsc_config(pipe_ctx); 3192 3193 if (stream_update->mst_bw_update) { 3194 if (stream_update->mst_bw_update->is_increase) 3195 dc->link_srv->increase_mst_payload(pipe_ctx, 3196 stream_update->mst_bw_update->mst_stream_bw); 3197 else 3198 dc->link_srv->reduce_mst_payload(pipe_ctx, 3199 stream_update->mst_bw_update->mst_stream_bw); 3200 } 3201 3202 if (stream_update->pending_test_pattern) { 3203 dc_link_dp_set_test_pattern(stream->link, 3204 stream->test_pattern.type, 3205 stream->test_pattern.color_space, 3206 stream->test_pattern.p_link_settings, 3207 stream->test_pattern.p_custom_pattern, 3208 stream->test_pattern.cust_pattern_size); 3209 } 3210 3211 if (stream_update->dpms_off) { 3212 if (*stream_update->dpms_off) { 3213 dc->link_srv->set_dpms_off(pipe_ctx); 3214 /* for dpms, keep acquired resources*/ 3215 if 
(pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3216 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3217 3218 dc->optimized_required = true; 3219 3220 } else { 3221 if (get_seamless_boot_stream_count(context) == 0) 3222 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3223 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3224 } 3225 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3226 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3227 /* 3228 * Workaround for firmware issue in some receivers where they don't pick up 3229 * correct output color space unless DP link is disabled/re-enabled 3230 */ 3231 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3232 } 3233 3234 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3235 bool should_program_abm = true; 3236 3237 // if otg funcs defined check if blanked before programming 3238 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3239 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3240 should_program_abm = false; 3241 3242 if (should_program_abm) { 3243 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3244 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3245 } else { 3246 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3247 pipe_ctx->stream_res.abm, stream->abm_level); 3248 } 3249 } 3250 } 3251 } 3252 } 3253 } 3254 3255 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3256 { 3257 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3258 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3259 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3260 return true; 3261 3262 if (stream->link->replay_settings.config.replay_supported) 3263 return true; 3264 3265 return false; 3266 } 3267 3268 void dc_dmub_update_dirty_rect(struct dc *dc, 3269 int surface_count, 3270 struct dc_stream_state *stream, 3271 struct dc_surface_update *srf_updates, 3272 struct dc_state *context) 3273 { 3274 union dmub_rb_cmd cmd; 3275 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3276 unsigned int i, j; 3277 unsigned int panel_inst = 0; 3278 3279 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3280 return; 3281 3282 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3283 return; 3284 3285 memset(&cmd, 0x0, sizeof(cmd)); 3286 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3287 cmd.update_dirty_rect.header.sub_type = 0; 3288 cmd.update_dirty_rect.header.payload_bytes = 3289 sizeof(cmd.update_dirty_rect) - 3290 sizeof(cmd.update_dirty_rect.header); 3291 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3292 for (i = 0; i < surface_count; i++) { 3293 struct dc_plane_state *plane_state = srf_updates[i].surface; 3294 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3295 3296 if (!srf_updates[i].surface || !flip_addr) 3297 continue; 3298 /* Do not send in immediate flip mode */ 3299 if (srf_updates[i].surface->flip_immediate) 3300 continue; 3301 3302 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3303 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3304 sizeof(flip_addr->dirty_rects)); 3305 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3306 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3307 3308 if (pipe_ctx->stream != stream) 3309 continue; 3310 if (pipe_ctx->plane_state != plane_state) 3311 
continue; 3312 3313 update_dirty_rect->panel_inst = panel_inst; 3314 update_dirty_rect->pipe_idx = j; 3315 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3316 } 3317 } 3318 } 3319 3320 static void build_dmub_update_dirty_rect( 3321 struct dc *dc, 3322 int surface_count, 3323 struct dc_stream_state *stream, 3324 struct dc_surface_update *srf_updates, 3325 struct dc_state *context, 3326 struct dc_dmub_cmd dc_dmub_cmd[], 3327 unsigned int *dmub_cmd_count) 3328 { 3329 union dmub_rb_cmd cmd; 3330 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3331 unsigned int i, j; 3332 unsigned int panel_inst = 0; 3333 3334 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3335 return; 3336 3337 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3338 return; 3339 3340 memset(&cmd, 0x0, sizeof(cmd)); 3341 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3342 cmd.update_dirty_rect.header.sub_type = 0; 3343 cmd.update_dirty_rect.header.payload_bytes = 3344 sizeof(cmd.update_dirty_rect) - 3345 sizeof(cmd.update_dirty_rect.header); 3346 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3347 for (i = 0; i < surface_count; i++) { 3348 struct dc_plane_state *plane_state = srf_updates[i].surface; 3349 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3350 3351 if (!srf_updates[i].surface || !flip_addr) 3352 continue; 3353 /* Do not send in immediate flip mode */ 3354 if (srf_updates[i].surface->flip_immediate) 3355 continue; 3356 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3357 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3358 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3359 sizeof(flip_addr->dirty_rects)); 3360 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3361 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3362 3363 if (pipe_ctx->stream != stream) 3364 continue; 3365 if (pipe_ctx->plane_state != plane_state) 3366 continue; 3367 update_dirty_rect->panel_inst = panel_inst; 3368 update_dirty_rect->pipe_idx = j; 3369 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3370 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3371 (*dmub_cmd_count)++; 3372 } 3373 } 3374 } 3375 3376 3377 /** 3378 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB 3379 * 3380 * @dc: Current DC state 3381 * @srf_updates: Array of surface updates 3382 * @surface_count: Number of surfaces that have an updated 3383 * @stream: Corresponding stream to be updated in the current flip 3384 * @context: New DC state to be programmed 3385 * 3386 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB 3387 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array 3388 * 3389 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required 3390 * to build an array of commands and have them sent while the OTG lock is acquired. 
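 *
 * At present the only commands collected here come from
 * build_dmub_update_dirty_rect() (PSR/Panel Replay dirty rectangles); the
 * resulting list is turned into a fast sequence by hwss_build_fast_sequence()
 * and executed with hwss_execute_sequence() in commit_planes_for_stream_fast().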
 *
 * Return: void
 */
static void build_dmub_cmd_list(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_state *context,
		struct dc_dmub_cmd dc_dmub_cmd[],
		unsigned int *dmub_cmd_count)
{
	// Initialize cmd count to 0
	*dmub_cmd_count = 0;
	build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
}

static void commit_planes_for_stream_fast(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	dc_z10_restore(dc);

	top_pipe_to_program = resource_get_otg_master_for_stream(
			&context->res_ctx,
			stream);

	if (dc->debug.visual_confirm) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (pipe->stream && pipe->plane_state)
				dc_update_viusal_confirm_color(dc, context, pipe);
		}
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *plane_state = srf_updates[i].surface;
		/*set logical flag for lock/unlock use*/
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

			if (!pipe_ctx->plane_state)
				continue;
			/* Only pipes carrying the plane being updated are of interest */
			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
				continue;
			pipe_ctx->plane_state->triplebuffer_flips = false;
			if (update_type == UPDATE_TYPE_FAST &&
			    dc->hwss.program_triplebuffer &&
			    !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
				/*triple buffer for VUpdate only*/
				pipe_ctx->plane_state->triplebuffer_flips = true;
			}
		}
	}

	build_dmub_cmd_list(dc,
			srf_updates,
			surface_count,
			stream,
			context,
			context->dc_dmub_cmd,
			&(context->dmub_cmd_count));
	hwss_build_fast_sequence(dc,
			context->dc_dmub_cmd,
			context->dmub_cmd_count,
			context->block_sequence,
			&(context->block_sequence_steps),
			top_pipe_to_program);
	hwss_execute_sequence(dc,
			context->block_sequence,
			context->block_sequence_steps);
	/* Clear update flags so next flip doesn't have redundant programming
	 * (if there's no stream update, the update flags are not cleared).
	 * Surface updates are cleared unconditionally at the beginning of each flip,
	 * so no need to clear here.
	 */
	if (top_pipe_to_program->stream)
		top_pipe_to_program->stream->update_flags.raw = 0;
}

static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
{
	/*
	 * This function calls HWSS to wait for any potentially double buffered
	 * operations to complete. It should be invoked as a pre-amble prior
	 * to full update programming before asserting any HW locks.
3483 */ 3484 int pipe_idx; 3485 int opp_inst; 3486 int opp_count = dc->res_pool->pipe_count; 3487 struct hubp *hubp; 3488 int mpcc_inst; 3489 const struct pipe_ctx *pipe_ctx; 3490 3491 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { 3492 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; 3493 3494 if (!pipe_ctx->stream) 3495 continue; 3496 3497 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) 3498 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); 3499 3500 hubp = pipe_ctx->plane_res.hubp; 3501 if (!hubp) 3502 continue; 3503 3504 mpcc_inst = hubp->inst; 3505 // MPCC inst is equal to pipe index in practice 3506 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { 3507 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { 3508 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); 3509 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; 3510 break; 3511 } 3512 } 3513 } 3514 } 3515 3516 static void commit_planes_for_stream(struct dc *dc, 3517 struct dc_surface_update *srf_updates, 3518 int surface_count, 3519 struct dc_stream_state *stream, 3520 struct dc_stream_update *stream_update, 3521 enum surface_update_type update_type, 3522 struct dc_state *context) 3523 { 3524 int i, j; 3525 struct pipe_ctx *top_pipe_to_program = NULL; 3526 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3527 bool subvp_prev_use = false; 3528 bool subvp_curr_use = false; 3529 3530 // Once we apply the new subvp context to hardware it won't be in the 3531 // dc->current_state anymore, so we have to cache it before we apply 3532 // the new SubVP context 3533 subvp_prev_use = false; 3534 dc_z10_restore(dc); 3535 if (update_type == UPDATE_TYPE_FULL) 3536 wait_for_outstanding_hw_updates(dc, context); 3537 3538 if (update_type == UPDATE_TYPE_FULL) { 3539 dc_allow_idle_optimizations(dc, false); 3540 3541 if (get_seamless_boot_stream_count(context) == 0) 3542 dc->hwss.prepare_bandwidth(dc, context); 3543 3544 if (dc->hwss.update_dsc_pg) 3545 dc->hwss.update_dsc_pg(dc, context, false); 3546 3547 context_clock_trace(dc, context); 3548 } 3549 3550 top_pipe_to_program = resource_get_otg_master_for_stream( 3551 &context->res_ctx, 3552 stream); 3553 3554 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3555 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3556 3557 // Check old context for SubVP 3558 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3559 if (subvp_prev_use) 3560 break; 3561 } 3562 3563 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3564 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3565 3566 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3567 subvp_curr_use = true; 3568 break; 3569 } 3570 } 3571 3572 if (dc->debug.visual_confirm) 3573 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3574 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3575 3576 if (pipe->stream && pipe->plane_state) 3577 dc_update_viusal_confirm_color(dc, context, pipe); 3578 } 3579 3580 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3581 struct pipe_ctx *mpcc_pipe; 3582 struct pipe_ctx *odm_pipe; 3583 3584 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3585 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3586 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3587 } 3588 3589 if ((update_type != 
UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3590 if (top_pipe_to_program && 3591 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3592 if (should_use_dmub_lock(stream->link)) { 3593 union dmub_hw_lock_flags hw_locks = { 0 }; 3594 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3595 3596 hw_locks.bits.lock_dig = 1; 3597 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3598 3599 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3600 true, 3601 &hw_locks, 3602 &inst_flags); 3603 } else 3604 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3605 top_pipe_to_program->stream_res.tg); 3606 } 3607 3608 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3609 if (dc->hwss.subvp_pipe_control_lock) 3610 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3611 dc->hwss.interdependent_update_lock(dc, context, true); 3612 3613 } else { 3614 if (dc->hwss.subvp_pipe_control_lock) 3615 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3616 /* Lock the top pipe while updating plane addrs, since freesync requires 3617 * plane addr update event triggers to be synchronized. 3618 * top_pipe_to_program is expected to never be NULL 3619 */ 3620 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3621 } 3622 3623 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3624 3625 // Stream updates 3626 if (stream_update) 3627 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3628 3629 if (surface_count == 0) { 3630 /* 3631 * In case of turning off screen, no need to program front end a second time. 3632 * just return after program blank. 3633 */ 3634 if (dc->hwss.apply_ctx_for_surface) 3635 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3636 if (dc->hwss.program_front_end_for_ctx) 3637 dc->hwss.program_front_end_for_ctx(dc, context); 3638 3639 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3640 dc->hwss.interdependent_update_lock(dc, context, false); 3641 } else { 3642 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3643 } 3644 dc->hwss.post_unlock_program_front_end(dc, context); 3645 3646 if (update_type != UPDATE_TYPE_FAST) 3647 if (dc->hwss.commit_subvp_config) 3648 dc->hwss.commit_subvp_config(dc, context); 3649 3650 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3651 * move the SubVP lock to after the phantom pipes have been setup 3652 */ 3653 if (dc->hwss.subvp_pipe_control_lock) 3654 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 3655 NULL, subvp_prev_use); 3656 return; 3657 } 3658 3659 if (update_type != UPDATE_TYPE_FAST) { 3660 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3661 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3662 3663 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || 3664 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && 3665 pipe_ctx->stream && pipe_ctx->plane_state) { 3666 /* Only update visual confirm for SUBVP and Mclk switching here. 3667 * The bar appears on all pipes, so we need to update the bar on all displays, 3668 * so the information doesn't get stale. 
3669 */ 3670 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, 3671 pipe_ctx->plane_res.hubp->inst); 3672 } 3673 } 3674 } 3675 3676 for (i = 0; i < surface_count; i++) { 3677 struct dc_plane_state *plane_state = srf_updates[i].surface; 3678 /*set logical flag for lock/unlock use*/ 3679 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3680 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3681 if (!pipe_ctx->plane_state) 3682 continue; 3683 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3684 continue; 3685 pipe_ctx->plane_state->triplebuffer_flips = false; 3686 if (update_type == UPDATE_TYPE_FAST && 3687 dc->hwss.program_triplebuffer != NULL && 3688 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3689 /*triple buffer for VUpdate only*/ 3690 pipe_ctx->plane_state->triplebuffer_flips = true; 3691 } 3692 } 3693 if (update_type == UPDATE_TYPE_FULL) { 3694 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3695 plane_state->flip_immediate = false; 3696 } 3697 } 3698 3699 // Update Type FULL, Surface updates 3700 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3701 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3702 3703 if (!pipe_ctx->top_pipe && 3704 !pipe_ctx->prev_odm_pipe && 3705 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3706 struct dc_stream_status *stream_status = NULL; 3707 3708 if (!pipe_ctx->plane_state) 3709 continue; 3710 3711 /* Full fe update*/ 3712 if (update_type == UPDATE_TYPE_FAST) 3713 continue; 3714 3715 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3716 3717 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3718 /*turn off triple buffer for full update*/ 3719 dc->hwss.program_triplebuffer( 3720 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3721 } 3722 stream_status = 3723 stream_get_status(context, pipe_ctx->stream); 3724 3725 if (dc->hwss.apply_ctx_for_surface) 3726 dc->hwss.apply_ctx_for_surface( 3727 dc, pipe_ctx->stream, stream_status->plane_count, context); 3728 } 3729 } 3730 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3731 dc->hwss.program_front_end_for_ctx(dc, context); 3732 if (dc->debug.validate_dml_output) { 3733 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3734 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3735 if (cur_pipe->stream == NULL) 3736 continue; 3737 3738 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3739 cur_pipe->plane_res.hubp, dc->ctx, 3740 &context->res_ctx.pipe_ctx[i].rq_regs, 3741 &context->res_ctx.pipe_ctx[i].dlg_regs, 3742 &context->res_ctx.pipe_ctx[i].ttu_regs); 3743 } 3744 } 3745 } 3746 3747 // Update Type FAST, Surface updates 3748 if (update_type == UPDATE_TYPE_FAST) { 3749 if (dc->hwss.set_flip_control_gsl) 3750 for (i = 0; i < surface_count; i++) { 3751 struct dc_plane_state *plane_state = srf_updates[i].surface; 3752 3753 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3754 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3755 3756 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3757 continue; 3758 3759 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3760 continue; 3761 3762 // GSL has to be used for flip immediate 3763 dc->hwss.set_flip_control_gsl(pipe_ctx, 3764 pipe_ctx->plane_state->flip_immediate); 3765 } 3766 } 3767 3768 /* Perform requested Updates */ 3769 for (i = 0; i < surface_count; i++) { 3770 struct dc_plane_state *plane_state = srf_updates[i].surface; 3771 3772 for (j = 0; j < 
dc->res_pool->pipe_count; j++) { 3773 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3774 3775 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3776 continue; 3777 3778 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3779 continue; 3780 3781 /*program triple buffer after lock based on flip type*/ 3782 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3783 /*only enable triplebuffer for fast_update*/ 3784 dc->hwss.program_triplebuffer( 3785 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3786 } 3787 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3788 dc->hwss.update_plane_addr(dc, pipe_ctx); 3789 } 3790 } 3791 } 3792 3793 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3794 dc->hwss.interdependent_update_lock(dc, context, false); 3795 } else { 3796 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3797 } 3798 3799 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3800 if (top_pipe_to_program && 3801 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3802 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3803 top_pipe_to_program->stream_res.tg, 3804 CRTC_STATE_VACTIVE); 3805 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3806 top_pipe_to_program->stream_res.tg, 3807 CRTC_STATE_VBLANK); 3808 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3809 top_pipe_to_program->stream_res.tg, 3810 CRTC_STATE_VACTIVE); 3811 3812 if (should_use_dmub_lock(stream->link)) { 3813 union dmub_hw_lock_flags hw_locks = { 0 }; 3814 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3815 3816 hw_locks.bits.lock_dig = 1; 3817 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3818 3819 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3820 false, 3821 &hw_locks, 3822 &inst_flags); 3823 } else 3824 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3825 top_pipe_to_program->stream_res.tg); 3826 } 3827 3828 if (subvp_curr_use) { 3829 /* If enabling subvp or transitioning from subvp->subvp, enable the 3830 * phantom streams before we program front end for the phantom pipes. 3831 */ 3832 if (update_type != UPDATE_TYPE_FAST) { 3833 if (dc->hwss.enable_phantom_streams) 3834 dc->hwss.enable_phantom_streams(dc, context); 3835 } 3836 } 3837 3838 if (update_type != UPDATE_TYPE_FAST) 3839 dc->hwss.post_unlock_program_front_end(dc, context); 3840 3841 if (subvp_prev_use && !subvp_curr_use) { 3842 /* If disabling subvp, disable phantom streams after front end 3843 * programming has completed (we turn on phantom OTG in order 3844 * to complete the plane disable for phantom pipes). 
3845 */ 3846 dc->hwss.apply_ctx_to_hw(dc, context); 3847 } 3848 3849 if (update_type != UPDATE_TYPE_FAST) 3850 if (dc->hwss.commit_subvp_config) 3851 dc->hwss.commit_subvp_config(dc, context); 3852 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3853 * move the SubVP lock to after the phantom pipes have been setup 3854 */ 3855 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3856 if (dc->hwss.subvp_pipe_control_lock) 3857 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3858 } else { 3859 if (dc->hwss.subvp_pipe_control_lock) 3860 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3861 } 3862 3863 // Fire manual trigger only when bottom plane is flipped 3864 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3865 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3866 3867 if (!pipe_ctx->plane_state) 3868 continue; 3869 3870 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 3871 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 3872 !pipe_ctx->plane_state->update_flags.bits.addr_update || 3873 pipe_ctx->plane_state->skip_manual_trigger) 3874 continue; 3875 3876 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 3877 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 3878 } 3879 } 3880 3881 /** 3882 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change 3883 * 3884 * @dc: Used to get the current state status 3885 * @stream: Target stream whose attached planes are being added or removed 3886 * @surface_count: Number of surface updates 3887 * @is_plane_addition: [out] Set to true if this is a plane addition case 3888 * 3889 * DCN32x and newer support a feature named Dynamic ODM which can conflict with 3890 * MPO if used simultaneously in some specific configurations (e.g., 3891 * 4k@144). This function checks if the incoming context requires applying a 3892 * transition state with pipe splitting and ODM disabled in order to 3893 * work around this hardware limitation. If the OPP 3894 * associated with an MPCC might change due to plane additions, this function 3895 * returns true. 3896 * 3897 * Return: 3898 * Return true if OPP and MPCC might change, otherwise, return false.
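 *
 * Illustrative sketch of the caller-side pattern (it mirrors the use in
 * dc_update_planes_and_stream() further down; variable names here are only
 * for illustration):
 *
 *	bool is_plane_addition = false;
 *	bool force_minimal = could_mpcc_tree_change_for_active_pipes(
 *			dc, stream, surface_count, &is_plane_addition);
 *
 *	// on plane addition, the minimal state is the current one
 *	if (force_minimal && is_plane_addition &&
 *	    !commit_minimal_transition_state(dc, dc->current_state))
 *		return false;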
3899 */ 3900 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, 3901 struct dc_stream_state *stream, 3902 int surface_count, 3903 bool *is_plane_addition) 3904 { 3905 3906 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 3907 bool force_minimal_pipe_splitting = false; 3908 bool subvp_active = false; 3909 uint32_t i; 3910 3911 *is_plane_addition = false; 3912 3913 if (cur_stream_status && 3914 dc->current_state->stream_count > 0 && 3915 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { 3916 /* determine if minimal transition is required due to MPC*/ 3917 if (surface_count > 0) { 3918 if (cur_stream_status->plane_count > surface_count) { 3919 force_minimal_pipe_splitting = true; 3920 } else if (cur_stream_status->plane_count < surface_count) { 3921 force_minimal_pipe_splitting = true; 3922 *is_plane_addition = true; 3923 } 3924 } 3925 } 3926 3927 if (cur_stream_status && 3928 dc->current_state->stream_count == 1 && 3929 dc->debug.enable_single_display_2to1_odm_policy) { 3930 /* determine if minimal transition is required due to dynamic ODM*/ 3931 if (surface_count > 0) { 3932 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { 3933 force_minimal_pipe_splitting = true; 3934 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { 3935 force_minimal_pipe_splitting = true; 3936 *is_plane_addition = true; 3937 } 3938 } 3939 } 3940 3941 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3942 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3943 3944 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 3945 subvp_active = true; 3946 break; 3947 } 3948 } 3949 3950 /* For SubVP when adding or removing planes we need to add a minimal transition 3951 * (even when disabling all planes). Whenever disabling a phantom pipe, we 3952 * must use the minimal transition path to disable the pipe correctly. 3953 * 3954 * We want to use the minimal transition whenever subvp is active, not only if 3955 * a plane is being added / removed from a subvp stream (MPO plane can be added 3956 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through 3957 * a min transition to disable subvp. 3958 */ 3959 if (cur_stream_status && subvp_active) { 3960 /* determine if minimal transition is required due to SubVP*/ 3961 if (cur_stream_status->plane_count > surface_count) { 3962 force_minimal_pipe_splitting = true; 3963 } else if (cur_stream_status->plane_count < surface_count) { 3964 force_minimal_pipe_splitting = true; 3965 *is_plane_addition = true; 3966 } 3967 } 3968 3969 return force_minimal_pipe_splitting; 3970 } 3971 3972 /** 3973 * commit_minimal_transition_state - Create a transition pipe split state 3974 * 3975 * @dc: Used to get the current state status 3976 * @transition_base_context: New transition state 3977 * 3978 * In some specific configurations, such as pipe split on multi-display with 3979 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 3980 * programming when moving to new planes. To mitigate those types of problems, 3981 * this function adds a transition state that minimizes pipe usage before 3982 * programming the new configuration. When adding a new plane, the current 3983 * state requires the least pipes, so it is applied without splitting. When 3984 * removing a plane, the new state requires the least pipes, so it is applied 3985 * without splitting. 
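 *
 * Condensed sketch of the sequence performed in the body below (the body
 * additionally handles the is_vmin_only_asic exception and releases the
 * transition state on every path):
 *
 *	tmp_mpc_policy = dc->debug.pipe_split_policy;
 *	dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;            // no MPC split
 *	dc->debug.enable_single_display_2to1_odm_policy = false;  // no dynamic ODM
 *	dc->debug.force_disable_subvp = true;                     // no SubVP
 *	// copy, validate and commit the transition state, then restore the
 *	// three policy fields and force a full surface update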
3986 * 3987 * Return: 3988 * Return false if something is wrong in the transition state. 3989 */ 3990 static bool commit_minimal_transition_state(struct dc *dc, 3991 struct dc_state *transition_base_context) 3992 { 3993 struct dc_state *transition_context = dc_create_state(dc); 3994 enum pipe_split_policy tmp_mpc_policy = 0; 3995 bool temp_dynamic_odm_policy = 0; 3996 bool temp_subvp_policy = 0; 3997 enum dc_status ret = DC_ERROR_UNEXPECTED; 3998 unsigned int i, j; 3999 unsigned int pipe_in_use = 0; 4000 bool subvp_in_use = false; 4001 bool odm_in_use = false; 4002 4003 if (!transition_context) 4004 return false; 4005 /* Setup: 4006 * Store the current ODM and MPC config in some temp variables to be 4007 * restored after we commit the transition state. 4008 */ 4009 4010 /* check current pipes in use*/ 4011 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4012 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4013 4014 if (pipe->plane_state) 4015 pipe_in_use++; 4016 } 4017 4018 /* If SubVP is enabled and we are adding or removing planes from any main subvp 4019 * pipe, we must use the minimal transition. 4020 */ 4021 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4022 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4023 4024 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 4025 subvp_in_use = true; 4026 break; 4027 } 4028 } 4029 4030 /* If ODM is enabled and we are adding or removing planes from any ODM 4031 * pipe, we must use the minimal transition. 4032 */ 4033 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4034 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4035 4036 if (pipe->stream && pipe->next_odm_pipe) { 4037 odm_in_use = true; 4038 break; 4039 } 4040 } 4041 4042 /* If the OS adds a new surface while all pipes are already in use by ODM combine 4043 * or MPC split, commit_minimal_transition_state must be used to transition safely. 4044 * When the OS exits MPO and ODM/MPC split go back to using all of the pipes, we need to 4045 * call it again. Otherwise return true to skip. 4046 * 4047 * This reduces the scenarios that go through dc_commit_state_no_check at flip time, especially 4048 * MPO enter/exit while DCN still has enough resources.
4049 */ 4050 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { 4051 dc_release_state(transition_context); 4052 return true; 4053 } 4054 4055 if (!dc->config.is_vmin_only_asic) { 4056 tmp_mpc_policy = dc->debug.pipe_split_policy; 4057 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 4058 } 4059 4060 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 4061 dc->debug.enable_single_display_2to1_odm_policy = false; 4062 4063 temp_subvp_policy = dc->debug.force_disable_subvp; 4064 dc->debug.force_disable_subvp = true; 4065 4066 dc_resource_state_copy_construct(transition_base_context, transition_context); 4067 4068 /* commit minimal state */ 4069 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { 4070 for (i = 0; i < transition_context->stream_count; i++) { 4071 struct dc_stream_status *stream_status = &transition_context->stream_status[i]; 4072 4073 for (j = 0; j < stream_status->plane_count; j++) { 4074 struct dc_plane_state *plane_state = stream_status->plane_states[j]; 4075 4076 /* force vsync flip when reconfiguring pipes to prevent underflow 4077 * and corruption 4078 */ 4079 plane_state->flip_immediate = false; 4080 } 4081 } 4082 4083 ret = dc_commit_state_no_check(dc, transition_context); 4084 } 4085 4086 /* always release as dc_commit_state_no_check retains in good case */ 4087 dc_release_state(transition_context); 4088 4089 /* TearDown: 4090 * Restore original configuration for ODM and MPC. 4091 */ 4092 if (!dc->config.is_vmin_only_asic) 4093 dc->debug.pipe_split_policy = tmp_mpc_policy; 4094 4095 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy; 4096 dc->debug.force_disable_subvp = temp_subvp_policy; 4097 4098 if (ret != DC_OK) { 4099 /* this should never happen */ 4100 BREAK_TO_DEBUGGER(); 4101 return false; 4102 } 4103 4104 /* force full surface update */ 4105 for (i = 0; i < dc->current_state->stream_count; i++) { 4106 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 4107 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 4108 } 4109 } 4110 4111 return true; 4112 } 4113 4114 /** 4115 * update_seamless_boot_flags() - Helper function for updating seamless boot flags 4116 * 4117 * @dc: Current DC state 4118 * @context: New DC state to be programmed 4119 * @surface_count: Number of surfaces that have an update 4120 * @stream: Corresponding stream to be updated in the current flip 4121 * 4122 * Updating seamless boot flags does not need to be part of the commit sequence. This 4123 * helper function will update the seamless boot flags on each flip (if required) 4124 * outside of the HW commit sequence (fast or slow). 4125 * 4126 * Return: void 4127 */ 4128 static void update_seamless_boot_flags(struct dc *dc, 4129 struct dc_state *context, 4130 int surface_count, 4131 struct dc_stream_state *stream) 4132 { 4133 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 4134 /* The seamless boot optimization flag keeps clocks and watermarks high until the 4135 * first flip. After the first flip, optimization is required to lower 4136 * bandwidth. Important to note that it is expected UEFI will 4137 * only light up a single display on POST, therefore we only expect 4138 * one stream with seamless boot flag set.
4139 */ 4140 if (stream->apply_seamless_boot_optimization) { 4141 stream->apply_seamless_boot_optimization = false; 4142 4143 if (get_seamless_boot_stream_count(context) == 0) 4144 dc->optimized_required = true; 4145 } 4146 } 4147 } 4148 4149 static void populate_fast_updates(struct dc_fast_update *fast_update, 4150 struct dc_surface_update *srf_updates, 4151 int surface_count, 4152 struct dc_stream_update *stream_update) 4153 { 4154 int i = 0; 4155 4156 if (stream_update) { 4157 fast_update[0].out_transfer_func = stream_update->out_transfer_func; 4158 fast_update[0].output_csc_transform = stream_update->output_csc_transform; 4159 } 4160 4161 for (i = 0; i < surface_count; i++) { 4162 fast_update[i].flip_addr = srf_updates[i].flip_addr; 4163 fast_update[i].gamma = srf_updates[i].gamma; 4164 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 4165 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 4166 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 4167 } 4168 } 4169 4170 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) 4171 { 4172 int i; 4173 4174 if (fast_update[0].out_transfer_func || 4175 fast_update[0].output_csc_transform) 4176 return true; 4177 4178 for (i = 0; i < surface_count; i++) { 4179 if (fast_update[i].flip_addr || 4180 fast_update[i].gamma || 4181 fast_update[i].gamut_remap_matrix || 4182 fast_update[i].input_csc_color_matrix || 4183 fast_update[i].coeff_reduction_factor) 4184 return true; 4185 } 4186 4187 return false; 4188 } 4189 4190 static bool full_update_required(struct dc *dc, 4191 struct dc_surface_update *srf_updates, 4192 int surface_count, 4193 struct dc_stream_update *stream_update, 4194 struct dc_stream_state *stream) 4195 { 4196 4197 int i; 4198 struct dc_stream_status *stream_status; 4199 const struct dc_state *context = dc->current_state; 4200 4201 for (i = 0; i < surface_count; i++) { 4202 if (srf_updates && 4203 (srf_updates[i].plane_info || 4204 srf_updates[i].scaling_info || 4205 (srf_updates[i].hdr_mult.value && 4206 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 4207 srf_updates[i].in_transfer_func || 4208 srf_updates[i].func_shaper || 4209 srf_updates[i].lut3d_func || 4210 srf_updates[i].blend_tf || 4211 srf_updates[i].surface->force_full_update || 4212 (srf_updates[i].flip_addr && 4213 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 4214 !is_surface_in_context(context, srf_updates[i].surface))) 4215 return true; 4216 } 4217 4218 if (stream_update && 4219 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 4220 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 4221 stream_update->integer_scaling_update) || 4222 stream_update->hdr_static_metadata || 4223 stream_update->abm_level || 4224 stream_update->periodic_interrupt || 4225 stream_update->vrr_infopacket || 4226 stream_update->vsc_infopacket || 4227 stream_update->vsp_infopacket || 4228 stream_update->hfvsif_infopacket || 4229 stream_update->vtem_infopacket || 4230 stream_update->adaptive_sync_infopacket || 4231 stream_update->dpms_off || 4232 stream_update->allow_freesync || 4233 stream_update->vrr_active_variable || 4234 stream_update->vrr_active_fixed || 4235 stream_update->gamut_remap || 4236 stream_update->output_color_space || 4237 stream_update->dither_option || 4238 stream_update->wb_update || 4239 stream_update->dsc_config || 4240 stream_update->mst_bw_update || 
4241 stream_update->func_shaper || 4242 stream_update->lut3d_func || 4243 stream_update->pending_test_pattern || 4244 stream_update->crtc_timing_adjust)) 4245 return true; 4246 4247 if (stream) { 4248 stream_status = dc_stream_get_status(stream); 4249 if (stream_status == NULL || stream_status->plane_count != surface_count) 4250 return true; 4251 } 4252 if (dc->idle_optimizations_allowed) 4253 return true; 4254 4255 return false; 4256 } 4257 4258 static bool fast_update_only(struct dc *dc, 4259 struct dc_fast_update *fast_update, 4260 struct dc_surface_update *srf_updates, 4261 int surface_count, 4262 struct dc_stream_update *stream_update, 4263 struct dc_stream_state *stream) 4264 { 4265 return fast_updates_exist(fast_update, surface_count) 4266 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 4267 } 4268 4269 bool dc_update_planes_and_stream(struct dc *dc, 4270 struct dc_surface_update *srf_updates, int surface_count, 4271 struct dc_stream_state *stream, 4272 struct dc_stream_update *stream_update) 4273 { 4274 struct dc_state *context; 4275 enum surface_update_type update_type; 4276 int i; 4277 struct mall_temp_config mall_temp_config; 4278 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4279 4280 /* In cases where MPO and split or ODM are used transitions can 4281 * cause underflow. Apply stream configuration with minimal pipe 4282 * split first to avoid unsupported transitions for active pipes. 4283 */ 4284 bool force_minimal_pipe_splitting = 0; 4285 bool is_plane_addition = 0; 4286 4287 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4288 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 4289 dc, 4290 stream, 4291 surface_count, 4292 &is_plane_addition); 4293 4294 /* on plane addition, minimal state is the current one */ 4295 if (force_minimal_pipe_splitting && is_plane_addition && 4296 !commit_minimal_transition_state(dc, dc->current_state)) 4297 return false; 4298 4299 if (!update_planes_and_stream_state( 4300 dc, 4301 srf_updates, 4302 surface_count, 4303 stream, 4304 stream_update, 4305 &update_type, 4306 &context)) 4307 return false; 4308 4309 /* on plane removal, minimal state is the new one */ 4310 if (force_minimal_pipe_splitting && !is_plane_addition) { 4311 /* Since all phantom pipes are removed in full validation, 4312 * we have to save and restore the subvp/mall config when 4313 * we do a minimal transition since the flags marking the 4314 * pipe as subvp/phantom will be cleared (dc copy constructor 4315 * creates a shallow copy). 4316 */ 4317 if (dc->res_pool->funcs->save_mall_state) 4318 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); 4319 if (!commit_minimal_transition_state(dc, context)) { 4320 dc_release_state(context); 4321 return false; 4322 } 4323 if (dc->res_pool->funcs->restore_mall_state) 4324 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); 4325 4326 /* If we do a minimal transition with plane removal and the context 4327 * has subvp we also have to retain back the phantom stream / planes 4328 * since the refcount is decremented as part of the min transition 4329 * (we commit a state with no subvp, so the phantom streams / planes 4330 * had to be removed). 
4331 */ 4332 if (dc->res_pool->funcs->retain_phantom_pipes) 4333 dc->res_pool->funcs->retain_phantom_pipes(dc, context); 4334 update_type = UPDATE_TYPE_FULL; 4335 } 4336 4337 update_seamless_boot_flags(dc, context, surface_count, stream); 4338 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && 4339 !dc->debug.enable_legacy_fast_update) { 4340 commit_planes_for_stream_fast(dc, 4341 srf_updates, 4342 surface_count, 4343 stream, 4344 stream_update, 4345 update_type, 4346 context); 4347 } else { 4348 if (!stream_update && 4349 dc->hwss.is_pipe_topology_transition_seamless && 4350 !dc->hwss.is_pipe_topology_transition_seamless( 4351 dc, dc->current_state, context)) { 4352 4353 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 4354 BREAK_TO_DEBUGGER(); 4355 } 4356 commit_planes_for_stream( 4357 dc, 4358 srf_updates, 4359 surface_count, 4360 stream, 4361 stream_update, 4362 update_type, 4363 context); 4364 } 4365 4366 if (dc->current_state != context) { 4367 4368 /* Since memory free requires elevated IRQL, an interrupt 4369 * request is generated by mem free. If this happens 4370 * between freeing and reassigning the context, our vsync 4371 * interrupt will call into dc and cause a memory 4372 * corruption BSOD. Hence, we first reassign the context, 4373 * then free the old context. 4374 */ 4375 4376 struct dc_state *old = dc->current_state; 4377 4378 dc->current_state = context; 4379 dc_release_state(old); 4380 4381 // clear any forced full updates 4382 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4383 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4384 4385 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4386 pipe_ctx->plane_state->force_full_update = false; 4387 } 4388 } 4389 return true; 4390 } 4391 4392 void dc_commit_updates_for_stream(struct dc *dc, 4393 struct dc_surface_update *srf_updates, 4394 int surface_count, 4395 struct dc_stream_state *stream, 4396 struct dc_stream_update *stream_update, 4397 struct dc_state *state) 4398 { 4399 const struct dc_stream_status *stream_status; 4400 enum surface_update_type update_type; 4401 struct dc_state *context; 4402 struct dc_context *dc_ctx = dc->ctx; 4403 int i, j; 4404 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4405 4406 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4407 stream_status = dc_stream_get_status(stream); 4408 context = dc->current_state; 4409 4410 update_type = dc_check_update_surfaces_for_stream( 4411 dc, srf_updates, surface_count, stream_update, stream_status); 4412 4413 /* TODO: Since change commit sequence can have a huge impact, 4414 * we decided to only enable it for DCN3x. However, as soon as 4415 * we get more confident about this change we'll need to enable 4416 * the new sequence for all ASICs. 4417 */ 4418 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 4419 /* 4420 * Previous frame finished and HW is ready for optimization. 
4421 */ 4422 if (update_type == UPDATE_TYPE_FAST) 4423 dc_post_update_surfaces_to_stream(dc); 4424 4425 dc_update_planes_and_stream(dc, srf_updates, 4426 surface_count, stream, 4427 stream_update); 4428 return; 4429 } 4430 4431 if (update_type >= update_surface_trace_level) 4432 update_surface_trace(dc, srf_updates, surface_count); 4433 4434 4435 if (update_type >= UPDATE_TYPE_FULL) { 4436 4437 /* initialize scratch memory for building context */ 4438 context = dc_create_state(dc); 4439 if (context == NULL) { 4440 DC_ERROR("Failed to allocate new validate context!\n"); 4441 return; 4442 } 4443 4444 dc_resource_state_copy_construct(state, context); 4445 4446 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4447 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 4448 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4449 4450 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 4451 new_pipe->plane_state->force_full_update = true; 4452 } 4453 } else if (update_type == UPDATE_TYPE_FAST) { 4454 /* 4455 * Previous frame finished and HW is ready for optimization. 4456 */ 4457 dc_post_update_surfaces_to_stream(dc); 4458 } 4459 4460 4461 for (i = 0; i < surface_count; i++) { 4462 struct dc_plane_state *surface = srf_updates[i].surface; 4463 4464 copy_surface_update_to_plane(surface, &srf_updates[i]); 4465 4466 if (update_type >= UPDATE_TYPE_MED) { 4467 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4468 struct pipe_ctx *pipe_ctx = 4469 &context->res_ctx.pipe_ctx[j]; 4470 4471 if (pipe_ctx->plane_state != surface) 4472 continue; 4473 4474 resource_build_scaling_params(pipe_ctx); 4475 } 4476 } 4477 } 4478 4479 copy_stream_update_to_stream(dc, context, stream, stream_update); 4480 4481 if (update_type >= UPDATE_TYPE_FULL) { 4482 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 4483 DC_ERROR("Mode validation failed for stream update!\n"); 4484 dc_release_state(context); 4485 return; 4486 } 4487 } 4488 4489 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 4490 4491 update_seamless_boot_flags(dc, context, surface_count, stream); 4492 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && 4493 !dc->debug.enable_legacy_fast_update) { 4494 commit_planes_for_stream_fast(dc, 4495 srf_updates, 4496 surface_count, 4497 stream, 4498 stream_update, 4499 update_type, 4500 context); 4501 } else { 4502 commit_planes_for_stream( 4503 dc, 4504 srf_updates, 4505 surface_count, 4506 stream, 4507 stream_update, 4508 update_type, 4509 context); 4510 } 4511 /*update current_State*/ 4512 if (dc->current_state != context) { 4513 4514 struct dc_state *old = dc->current_state; 4515 4516 dc->current_state = context; 4517 dc_release_state(old); 4518 4519 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4520 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4521 4522 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4523 pipe_ctx->plane_state->force_full_update = false; 4524 } 4525 } 4526 4527 /* Legacy optimization path for DCE. 
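 * For pre-DCN (DCE) ASICs, the post-update bandwidth optimization is applied
 * right after a full update via dc_post_update_surfaces_to_stream() below,
 * rather than being deferred to a subsequent fast update as on DCN.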
*/ 4528 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 4529 dc_post_update_surfaces_to_stream(dc); 4530 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 4531 } 4532 4533 return; 4534 4535 } 4536 4537 uint8_t dc_get_current_stream_count(struct dc *dc) 4538 { 4539 return dc->current_state->stream_count; 4540 } 4541 4542 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 4543 { 4544 if (i < dc->current_state->stream_count) 4545 return dc->current_state->streams[i]; 4546 return NULL; 4547 } 4548 4549 enum dc_irq_source dc_interrupt_to_irq_source( 4550 struct dc *dc, 4551 uint32_t src_id, 4552 uint32_t ext_id) 4553 { 4554 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 4555 } 4556 4557 /* 4558 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 4559 */ 4560 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 4561 { 4562 4563 if (dc == NULL) 4564 return false; 4565 4566 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 4567 } 4568 4569 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 4570 { 4571 dal_irq_service_ack(dc->res_pool->irqs, src); 4572 } 4573 4574 void dc_power_down_on_boot(struct dc *dc) 4575 { 4576 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 4577 dc->hwss.power_down_on_boot) 4578 dc->hwss.power_down_on_boot(dc); 4579 } 4580 4581 void dc_set_power_state( 4582 struct dc *dc, 4583 enum dc_acpi_cm_power_state power_state) 4584 { 4585 struct kref refcount; 4586 struct display_mode_lib *dml; 4587 4588 if (!dc->current_state) 4589 return; 4590 4591 switch (power_state) { 4592 case DC_ACPI_CM_POWER_STATE_D0: 4593 dc_resource_state_construct(dc, dc->current_state); 4594 4595 dc_z10_restore(dc); 4596 4597 dc->hwss.init_hw(dc); 4598 4599 if (dc->hwss.init_sys_ctx != NULL && 4600 dc->vm_pa_config.valid) { 4601 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 4602 } 4603 4604 break; 4605 default: 4606 ASSERT(dc->current_state->stream_count == 0); 4607 /* Zero out the current context so that on resume we start with 4608 * clean state, and dc hw programming optimizations will not 4609 * cause any trouble. 
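 *
 * Note: the state's kref and its display_mode_lib copy are saved before the
 * memset() below and restored afterwards, so only the programmable state
 * itself is cleared.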
4610 */ 4611 dml = kzalloc(sizeof(struct display_mode_lib), 4612 GFP_KERNEL); 4613 4614 ASSERT(dml); 4615 if (!dml) 4616 return; 4617 4618 /* Preserve refcount */ 4619 refcount = dc->current_state->refcount; 4620 /* Preserve display mode lib */ 4621 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); 4622 4623 dc_resource_state_destruct(dc->current_state); 4624 memset(dc->current_state, 0, 4625 sizeof(*dc->current_state)); 4626 4627 dc->current_state->refcount = refcount; 4628 dc->current_state->bw_ctx.dml = *dml; 4629 4630 kfree(dml); 4631 4632 break; 4633 } 4634 } 4635 4636 void dc_resume(struct dc *dc) 4637 { 4638 uint32_t i; 4639 4640 for (i = 0; i < dc->link_count; i++) 4641 dc->link_srv->resume(dc->links[i]); 4642 } 4643 4644 bool dc_is_dmcu_initialized(struct dc *dc) 4645 { 4646 struct dmcu *dmcu = dc->res_pool->dmcu; 4647 4648 if (dmcu) 4649 return dmcu->funcs->is_dmcu_initialized(dmcu); 4650 return false; 4651 } 4652 4653 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) 4654 { 4655 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; 4656 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; 4657 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; 4658 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; 4659 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; 4660 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; 4661 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; 4662 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; 4663 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; 4664 } 4665 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 4666 { 4667 if (dc->hwss.set_clock) 4668 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 4669 return DC_ERROR_UNEXPECTED; 4670 } 4671 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 4672 { 4673 if (dc->hwss.get_clock) 4674 dc->hwss.get_clock(dc, clock_type, clock_cfg); 4675 } 4676 4677 /* enable/disable eDP PSR without specify stream for eDP */ 4678 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 4679 { 4680 int i; 4681 bool allow_active; 4682 4683 for (i = 0; i < dc->current_state->stream_count ; i++) { 4684 struct dc_link *link; 4685 struct dc_stream_state *stream = dc->current_state->streams[i]; 4686 4687 link = stream->link; 4688 if (!link) 4689 continue; 4690 4691 if (link->psr_settings.psr_feature_enabled) { 4692 if (enable && !link->psr_settings.psr_allow_active) { 4693 allow_active = true; 4694 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 4695 return false; 4696 } else if (!enable && link->psr_settings.psr_allow_active) { 4697 allow_active = false; 4698 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 4699 return false; 4700 } 4701 } 4702 } 4703 4704 return true; 4705 } 4706 4707 void dc_allow_idle_optimizations(struct dc *dc, bool allow) 4708 { 4709 if (dc->debug.disable_idle_power_optimizations) 4710 return; 4711 4712 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 4713 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 4714 return; 4715 4716 if (allow == dc->idle_optimizations_allowed) 4717 return; 4718 4719 if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != 
NULL && 4720 dc->hwss.apply_idle_power_optimizations(dc, allow)) 4721 dc->idle_optimizations_allowed = allow; 4722 } 4723 4724 /* set min and max memory clock to lowest and highest DPM level, respectively */ 4725 void dc_unlock_memory_clock_frequency(struct dc *dc) 4726 { 4727 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4728 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 4729 4730 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4731 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4732 } 4733 4734 /* set min memory clock to the min required for current mode, max to maxDPM */ 4735 void dc_lock_memory_clock_frequency(struct dc *dc) 4736 { 4737 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 4738 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 4739 4740 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4741 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 4742 4743 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4744 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4745 } 4746 4747 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 4748 { 4749 struct dc_state *context = dc->current_state; 4750 struct hubp *hubp; 4751 struct pipe_ctx *pipe; 4752 int i; 4753 4754 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4755 pipe = &context->res_ctx.pipe_ctx[i]; 4756 4757 if (pipe->stream != NULL) { 4758 dc->hwss.disable_pixel_data(dc, pipe, true); 4759 4760 // wait for double buffer 4761 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4762 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 4763 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4764 4765 hubp = pipe->plane_res.hubp; 4766 hubp->funcs->set_blank_regs(hubp, true); 4767 } 4768 } 4769 4770 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 4771 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 4772 4773 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4774 pipe = &context->res_ctx.pipe_ctx[i]; 4775 4776 if (pipe->stream != NULL) { 4777 dc->hwss.disable_pixel_data(dc, pipe, false); 4778 4779 hubp = pipe->plane_res.hubp; 4780 hubp->funcs->set_blank_regs(hubp, false); 4781 } 4782 } 4783 } 4784 4785 4786 /** 4787 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 4788 * @dc: pointer to dc of the dm calling this 4789 * @enable: True = transition to DC mode, false = transition back to AC mode 4790 * 4791 * Some SoCs define additional clock limits when in DC mode, DM should 4792 * invoke this function when the platform undergoes a power source transition 4793 * so DC can apply/unapply the limit. This interface may be disruptive to 4794 * the onscreen content. 4795 * 4796 * Context: Triggered by OS through DM interface, or manually by escape calls. 4797 * Need to hold a dclock when doing so. 
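 *
 * Illustrative sketch of a DM-side power-source handler (hypothetical helper
 * name, not part of this file):
 *
 *	static void my_dm_on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		// cap memclk at the DC-mode soft max on battery,
 *		// restore the full DPM range on AC
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}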
* 4799 * Return: none (void function) 4800 * 4801 */ 4802 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 4803 { 4804 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; 4805 bool p_state_change_support; 4806 4807 if (!dc->config.dc_mode_clk_limit_support) 4808 return; 4809 4810 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 4811 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { 4812 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) 4813 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; 4814 } 4815 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 4816 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 4817 4818 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 4819 if (p_state_change_support) { 4820 if (funcMin <= softMax) 4821 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 4822 // else: No-Op 4823 } else { 4824 if (funcMin <= softMax) 4825 blank_and_force_memclk(dc, true, softMax); 4826 // else: No-Op 4827 } 4828 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 4829 if (p_state_change_support) { 4830 if (funcMin <= softMax) 4831 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 4832 // else: No-Op 4833 } else { 4834 if (funcMin <= softMax) 4835 blank_and_force_memclk(dc, true, maxDPM); 4836 // else: No-Op 4837 } 4838 } 4839 dc->clk_mgr->dc_mode_softmax_enabled = enable; 4840 } 4841 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 4842 struct dc_cursor_attributes *cursor_attr) 4843 { 4844 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) 4845 return true; 4846 return false; 4847 } 4848 4849 /* cleanup on driver unload */ 4850 void dc_hardware_release(struct dc *dc) 4851 { 4852 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 4853 4854 if (dc->hwss.hardware_release) 4855 dc->hwss.hardware_release(dc); 4856 } 4857 4858 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 4859 { 4860 if (dc->current_state) 4861 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 4862 } 4863 4864 /** 4865 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications 4866 * 4867 * @dc: [in] dc structure 4868 * 4869 * Checks whether DMUB FW supports outbox notifications. If supported, DM 4870 * should register the outbox interrupt prior to actually enabling interrupts 4871 * via dc_enable_dmub_outbox. 4872 * 4873 * Return: 4874 * True if DMUB FW supports outbox notifications, False otherwise 4875 */ 4876 bool dc_is_dmub_outbox_supported(struct dc *dc) 4877 { 4878 switch (dc->ctx->asic_id.chip_family) { 4879 4880 case FAMILY_YELLOW_CARP: 4881 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 4882 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 4883 !dc->debug.dpia_debug.bits.disable_dpia) 4884 return true; 4885 break; 4886 4887 case AMDGPU_FAMILY_GC_11_0_1: 4888 case AMDGPU_FAMILY_GC_11_5_0: 4889 if (!dc->debug.dpia_debug.bits.disable_dpia) 4890 return true; 4891 break; 4892 4893 default: 4894 break; 4895 } 4896 4897 /* dmub aux needs dmub notifications to be enabled */ 4898 return dc->debug.enable_dmub_aux_for_legacy_ddc; 4899 4900 } 4901 4902 /** 4903 * dc_enable_dmub_notifications - Check if dmub fw supports outbox 4904 * 4905 * @dc: [in] dc structure 4906 * 4907 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 4908 * notifications.
All DMs shall switch to dc_is_dmub_outbox_supported. This 4909 * API shall be removed after switching. 4910 * 4911 * Return: 4912 * True if DMUB FW supports outbox notifications, False otherwise 4913 */ 4914 bool dc_enable_dmub_notifications(struct dc *dc) 4915 { 4916 return dc_is_dmub_outbox_supported(dc); 4917 } 4918 4919 /** 4920 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 4921 * 4922 * @dc: [in] dc structure 4923 * 4924 * Enables DMUB unsolicited notifications to x86 via outbox. 4925 */ 4926 void dc_enable_dmub_outbox(struct dc *dc) 4927 { 4928 struct dc_context *dc_ctx = dc->ctx; 4929 4930 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 4931 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 4932 } 4933 4934 /** 4935 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 4936 * Sets port index appropriately for legacy DDC 4937 * @dc: dc structure 4938 * @link_index: link index 4939 * @payload: aux payload 4940 * 4941 * Returns: True if successful, False if failure 4942 */ 4943 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 4944 uint32_t link_index, 4945 struct aux_payload *payload) 4946 { 4947 uint8_t action; 4948 union dmub_rb_cmd cmd = {0}; 4949 4950 ASSERT(payload->length <= 16); 4951 4952 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 4953 cmd.dp_aux_access.header.payload_bytes = 0; 4954 /* For dpia, ddc_pin is set to NULL */ 4955 if (!dc->links[link_index]->ddc->ddc_pin) 4956 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 4957 else 4958 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 4959 4960 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 4961 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 4962 cmd.dp_aux_access.aux_control.timeout = 0; 4963 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 4964 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 4965 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 4966 4967 /* set aux action */ 4968 if (payload->i2c_over_aux) { 4969 if (payload->write) { 4970 if (payload->mot) 4971 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 4972 else 4973 action = DP_AUX_REQ_ACTION_I2C_WRITE; 4974 } else { 4975 if (payload->mot) 4976 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 4977 else 4978 action = DP_AUX_REQ_ACTION_I2C_READ; 4979 } 4980 } else { 4981 if (payload->write) 4982 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 4983 else 4984 action = DP_AUX_REQ_ACTION_DPCD_READ; 4985 } 4986 4987 cmd.dp_aux_access.aux_control.dpaux.action = action; 4988 4989 if (payload->length && payload->write) { 4990 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 4991 payload->data, 4992 payload->length 4993 ); 4994 } 4995 4996 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 4997 4998 return true; 4999 } 5000 5001 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 5002 uint8_t dpia_port_index) 5003 { 5004 uint8_t index, link_index = 0xFF; 5005 5006 for (index = 0; index < dc->link_count; index++) { 5007 /* ddc_hw_inst has dpia port index for dpia links 5008 * and ddc instance for legacy links 5009 */ 5010 if (!dc->links[index]->ddc->ddc_pin) { 5011 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 5012 link_index = index; 5013 break; 5014 } 5015 } 5016 } 5017 ASSERT(link_index != 0xFF); 5018 return link_index; 5019 } 5020 5021 /** 5022 * dc_process_dmub_set_config_async - Submits set_config command 5023 * 5024 * @dc: [in] dc structure 5025 * @link_index: [in] 
link index 5026 * @payload: [in] aux payload 5027 * @notify: [out] set_config immediate reply 5028 * 5029 * Submits set_config command to dmub via inbox message. 5030 * 5031 * Return: 5032 * True if successful, False if failure 5033 */ 5034 bool dc_process_dmub_set_config_async(struct dc *dc, 5035 uint32_t link_index, 5036 struct set_config_cmd_payload *payload, 5037 struct dmub_notification *notify) 5038 { 5039 union dmub_rb_cmd cmd = {0}; 5040 bool is_cmd_complete = true; 5041 5042 /* prepare SET_CONFIG command */ 5043 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 5044 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 5045 5046 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 5047 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 5048 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 5049 5050 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 5051 /* command is not processed by dmub */ 5052 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 5053 return is_cmd_complete; 5054 } 5055 5056 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 5057 if (cmd.set_config_access.header.ret_status == 1) 5058 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 5059 else 5060 /* cmd pending, will receive notification via outbox */ 5061 is_cmd_complete = false; 5062 5063 return is_cmd_complete; 5064 } 5065 5066 /** 5067 * dc_process_dmub_set_mst_slots - Submits MST slot allocation 5068 * 5069 * @dc: [in] dc structure 5070 * @link_index: [in] link index 5071 * @mst_alloc_slots: [in] mst slots to be allotted 5072 * @mst_slots_in_use: [out] mst slots in use returned in failure case 5073 * 5074 * Submits mst slot allocation command to dmub via inbox message 5075 * 5076 * Return: 5077 * DC_OK if successful, DC_ERROR if failure 5078 */ 5079 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 5080 uint32_t link_index, 5081 uint8_t mst_alloc_slots, 5082 uint8_t *mst_slots_in_use) 5083 { 5084 union dmub_rb_cmd cmd = {0}; 5085 5086 /* prepare MST_ALLOC_SLOTS command */ 5087 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 5088 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 5089 5090 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 5091 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 5092 5093 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 5094 /* command is not processed by dmub */ 5095 return DC_ERROR_UNEXPECTED; 5096 5097 /* command processed by dmub, if ret_status is 1 */ 5098 if (cmd.set_config_access.header.ret_status != 1) 5099 /* command processing error */ 5100 return DC_ERROR_UNEXPECTED; 5101 5102 /* command processed and we have a status of 2, mst not enabled in dpia */ 5103 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 5104 return DC_FAIL_UNSUPPORTED_1; 5105 5106 /* previously configured mst alloc and used slots did not match */ 5107 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 5108 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 5109 return DC_NOT_SUPPORTED; 5110 } 5111 5112 return DC_OK; 5113 } 5114 5115 /** 5116 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable 5117 * 5118 * @dc: [in] dc structure 5119 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to
disable 5120 * 5121 * Submits dpia hpd int enable command to dmub via inbox message 5122 */ 5123 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 5124 uint32_t hpd_int_enable) 5125 { 5126 union dmub_rb_cmd cmd = {0}; 5127 5128 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 5129 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 5130 5131 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 5132 5133 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 5134 } 5135 5136 /** 5137 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging 5138 * 5139 * @dc: [in] dc structure 5140 * 5141 * 5142 */ 5143 void dc_print_dmub_diagnostic_data(const struct dc *dc) 5144 { 5145 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); 5146 } 5147 5148 /** 5149 * dc_disable_accelerated_mode - disable accelerated mode 5150 * @dc: dc structure 5151 */ 5152 void dc_disable_accelerated_mode(struct dc *dc) 5153 { 5154 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 5155 } 5156 5157 5158 /** 5159 * dc_notify_vsync_int_state - notifies vsync enable/disable state 5160 * @dc: dc structure 5161 * @stream: stream where vsync int state changed 5162 * @enable: whether vsync is enabled or disabled 5163 * 5164 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM 5165 * interrupts after steady state is reached. 5166 */ 5167 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 5168 { 5169 int i; 5170 int edp_num; 5171 struct pipe_ctx *pipe = NULL; 5172 struct dc_link *link = stream->sink->link; 5173 struct dc_link *edp_links[MAX_NUM_EDP]; 5174 5175 5176 if (link->psr_settings.psr_feature_enabled) 5177 return; 5178 5179 if (link->replay_settings.replay_feature_enabled) 5180 return; 5181 5182 /*find primary pipe associated with stream*/ 5183 for (i = 0; i < MAX_PIPES; i++) { 5184 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5185 5186 if (pipe->stream == stream && pipe->stream_res.tg) 5187 break; 5188 } 5189 5190 if (i == MAX_PIPES) { 5191 ASSERT(0); 5192 return; 5193 } 5194 5195 dc_get_edp_links(dc, edp_links, &edp_num); 5196 5197 /* Determine panel inst */ 5198 for (i = 0; i < edp_num; i++) { 5199 if (edp_links[i] == link) 5200 break; 5201 } 5202 5203 if (i == edp_num) { 5204 return; 5205 } 5206 5207 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 5208 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 5209 } 5210 5211 /***************************************************************************** 5212 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause 5213 * ABM 5214 * @dc: dc structure 5215 * @stream: stream whose ABM state is saved or restored 5216 * @pData: abm hw states 5217 * 5218 ****************************************************************************/ 5219 bool dc_abm_save_restore( 5220 struct dc *dc, 5221 struct dc_stream_state *stream, 5222 struct abm_save_restore *pData) 5223 { 5224 int i; 5225 int edp_num; 5226 struct pipe_ctx *pipe = NULL; 5227 struct dc_link *link = stream->sink->link; 5228 struct dc_link *edp_links[MAX_NUM_EDP]; 5229 5230 5231 /*find primary pipe associated with stream*/ 5232 for (i = 0; i < MAX_PIPES; i++) { 5233 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5234 5235 if (pipe->stream == stream && pipe->stream_res.tg) 5236 break; 5237 } 5238 5239 if (i == MAX_PIPES) { 5240 ASSERT(0); 5241 return false; 5242 } 5243 5244 dc_get_edp_links(dc, edp_links,
&edp_num); 5245 5246 /* Determine panel inst */ 5247 for (i = 0; i < edp_num; i++) 5248 if (edp_links[i] == link) 5249 break; 5250 5251 if (i == edp_num) 5252 return false; 5253 5254 if (pipe->stream_res.abm && 5255 pipe->stream_res.abm->funcs->save_restore) 5256 return pipe->stream_res.abm->funcs->save_restore( 5257 pipe->stream_res.abm, 5258 i, 5259 pData); 5260 return false; 5261 } 5262 5263 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) 5264 { 5265 unsigned int i; 5266 bool subvp_in_use = false; 5267 5268 for (i = 0; i < dc->current_state->stream_count; i++) { 5269 if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) { 5270 subvp_in_use = true; 5271 break; 5272 } 5273 } 5274 properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size; 5275 } 5276 5277 /** 5278 ***************************************************************************** 5279 * dc_set_edp_power() - DM controls eDP power to be ON/OFF 5280 * 5281 * Called when DM wants to power on/off eDP. 5282 * Only works on links with the skip_implict_edp_power_control flag set. 5283 * 5284 ***************************************************************************** 5285 */ 5286 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 5287 bool powerOn) 5288 { 5289 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 5290 return; 5291 5292 if (edp_link->skip_implict_edp_power_control == false) 5293 return; 5294 5295 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); 5296 } 5297 5298