/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
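
/*
 * Per-ASIC firmware blobs. Each DMUB/DMCU image below is declared with
 * MODULE_FIRMWARE() so that userspace tooling (e.g. initramfs generators,
 * modinfo) can discover which files the driver may request at runtime.
 */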
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
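
/*
 * Scanout position readback. The values DC hands back are repacked below
 * into the legacy register layout the base driver expects: the low 16 bits
 * of *position hold the vertical position and the high 16 bits the
 * horizontal one, while *vbl packs vblank start (low) and end (high) the
 * same way.
 */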
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}
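
/*
 * VUPDATE high-irq handler. VUPDATE fires after the end of the front
 * porch, so in VRR mode this is where core vblank handling, refresh rate
 * tracking and (on pre-AI parts) below-the-range FreeSync processing are
 * deferred to.
 */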
445 */ 446 amdgpu_crtc->dm_irq_params.last_flip_vblank = 447 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); 448 449 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 450 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 451 452 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", 453 amdgpu_crtc->crtc_id, amdgpu_crtc, 454 vrr_active, (int) !e); 455 } 456 457 static void dm_vupdate_high_irq(void *interrupt_params) 458 { 459 struct common_irq_params *irq_params = interrupt_params; 460 struct amdgpu_device *adev = irq_params->adev; 461 struct amdgpu_crtc *acrtc; 462 struct drm_device *drm_dev; 463 struct drm_vblank_crtc *vblank; 464 ktime_t frame_duration_ns, previous_timestamp; 465 unsigned long flags; 466 int vrr_active; 467 468 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); 469 470 if (acrtc) { 471 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 472 drm_dev = acrtc->base.dev; 473 vblank = &drm_dev->vblank[acrtc->base.index]; 474 previous_timestamp = atomic64_read(&irq_params->previous_timestamp); 475 frame_duration_ns = vblank->time - previous_timestamp; 476 477 if (frame_duration_ns > 0) { 478 trace_amdgpu_refresh_rate_track(acrtc->base.index, 479 frame_duration_ns, 480 ktime_divns(NSEC_PER_SEC, frame_duration_ns)); 481 atomic64_set(&irq_params->previous_timestamp, vblank->time); 482 } 483 484 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", 485 acrtc->crtc_id, 486 vrr_active); 487 488 /* Core vblank handling is done here after end of front-porch in 489 * vrr mode, as vblank timestamping will give valid results 490 * while now done after front-porch. This will also deliver 491 * page-flip completion events that have been queued to us 492 * if a pageflip happened inside front-porch. 493 */ 494 if (vrr_active) { 495 drm_crtc_handle_vblank(&acrtc->base); 496 497 /* BTR processing for pre-DCE12 ASICs */ 498 if (acrtc->dm_irq_params.stream && 499 adev->family < AMDGPU_FAMILY_AI) { 500 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 501 mod_freesync_handle_v_update( 502 adev->dm.freesync_module, 503 acrtc->dm_irq_params.stream, 504 &acrtc->dm_irq_params.vrr_params); 505 506 dc_stream_adjust_vmin_vmax( 507 adev->dm.dc, 508 acrtc->dm_irq_params.stream, 509 &acrtc->dm_irq_params.vrr_params.adjust); 510 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 511 } 512 } 513 } 514 } 515 516 /** 517 * dm_crtc_high_irq() - Handles CRTC interrupt 518 * @interrupt_params: used for determining the CRTC instance 519 * 520 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK 521 * event handler. 522 */ 523 static void dm_crtc_high_irq(void *interrupt_params) 524 { 525 struct common_irq_params *irq_params = interrupt_params; 526 struct amdgpu_device *adev = irq_params->adev; 527 struct amdgpu_crtc *acrtc; 528 unsigned long flags; 529 int vrr_active; 530 531 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 532 if (!acrtc) 533 return; 534 535 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 536 537 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, 538 vrr_active, acrtc->dm_irq_params.active_planes); 539 540 /** 541 * Core vblank handling at start of front-porch is only possible 542 * in non-vrr mode, as only there vblank timestamping will give 543 * valid results while done in front-porch. Otherwise defer it 544 * to dm_vupdate_high_irq after end of front-porch. 

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

#define DMUB_TRACE_MAX_READ 64
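/* DMUB_TRACE_MAX_READ bounds how many trace-buffer entries
 * dm_dmub_outbox1_low_irq() below drains per interrupt.
 */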
628 */ 629 static void dm_dmub_outbox1_low_irq(void *interrupt_params) 630 { 631 struct dmub_notification notify; 632 struct common_irq_params *irq_params = interrupt_params; 633 struct amdgpu_device *adev = irq_params->adev; 634 struct amdgpu_display_manager *dm = &adev->dm; 635 struct dmcub_trace_buf_entry entry = { 0 }; 636 uint32_t count = 0; 637 638 if (dc_enable_dmub_notifications(adev->dm.dc)) { 639 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 640 do { 641 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); 642 } while (notify.pending_notification); 643 644 if (adev->dm.dmub_notify) 645 memcpy(adev->dm.dmub_notify, ¬ify, sizeof(struct dmub_notification)); 646 if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) 647 complete(&adev->dm.dmub_aux_transfer_done); 648 // TODO : HPD Implementation 649 650 } else { 651 DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); 652 } 653 } 654 655 656 do { 657 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 658 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 659 entry.param0, entry.param1); 660 661 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 662 entry.trace_code, entry.tick_count, entry.param0, entry.param1); 663 } else 664 break; 665 666 count++; 667 668 } while (count <= DMUB_TRACE_MAX_READ); 669 670 ASSERT(count <= DMUB_TRACE_MAX_READ); 671 } 672 #endif 673 674 static int dm_set_clockgating_state(void *handle, 675 enum amd_clockgating_state state) 676 { 677 return 0; 678 } 679 680 static int dm_set_powergating_state(void *handle, 681 enum amd_powergating_state state) 682 { 683 return 0; 684 } 685 686 /* Prototypes of private functions */ 687 static int dm_early_init(void* handle); 688 689 /* Allocate memory for FBC compressed data */ 690 static void amdgpu_dm_fbc_init(struct drm_connector *connector) 691 { 692 struct drm_device *dev = connector->dev; 693 struct amdgpu_device *adev = drm_to_adev(dev); 694 struct dm_compressor_info *compressor = &adev->dm.compressor; 695 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); 696 struct drm_display_mode *mode; 697 unsigned long max_size = 0; 698 699 if (adev->dm.dc->fbc_compressor == NULL) 700 return; 701 702 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) 703 return; 704 705 if (compressor->bo_ptr) 706 return; 707 708 709 list_for_each_entry(mode, &connector->modes, head) { 710 if (max_size < mode->htotal * mode->vtotal) 711 max_size = mode->htotal * mode->vtotal; 712 } 713 714 if (max_size) { 715 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, 716 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, 717 &compressor->gpu_addr, &compressor->cpu_addr); 718 719 if (r) 720 DRM_ERROR("DM: Failed to initialize FBC\n"); 721 else { 722 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; 723 DRM_INFO("DM: FBC alloc %lu\n", max_size*4); 724 } 725 726 } 727 728 } 729 730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, 731 int pipe, bool *enabled, 732 unsigned char *buf, int max_bytes) 733 { 734 struct drm_device *dev = dev_get_drvdata(kdev); 735 struct amdgpu_device *adev = drm_to_adev(dev); 736 struct drm_connector *connector; 737 struct drm_connector_list_iter conn_iter; 738 struct amdgpu_dm_connector *aconnector; 739 int ret = 0; 740 741 *enabled = false; 742 743 mutex_lock(&adev->dm.audio_lock); 744 745 drm_connector_list_iter_begin(dev, &conn_iter); 746 drm_for_each_connector_iter(connector, &conn_iter) { 747 aconnector = to_amdgpu_dm_connector(connector); 748 if 
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

#ifdef notyet
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
#endif

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
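
/*
 * Translate the GMC view of memory (frame buffer, AGP aperture and GART
 * page table) into the dc_phy_addr_space_config that DC uses to program
 * the DCN system aperture.
 */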
1014 */ 1015 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); 1016 else 1017 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; 1018 1019 agp_base = 0; 1020 agp_bot = adev->gmc.agp_start >> 24; 1021 agp_top = adev->gmc.agp_end >> 24; 1022 1023 1024 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; 1025 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); 1026 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; 1027 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); 1028 page_table_base.high_part = upper_32_bits(pt_base) & 0xF; 1029 page_table_base.low_part = lower_32_bits(pt_base); 1030 1031 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; 1032 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; 1033 1034 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ; 1035 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; 1036 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; 1037 1038 pa_config->system_aperture.fb_base = adev->gmc.fb_start; 1039 pa_config->system_aperture.fb_offset = adev->gmc.aper_base; 1040 pa_config->system_aperture.fb_top = adev->gmc.fb_end; 1041 1042 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; 1043 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; 1044 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; 1045 1046 pa_config->is_hvm_enabled = 0; 1047 1048 } 1049 #endif 1050 #if defined(CONFIG_DRM_AMD_DC_DCN) 1051 static void vblank_control_worker(struct work_struct *work) 1052 { 1053 struct vblank_control_work *vblank_work = 1054 container_of(work, struct vblank_control_work, work); 1055 struct amdgpu_display_manager *dm = vblank_work->dm; 1056 1057 mutex_lock(&dm->dc_lock); 1058 1059 if (vblank_work->enable) 1060 dm->active_vblank_irq_count++; 1061 else if(dm->active_vblank_irq_count) 1062 dm->active_vblank_irq_count--; 1063 1064 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); 1065 1066 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); 1067 1068 /* Control PSR based on vblank requirements from OS */ 1069 if (vblank_work->stream && vblank_work->stream->link) { 1070 if (vblank_work->enable) { 1071 if (vblank_work->stream->link->psr_settings.psr_allow_active) 1072 amdgpu_dm_psr_disable(vblank_work->stream); 1073 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && 1074 !vblank_work->stream->link->psr_settings.psr_allow_active && 1075 vblank_work->acrtc->dm_irq_params.allow_psr_entry) { 1076 amdgpu_dm_psr_enable(vblank_work->stream); 1077 } 1078 } 1079 1080 mutex_unlock(&dm->dc_lock); 1081 1082 dc_stream_release(vblank_work->stream); 1083 1084 kfree(vblank_work); 1085 } 1086 1087 #endif 1088 static int amdgpu_dm_init(struct amdgpu_device *adev) 1089 { 1090 struct dc_init_data init_data; 1091 #ifdef CONFIG_DRM_AMD_DC_HDCP 1092 struct dc_callback_init init_params; 1093 #endif 1094 int r; 1095 1096 adev->dm.ddev = adev_to_drm(adev); 1097 adev->dm.adev = adev; 1098 1099 /* Zero all the fields */ 1100 memset(&init_data, 0, sizeof(init_data)); 1101 #ifdef CONFIG_DRM_AMD_DC_HDCP 1102 memset(&init_params, 0, sizeof(init_params)); 1103 #endif 1104 1105 rw_init(&adev->dm.dc_lock, "dmdc"); 1106 rw_init(&adev->dm.audio_lock, "dmaud"); 1107 #if defined(CONFIG_DRM_AMD_DC_DCN) 1108 mtx_init(&adev->dm.vblank_lock, IPL_TTY); 1109 #endif 1110 1111 
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	rw_init(&adev->dm.dc_lock, "dmdc");
	rw_init(&adev->dm.audio_lock, "dmaud");
#if defined(CONFIG_DRM_AMD_DC_DCN)
	mtx_init(&adev->dm.vblank_lock, IPL_TTY);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
			goto error;
		}
		amdgpu_dm_outbox_init(adev);
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_control_workqueue) {
		destroy_workqueue(adev->dm.vblank_control_workqueue);
		adev->dm.vblank_control_workqueue = NULL;
	}
#endif

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		kfree(adev->dm.dmub_notify);
		adev->dm.dmub_notify = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}
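
/*
 * Request the optional DMCU firmware. Most ASICs either have no DMCU or
 * run ABM on DMUB instead, in which case this returns 0 without loading
 * anything; a missing blob is likewise not treated as an error.
 */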
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
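
/*
 * Software-side DMUB setup: pick and validate the per-ASIC firmware,
 * create the DMUB service, compute its region layout and back the regions
 * with a kernel BO in VRAM.
 */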
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case CHIP_BEIGE_GOBY:
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case CHIP_YELLOW_CARP:
		dmub_asic = DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}


	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
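
/*
 * Late init: program the backlight/ABM IRAM tables (via DMCU where one
 * exists, otherwise via DMUB for ABM 2.4 and up) and start MST topology
 * detection on all connectors.
 */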
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}


static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}

		}
	}

}

static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}


	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;
	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");

}

static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);

	return;
}

static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		if (dc_enable_dmub_notifications(adev->dm.dc))
			amdgpu_dm_outbox_init(adev);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/*
		 * Resource allocation happens for link encoders for newer ASICs in
		 * dc_validate_global_state, so we need to revalidate it.
		 *
		 * This shouldn't fail (it passed once before), so warn if it does.
		 */
		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
#endif

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_enable_dmub_notifications(adev->dm.dc))
		amdgpu_dm_outbox_init(adev);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * This is the case when traversing through already created
		 * MST connectors; it should be skipped.
		 */
		if (aconnector->dc_link &&
		    aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


/**
 * DOC: atomic
 *
 * *WIP*
 */

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * For calculating this expression we may need floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Next, we replace
	 * CV in the Luminance expression and get 50*(2**q)*(2**(r/32)), hence
	 * we just need to pre-compute the value of r/32.
	 * For pre-computing the values we used the following Ruby line:
	 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
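/*
 * Illustrative sketch, not part of the driver: the identity behind the
 * integer-only max-luminance computation above. Writing CV = 32*q + r gives
 * 50 * 2^(CV/32) = 2^q * round(50 * 2^(r/32)), and the rounded factor is
 * exactly what pre_computed_values[r] stores. For example, max_cll = 65
 * yields q = 2, r = 1, so max = 4 * 51 = 204 (the exact value is ~204.4).
 * The helper name is hypothetical; the 32-entry table is passed in because
 * pre_computed_values[] above is local to update_connector_ext_caps().
 */
static inline u32 dm_example_max_luminance(u32 max_cll, const u8 table[32])
{
	return (1 << (max_cll >> 5)) * table[max_cll % 32];
}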
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets its first update only in the mode_valid
	 * hook, and then the connector sink is set to either the fake or the
	 * physical sink depending on link status. Skip if already done during
	 * boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless, use the emulated sink
		 * (dc_em_sink) to fake the stream, because on resume
		 * connector->sink is set to NULL.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * refcount for the sink because the link
				 * doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would otherwise get an
				 * unwanted dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		if (new_connection_type == dc_connection_none &&
		    aconnector->dc_link->type == dc_connection_none)
			dm_set_dpms_off(aconnector->dc_link);

		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
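/*
 * Illustrative sketch, not part of the driver: the DPCD window that
 * dm_handle_hpd_rx_irq() above polls. Pre-DP-1.2 sinks report sink count and
 * device-service IRQs starting at DPCD 0x200, while DP 1.2+ (MST-capable)
 * sinks mirror them in the Event Status Indicator block at 0x2002. The
 * helper name is hypothetical; the defines come from the DRM DP helpers
 * already included by this file.
 */
static inline void dm_example_esi_window(u8 dpcd_rev, u32 *addr, u32 *len)
{
	if (dpcd_rev < 0x12) {
		*addr = DP_SINK_COUNT;			/* 0x200 */
		*len = DP_LANE0_1_STATUS - DP_SINK_COUNT;	/* 2 bytes */
	} else {
		*addr = DP_SINK_COUNT_ESI;		/* 0x2002 */
		*len = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;	/* 4 bytes */
	}
}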
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;


	/*
	 * TODO: Temporary mutex to protect the hpd interrupt from a gpio
	 * conflict; once an i2c helper is implemented, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
			result = true;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			result = false;
			dm_handle_hpd_rx_irq(aconnector);
			goto out;
		}
	}

	/*
	 * TODO: We need the lock to avoid touching DC state while it's being
	 * modified during automated compliance testing, or when link loss
	 * happens. While this should be split into subhandlers and proper
	 * interfaces to avoid having to conditionally lock like this in the
	 * outer layer, we need this workaround temporarily to allow MST
	 * lightup in some scenarios to avoid timeout.
	 */
	if (!amdgpu_in_reset(adev) &&
	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
		mutex_lock(&adev->dm.dc_lock);
		lock_flag = true;
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
	if (!amdgpu_in_reset(adev) && lock_flag)
		mutex_unlock(&adev->dm.dc_lock);

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}

#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	if (adev->asic_type >= CHIP_VEGA10)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0;
	     i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT;
	     i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
	     i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
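/*
 * Illustrative sketch, not part of the driver: every registration loop above
 * follows the same pattern - translate a hardware source id into a DC irq
 * source via dc_interrupt_to_irq_source(), then use the offset from the
 * first source of that type to index the matching per-type parameter array
 * (vblank_params, vupdate_params, pflip_params). Hypothetical helper for the
 * vblank case:
 */
static inline struct common_irq_params *
dm_example_vblank_params(struct amdgpu_device *adev, enum dc_irq_source src)
{
	/* CRTC k maps to DC_IRQ_SOURCE_VBLANK1 + k, i.e. vblank_params[k]. */
	return &adev->dm.vblank_params[src - DC_IRQ_SOURCE_VBLANK1];
}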
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
	     i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
	     i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
/* Register Outbox IRQ sources and initialize IRQ callbacks */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}
#endif

/*
 * Acquires the lock for the atomic state object and returns
 * the new atomic state.
 *
 * This should only be called during atomic check.
 */
static int dm_atomic_get_state(struct drm_atomic_state *state,
			       struct dm_atomic_state **dm_state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_state *priv_state;

	if (*dm_state)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	*dm_state = to_dm_atomic_state(priv_state);

	return 0;
}

static struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct drm_private_obj *obj;
	struct drm_private_state *new_obj_state;
	int i;

	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
		if (obj->funcs == dm->atomic_obj.funcs)
			return to_dm_atomic_state(new_obj_state);
	}

	return NULL;
}

static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
	struct dm_atomic_state *old_state, *new_state;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);

	old_state = to_dm_atomic_state(obj->state);

	if (old_state && old_state->context)
		new_state->context = dc_copy_state(old_state->context);

	if (!new_state->context) {
		kfree(new_state);
		return NULL;
	}

	return &new_state->base;
}

static void dm_atomic_destroy_state(struct drm_private_obj *obj,
				    struct drm_private_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state && dm_state->context)
		dc_release_state(dm_state->context);

	kfree(dm_state);
}

static struct drm_private_state_funcs dm_atomic_state_funcs = {
	.atomic_duplicate_state = dm_atomic_duplicate_state,
	.atomic_destroy_state = dm_atomic_destroy_state,
};
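/*
 * Illustrative sketch, not part of the driver: how the private-object
 * plumbing above is meant to be consumed during atomic check. The first
 * dm_atomic_get_state() call pulls the DM private state into the transaction
 * (taking the private object's lock and duplicating the dc_state through
 * dm_atomic_duplicate_state()); repeated calls are cheap because *dm_state
 * is already populated. Hypothetical caller:
 */
static inline int dm_example_use_private_state(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	/* Only valid during atomic check, per the comment above. */
	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context now points at the duplicated dc_state. */
	return 0;
}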
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	struct dm_atomic_state *state;
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev_to_drm(adev),
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		dc_release_state(state->context);
		kfree(state);
		return r;
	}

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned *min, unsigned *max)
{
	if (!caps)
		return 0;

	if (caps->aux_support) {
		// Firmware limits are in nits, DC API wants millinits.
		*max = 1000 * caps->aux_max_input_signal;
		*min = 1000 * caps->aux_min_input_signal;
	} else {
		// Firmware limits are 8-bit, PWM control is 16-bit.
		*max = 0x101 * caps->max_input_signal;
		*min = 0x101 * caps->min_input_signal;
	}
	return 1;
}

static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
					uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	// Rescale 0..255 to min..max
	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
				       AMDGPU_MAX_BL_LEVEL);
}

static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
				      uint32_t brightness)
{
	unsigned min, max;

	if (!get_brightness_range(caps, &min, &max))
		return brightness;

	if (brightness < min)
		return 0;
	// Rescale min..max to 0..255
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
				 max - min);
}
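/*
 * Illustrative sketch, not part of the driver: why 0x101 is the scale factor
 * above. It maps the firmware's 8-bit limits onto the full 16-bit PWM range
 * exactly, since 0x101 * 0xFF == 0xFFFF. With that, a user level of 255
 * converts to the PWM maximum and back without loss (assuming max > min).
 * The helper name is hypothetical.
 */
static inline bool dm_example_brightness_roundtrip(
		const struct amdgpu_dm_backlight_caps *caps)
{
	u32 raw = convert_brightness_from_user(caps, AMDGPU_MAX_BL_LEVEL);

	return convert_brightness_to_user(caps, raw) == AMDGPU_MAX_BL_LEVEL;
}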
amdgpu_dm_backlight_ops = { 3617 .options = BL_CORE_SUSPENDRESUME, 3618 .get_brightness = amdgpu_dm_backlight_get_brightness, 3619 .update_status = amdgpu_dm_backlight_update_status, 3620 }; 3621 3622 static void 3623 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) 3624 { 3625 char bl_name[16]; 3626 struct backlight_properties props = { 0 }; 3627 3628 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); 3629 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; 3630 3631 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 3632 props.brightness = AMDGPU_MAX_BL_LEVEL; 3633 props.type = BACKLIGHT_RAW; 3634 3635 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 3636 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); 3637 3638 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, 3639 adev_to_drm(dm->adev)->dev, 3640 dm, 3641 &amdgpu_dm_backlight_ops, 3642 &props); 3643 3644 if (IS_ERR(dm->backlight_dev[dm->num_of_edps])) 3645 DRM_ERROR("DM: Backlight registration failed!\n"); 3646 else 3647 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 3648 } 3649 #endif 3650 3651 static int initialize_plane(struct amdgpu_display_manager *dm, 3652 struct amdgpu_mode_info *mode_info, int plane_id, 3653 enum drm_plane_type plane_type, 3654 const struct dc_plane_cap *plane_cap) 3655 { 3656 struct drm_plane *plane; 3657 unsigned long possible_crtcs; 3658 int ret = 0; 3659 3660 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 3661 if (!plane) { 3662 DRM_ERROR("KMS: Failed to allocate plane\n"); 3663 return -ENOMEM; 3664 } 3665 plane->type = plane_type; 3666 3667 /* 3668 * HACK: IGT tests expect that the primary plane for a CRTC 3669 * can only have one possible CRTC. Only expose support for 3670 * any CRTC if they're not going to be used as a primary plane 3671 * for a CRTC - like overlay or underlay planes. 3672 */ 3673 possible_crtcs = 1 << plane_id; 3674 if (plane_id >= dm->dc->caps.max_streams) 3675 possible_crtcs = 0xff; 3676 3677 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 3678 3679 if (ret) { 3680 DRM_ERROR("KMS: Failed to initialize plane\n"); 3681 kfree(plane); 3682 return ret; 3683 } 3684 3685 if (mode_info) 3686 mode_info->planes[plane_id] = plane; 3687 3688 return ret; 3689 } 3690 3691 3692 static void register_backlight_device(struct amdgpu_display_manager *dm, 3693 struct dc_link *link) 3694 { 3695 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 3696 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 3697 3698 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 3699 link->type != dc_connection_none) { 3700 /* 3701 * Event if registration failed, we should continue with 3702 * DM initialization because not having a backlight control 3703 * is better then a black screen. 3704 */ 3705 if (!dm->backlight_dev[dm->num_of_edps]) 3706 amdgpu_dm_register_backlight_device(dm); 3707 3708 if (dm->backlight_dev[dm->num_of_edps]) { 3709 dm->backlight_link[dm->num_of_edps] = link; 3710 dm->num_of_edps++; 3711 } 3712 } 3713 #endif 3714 } 3715 3716 3717 /* 3718 * In this architecture, the association 3719 * connector -> encoder -> crtc 3720 * id not really requried. 
The crtc and connector will hold the 3721 * display_index as an abstraction to use with DAL component 3722 * 3723 * Returns 0 on success 3724 */ 3725 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 3726 { 3727 struct amdgpu_display_manager *dm = &adev->dm; 3728 int32_t i; 3729 struct amdgpu_dm_connector *aconnector = NULL; 3730 struct amdgpu_encoder *aencoder = NULL; 3731 struct amdgpu_mode_info *mode_info = &adev->mode_info; 3732 uint32_t link_cnt; 3733 int32_t primary_planes; 3734 enum dc_connection_type new_connection_type = dc_connection_none; 3735 const struct dc_plane_cap *plane; 3736 3737 dm->display_indexes_num = dm->dc->caps.max_streams; 3738 /* Update the actual used number of crtc */ 3739 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 3740 3741 link_cnt = dm->dc->caps.max_links; 3742 if (amdgpu_dm_mode_config_init(dm->adev)) { 3743 DRM_ERROR("DM: Failed to initialize mode config\n"); 3744 return -EINVAL; 3745 } 3746 3747 /* There is one primary plane per CRTC */ 3748 primary_planes = dm->dc->caps.max_streams; 3749 ASSERT(primary_planes <= AMDGPU_MAX_PLANES); 3750 3751 /* 3752 * Initialize primary planes, implicit planes for legacy IOCTLS. 3753 * Order is reversed to match iteration order in atomic check. 3754 */ 3755 for (i = (primary_planes - 1); i >= 0; i--) { 3756 plane = &dm->dc->caps.planes[i]; 3757 3758 if (initialize_plane(dm, mode_info, i, 3759 DRM_PLANE_TYPE_PRIMARY, plane)) { 3760 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 3761 goto fail; 3762 } 3763 } 3764 3765 /* 3766 * Initialize overlay planes, index starting after primary planes. 3767 * These planes have a higher DRM index than the primary planes since 3768 * they should be considered as having a higher z-order. 3769 * Order is reversed to match iteration order in atomic check. 3770 * 3771 * Only support DCN for now, and only expose one so we don't encourage 3772 * userspace to use up all the pipes. 3773 */ 3774 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 3775 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 3776 3777 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 3778 continue; 3779 3780 if (!plane->blends_with_above || !plane->blends_with_below) 3781 continue; 3782 3783 if (!plane->pixel_format_support.argb8888) 3784 continue; 3785 3786 if (initialize_plane(dm, NULL, primary_planes + i, 3787 DRM_PLANE_TYPE_OVERLAY, plane)) { 3788 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 3789 goto fail; 3790 } 3791 3792 /* Only create one overlay plane. */ 3793 break; 3794 } 3795 3796 for (i = 0; i < dm->dc->caps.max_streams; i++) 3797 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 3798 DRM_ERROR("KMS: Failed to initialize crtc\n"); 3799 goto fail; 3800 } 3801 3802 #if defined(CONFIG_DRM_AMD_DC_DCN) 3803 /* Use Outbox interrupt */ 3804 switch (adev->asic_type) { 3805 case CHIP_SIENNA_CICHLID: 3806 case CHIP_NAVY_FLOUNDER: 3807 case CHIP_YELLOW_CARP: 3808 case CHIP_RENOIR: 3809 if (register_outbox_irq_handlers(dm->adev)) { 3810 DRM_ERROR("DM: Failed to initialize IRQ\n"); 3811 goto fail; 3812 } 3813 break; 3814 default: 3815 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type); 3816 } 3817 #endif 3818 3819 /* Disable vblank IRQs aggressively for power-saving. 
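 *
 * With vblank_disable_immediate set, the vblank IRQ is torn down as soon
 * as the last reference is dropped rather than after DRM's off-delay
 * timer fires. The per-link PSR check in the connector loop below drops
 * back to the delayed path where immediate disable is not yet safe.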
*/ 3820 adev_to_drm(adev)->vblank_disable_immediate = true; 3821 3822 /* loops over all connectors on the board */ 3823 for (i = 0; i < link_cnt; i++) { 3824 struct dc_link *link = NULL; 3825 3826 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 3827 DRM_ERROR( 3828 "KMS: Cannot support more than %d display indexes\n", 3829 AMDGPU_DM_MAX_DISPLAY_INDEX); 3830 continue; 3831 } 3832 3833 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 3834 if (!aconnector) 3835 goto fail; 3836 3837 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 3838 if (!aencoder) 3839 goto fail; 3840 3841 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 3842 DRM_ERROR("KMS: Failed to initialize encoder\n"); 3843 goto fail; 3844 } 3845 3846 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 3847 DRM_ERROR("KMS: Failed to initialize connector\n"); 3848 goto fail; 3849 } 3850 3851 link = dc_get_link_at_index(dm->dc, i); 3852 3853 if (!dc_link_detect_sink(link, &new_connection_type)) 3854 DRM_ERROR("KMS: Failed to detect connector\n"); 3855 3856 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3857 emulated_link_detect(link); 3858 amdgpu_dm_update_connector_after_detect(aconnector); 3859 3860 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { 3861 amdgpu_dm_update_connector_after_detect(aconnector); 3862 register_backlight_device(dm, link); 3863 3864 if (dm->num_of_edps) 3865 update_connector_ext_caps(aconnector); 3866 if (amdgpu_dc_feature_mask & DC_PSR_MASK) 3867 amdgpu_dm_set_psr_caps(link); 3868 3869 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when 3870 * PSR is also supported. 3871 */ 3872 if (link->psr_settings.psr_feature_enabled) 3873 adev_to_drm(adev)->vblank_disable_immediate = false; 3874 } 3875 3876 3877 } 3878 3879 /* Software is initialized. Now we can register interrupt handlers. 
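 *
 * Registration is grouped by display hardware generation: DCE6 for SI
 * parts, DCE8-12 for CIK through Vega, and DCN for Raven and newer,
 * mirroring the config guards around each block of cases.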
*/ 3880 switch (adev->asic_type) { 3881 #if defined(CONFIG_DRM_AMD_DC_SI) 3882 case CHIP_TAHITI: 3883 case CHIP_PITCAIRN: 3884 case CHIP_VERDE: 3885 case CHIP_OLAND: 3886 if (dce60_register_irq_handlers(dm->adev)) { 3887 DRM_ERROR("DM: Failed to initialize IRQ\n"); 3888 goto fail; 3889 } 3890 break; 3891 #endif 3892 case CHIP_BONAIRE: 3893 case CHIP_HAWAII: 3894 case CHIP_KAVERI: 3895 case CHIP_KABINI: 3896 case CHIP_MULLINS: 3897 case CHIP_TONGA: 3898 case CHIP_FIJI: 3899 case CHIP_CARRIZO: 3900 case CHIP_STONEY: 3901 case CHIP_POLARIS11: 3902 case CHIP_POLARIS10: 3903 case CHIP_POLARIS12: 3904 case CHIP_VEGAM: 3905 case CHIP_VEGA10: 3906 case CHIP_VEGA12: 3907 case CHIP_VEGA20: 3908 if (dce110_register_irq_handlers(dm->adev)) { 3909 DRM_ERROR("DM: Failed to initialize IRQ\n"); 3910 goto fail; 3911 } 3912 break; 3913 #if defined(CONFIG_DRM_AMD_DC_DCN) 3914 case CHIP_RAVEN: 3915 case CHIP_NAVI12: 3916 case CHIP_NAVI10: 3917 case CHIP_NAVI14: 3918 case CHIP_RENOIR: 3919 case CHIP_SIENNA_CICHLID: 3920 case CHIP_NAVY_FLOUNDER: 3921 case CHIP_DIMGREY_CAVEFISH: 3922 case CHIP_BEIGE_GOBY: 3923 case CHIP_VANGOGH: 3924 case CHIP_YELLOW_CARP: 3925 if (dcn10_register_irq_handlers(dm->adev)) { 3926 DRM_ERROR("DM: Failed to initialize IRQ\n"); 3927 goto fail; 3928 } 3929 break; 3930 #endif 3931 default: 3932 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 3933 goto fail; 3934 } 3935 3936 return 0; 3937 fail: 3938 kfree(aencoder); 3939 kfree(aconnector); 3940 3941 return -EINVAL; 3942 } 3943 3944 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 3945 { 3946 drm_atomic_private_obj_fini(&dm->atomic_obj); 3947 return; 3948 } 3949 3950 /****************************************************************************** 3951 * amdgpu_display_funcs functions 3952 *****************************************************************************/ 3953 3954 /* 3955 * dm_bandwidth_update - program display watermarks 3956 * 3957 * @adev: amdgpu_device pointer 3958 * 3959 * Calculate and program the display watermarks and line buffer allocation. 3960 */ 3961 static void dm_bandwidth_update(struct amdgpu_device *adev) 3962 { 3963 /* TODO: implement later */ 3964 } 3965 3966 static const struct amdgpu_display_funcs dm_display_funcs = { 3967 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 3968 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 3969 .backlight_set_level = NULL, /* never called for DC */ 3970 .backlight_get_level = NULL, /* never called for DC */ 3971 .hpd_sense = NULL,/* called unconditionally */ 3972 .hpd_set_polarity = NULL, /* called unconditionally */ 3973 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 3974 .page_flip_get_scanoutpos = 3975 dm_crtc_get_scanoutpos,/* called unconditionally */ 3976 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 3977 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 3978 }; 3979 3980 #if defined(CONFIG_DEBUG_KERNEL_DC) 3981 3982 static ssize_t s3_debug_store(struct device *device, 3983 struct device_attribute *attr, 3984 const char *buf, 3985 size_t count) 3986 { 3987 int ret; 3988 int s3_state; 3989 struct drm_device *drm_dev = dev_get_drvdata(device); 3990 struct amdgpu_device *adev = drm_to_adev(drm_dev); 3991 3992 ret = kstrtoint(buf, 0, &s3_state); 3993 3994 if (ret == 0) { 3995 if (s3_state) { 3996 dm_resume(adev); 3997 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 3998 } else 3999 dm_suspend(adev); 4000 } 4001 4002 return ret == 0 ? 
count : 0; 4003 } 4004 4005 DEVICE_ATTR_WO(s3_debug); 4006 4007 #endif 4008 4009 static int dm_early_init(void *handle) 4010 { 4011 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4012 4013 switch (adev->asic_type) { 4014 #if defined(CONFIG_DRM_AMD_DC_SI) 4015 case CHIP_TAHITI: 4016 case CHIP_PITCAIRN: 4017 case CHIP_VERDE: 4018 adev->mode_info.num_crtc = 6; 4019 adev->mode_info.num_hpd = 6; 4020 adev->mode_info.num_dig = 6; 4021 break; 4022 case CHIP_OLAND: 4023 adev->mode_info.num_crtc = 2; 4024 adev->mode_info.num_hpd = 2; 4025 adev->mode_info.num_dig = 2; 4026 break; 4027 #endif 4028 case CHIP_BONAIRE: 4029 case CHIP_HAWAII: 4030 adev->mode_info.num_crtc = 6; 4031 adev->mode_info.num_hpd = 6; 4032 adev->mode_info.num_dig = 6; 4033 break; 4034 case CHIP_KAVERI: 4035 adev->mode_info.num_crtc = 4; 4036 adev->mode_info.num_hpd = 6; 4037 adev->mode_info.num_dig = 7; 4038 break; 4039 case CHIP_KABINI: 4040 case CHIP_MULLINS: 4041 adev->mode_info.num_crtc = 2; 4042 adev->mode_info.num_hpd = 6; 4043 adev->mode_info.num_dig = 6; 4044 break; 4045 case CHIP_FIJI: 4046 case CHIP_TONGA: 4047 adev->mode_info.num_crtc = 6; 4048 adev->mode_info.num_hpd = 6; 4049 adev->mode_info.num_dig = 7; 4050 break; 4051 case CHIP_CARRIZO: 4052 adev->mode_info.num_crtc = 3; 4053 adev->mode_info.num_hpd = 6; 4054 adev->mode_info.num_dig = 9; 4055 break; 4056 case CHIP_STONEY: 4057 adev->mode_info.num_crtc = 2; 4058 adev->mode_info.num_hpd = 6; 4059 adev->mode_info.num_dig = 9; 4060 break; 4061 case CHIP_POLARIS11: 4062 case CHIP_POLARIS12: 4063 adev->mode_info.num_crtc = 5; 4064 adev->mode_info.num_hpd = 5; 4065 adev->mode_info.num_dig = 5; 4066 break; 4067 case CHIP_POLARIS10: 4068 case CHIP_VEGAM: 4069 adev->mode_info.num_crtc = 6; 4070 adev->mode_info.num_hpd = 6; 4071 adev->mode_info.num_dig = 6; 4072 break; 4073 case CHIP_VEGA10: 4074 case CHIP_VEGA12: 4075 case CHIP_VEGA20: 4076 adev->mode_info.num_crtc = 6; 4077 adev->mode_info.num_hpd = 6; 4078 adev->mode_info.num_dig = 6; 4079 break; 4080 #if defined(CONFIG_DRM_AMD_DC_DCN) 4081 case CHIP_RAVEN: 4082 case CHIP_RENOIR: 4083 case CHIP_VANGOGH: 4084 adev->mode_info.num_crtc = 4; 4085 adev->mode_info.num_hpd = 4; 4086 adev->mode_info.num_dig = 4; 4087 break; 4088 case CHIP_NAVI10: 4089 case CHIP_NAVI12: 4090 case CHIP_SIENNA_CICHLID: 4091 case CHIP_NAVY_FLOUNDER: 4092 adev->mode_info.num_crtc = 6; 4093 adev->mode_info.num_hpd = 6; 4094 adev->mode_info.num_dig = 6; 4095 break; 4096 case CHIP_YELLOW_CARP: 4097 adev->mode_info.num_crtc = 4; 4098 adev->mode_info.num_hpd = 4; 4099 adev->mode_info.num_dig = 4; 4100 break; 4101 case CHIP_NAVI14: 4102 case CHIP_DIMGREY_CAVEFISH: 4103 adev->mode_info.num_crtc = 5; 4104 adev->mode_info.num_hpd = 5; 4105 adev->mode_info.num_dig = 5; 4106 break; 4107 case CHIP_BEIGE_GOBY: 4108 adev->mode_info.num_crtc = 2; 4109 adev->mode_info.num_hpd = 2; 4110 adev->mode_info.num_dig = 2; 4111 break; 4112 #endif 4113 default: 4114 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 4115 return -EINVAL; 4116 } 4117 4118 amdgpu_dm_set_irq_funcs(adev); 4119 4120 if (adev->mode_info.funcs == NULL) 4121 adev->mode_info.funcs = &dm_display_funcs; 4122 4123 /* 4124 * Note: Do NOT change adev->audio_endpt_rreg and 4125 * adev->audio_endpt_wreg because they are initialised in 4126 * amdgpu_device_init() 4127 */ 4128 #if defined(CONFIG_DEBUG_KERNEL_DC) 4129 device_create_file( 4130 adev_to_drm(adev)->dev, 4131 &dev_attr_s3_debug); 4132 #endif 4133 4134 return 0; 4135 } 4136 4137 static bool modeset_required(struct 
drm_crtc_state *crtc_state, 4138 struct dc_stream_state *new_stream, 4139 struct dc_stream_state *old_stream) 4140 { 4141 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4142 } 4143 4144 static bool modereset_required(struct drm_crtc_state *crtc_state) 4145 { 4146 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4147 } 4148 4149 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4150 { 4151 drm_encoder_cleanup(encoder); 4152 kfree(encoder); 4153 } 4154 4155 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4156 .destroy = amdgpu_dm_encoder_destroy, 4157 }; 4158 4159 4160 static void get_min_max_dc_plane_scaling(struct drm_device *dev, 4161 struct drm_framebuffer *fb, 4162 int *min_downscale, int *max_upscale) 4163 { 4164 struct amdgpu_device *adev = drm_to_adev(dev); 4165 struct dc *dc = adev->dm.dc; 4166 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 4167 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 4168 4169 switch (fb->format->format) { 4170 case DRM_FORMAT_P010: 4171 case DRM_FORMAT_NV12: 4172 case DRM_FORMAT_NV21: 4173 *max_upscale = plane_cap->max_upscale_factor.nv12; 4174 *min_downscale = plane_cap->max_downscale_factor.nv12; 4175 break; 4176 4177 case DRM_FORMAT_XRGB16161616F: 4178 case DRM_FORMAT_ARGB16161616F: 4179 case DRM_FORMAT_XBGR16161616F: 4180 case DRM_FORMAT_ABGR16161616F: 4181 *max_upscale = plane_cap->max_upscale_factor.fp16; 4182 *min_downscale = plane_cap->max_downscale_factor.fp16; 4183 break; 4184 4185 default: 4186 *max_upscale = plane_cap->max_upscale_factor.argb8888; 4187 *min_downscale = plane_cap->max_downscale_factor.argb8888; 4188 break; 4189 } 4190 4191 /* 4192 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 4193 * scaling factor of 1.0 == 1000 units. 4194 */ 4195 if (*max_upscale == 1) 4196 *max_upscale = 1000; 4197 4198 if (*min_downscale == 1) 4199 *min_downscale = 1000; 4200 } 4201 4202 4203 static int fill_dc_scaling_info(const struct drm_plane_state *state, 4204 struct dc_scaling_info *scaling_info) 4205 { 4206 int scale_w, scale_h, min_downscale, max_upscale; 4207 4208 memset(scaling_info, 0, sizeof(*scaling_info)); 4209 4210 /* Source is fixed 16.16 but we ignore mantissa for now... */ 4211 scaling_info->src_rect.x = state->src_x >> 16; 4212 scaling_info->src_rect.y = state->src_y >> 16; 4213 4214 /* 4215 * For reasons we don't (yet) fully understand a non-zero 4216 * src_y coordinate into an NV12 buffer can cause a 4217 * system hang. To avoid hangs (and maybe be overly cautious) 4218 * let's reject both non-zero src_x and src_y. 4219 * 4220 * We currently know of only one use-case to reproduce a 4221 * scenario with non-zero src_x and src_y for NV12, which 4222 * is to gesture the YouTube Android app into full screen 4223 * on ChromeOS. 
4224 */ 4225 if (state->fb && 4226 state->fb->format->format == DRM_FORMAT_NV12 && 4227 (scaling_info->src_rect.x != 0 || 4228 scaling_info->src_rect.y != 0)) 4229 return -EINVAL; 4230 4231 scaling_info->src_rect.width = state->src_w >> 16; 4232 if (scaling_info->src_rect.width == 0) 4233 return -EINVAL; 4234 4235 scaling_info->src_rect.height = state->src_h >> 16; 4236 if (scaling_info->src_rect.height == 0) 4237 return -EINVAL; 4238 4239 scaling_info->dst_rect.x = state->crtc_x; 4240 scaling_info->dst_rect.y = state->crtc_y; 4241 4242 if (state->crtc_w == 0) 4243 return -EINVAL; 4244 4245 scaling_info->dst_rect.width = state->crtc_w; 4246 4247 if (state->crtc_h == 0) 4248 return -EINVAL; 4249 4250 scaling_info->dst_rect.height = state->crtc_h; 4251 4252 /* DRM doesn't specify clipping on destination output. */ 4253 scaling_info->clip_rect = scaling_info->dst_rect; 4254 4255 /* Validate scaling per-format with DC plane caps */ 4256 if (state->plane && state->plane->dev && state->fb) { 4257 get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 4258 &min_downscale, &max_upscale); 4259 } else { 4260 min_downscale = 250; 4261 max_upscale = 16000; 4262 } 4263 4264 scale_w = scaling_info->dst_rect.width * 1000 / 4265 scaling_info->src_rect.width; 4266 4267 if (scale_w < min_downscale || scale_w > max_upscale) 4268 return -EINVAL; 4269 4270 scale_h = scaling_info->dst_rect.height * 1000 / 4271 scaling_info->src_rect.height; 4272 4273 if (scale_h < min_downscale || scale_h > max_upscale) 4274 return -EINVAL; 4275 4276 /* 4277 * The "scaling_quality" can be ignored for now, quality = 0 has DC 4278 * assume reasonable defaults based on the format. 4279 */ 4280 4281 return 0; 4282 } 4283 4284 static void 4285 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 4286 uint64_t tiling_flags) 4287 { 4288 /* Fill GFX8 params */ 4289 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 4290 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 4291 4292 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 4293 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 4294 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 4295 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 4296 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 4297 4298 /* XXX fix me for VI */ 4299 tiling_info->gfx8.num_banks = num_banks; 4300 tiling_info->gfx8.array_mode = 4301 DC_ARRAY_2D_TILED_THIN1; 4302 tiling_info->gfx8.tile_split = tile_split; 4303 tiling_info->gfx8.bank_width = bankw; 4304 tiling_info->gfx8.bank_height = bankh; 4305 tiling_info->gfx8.tile_aspect = mtaspect; 4306 tiling_info->gfx8.tile_mode = 4307 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 4308 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 4309 == DC_ARRAY_1D_TILED_THIN1) { 4310 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 4311 } 4312 4313 tiling_info->gfx8.pipe_config = 4314 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 4315 } 4316 4317 static void 4318 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 4319 union dc_tiling_info *tiling_info) 4320 { 4321 tiling_info->gfx9.num_pipes = 4322 adev->gfx.config.gb_addr_config_fields.num_pipes; 4323 tiling_info->gfx9.num_banks = 4324 adev->gfx.config.gb_addr_config_fields.num_banks; 4325 tiling_info->gfx9.pipe_interleave = 4326 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 4327 tiling_info->gfx9.num_shader_engines = 4328 adev->gfx.config.gb_addr_config_fields.num_se; 4329 
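	/*
	 * Like the fields above, these are derived from the values amdgpu
	 * parsed out of the chip's GB_ADDR_CONFIG at init time; DC needs
	 * them to compute GFX9+ swizzle addressing.
	 */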
tiling_info->gfx9.max_compressed_frags = 4330 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 4331 tiling_info->gfx9.num_rb_per_se = 4332 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 4333 tiling_info->gfx9.shaderEnable = 1; 4334 if (adev->asic_type == CHIP_SIENNA_CICHLID || 4335 adev->asic_type == CHIP_NAVY_FLOUNDER || 4336 adev->asic_type == CHIP_DIMGREY_CAVEFISH || 4337 adev->asic_type == CHIP_BEIGE_GOBY || 4338 adev->asic_type == CHIP_YELLOW_CARP || 4339 adev->asic_type == CHIP_VANGOGH) 4340 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 4341 } 4342 4343 static int 4344 validate_dcc(struct amdgpu_device *adev, 4345 const enum surface_pixel_format format, 4346 const enum dc_rotation_angle rotation, 4347 const union dc_tiling_info *tiling_info, 4348 const struct dc_plane_dcc_param *dcc, 4349 const struct dc_plane_address *address, 4350 const struct plane_size *plane_size) 4351 { 4352 struct dc *dc = adev->dm.dc; 4353 struct dc_dcc_surface_param input; 4354 struct dc_surface_dcc_cap output; 4355 4356 memset(&input, 0, sizeof(input)); 4357 memset(&output, 0, sizeof(output)); 4358 4359 if (!dcc->enable) 4360 return 0; 4361 4362 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 4363 !dc->cap_funcs.get_dcc_compression_cap) 4364 return -EINVAL; 4365 4366 input.format = format; 4367 input.surface_size.width = plane_size->surface_size.width; 4368 input.surface_size.height = plane_size->surface_size.height; 4369 input.swizzle_mode = tiling_info->gfx9.swizzle; 4370 4371 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 4372 input.scan = SCAN_DIRECTION_HORIZONTAL; 4373 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 4374 input.scan = SCAN_DIRECTION_VERTICAL; 4375 4376 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 4377 return -EINVAL; 4378 4379 if (!output.capable) 4380 return -EINVAL; 4381 4382 if (dcc->independent_64b_blks == 0 && 4383 output.grph.rgb.independent_64b_blks != 0) 4384 return -EINVAL; 4385 4386 return 0; 4387 } 4388 4389 static bool 4390 modifier_has_dcc(uint64_t modifier) 4391 { 4392 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 4393 } 4394 4395 static unsigned 4396 modifier_gfx9_swizzle_mode(uint64_t modifier) 4397 { 4398 if (modifier == DRM_FORMAT_MOD_LINEAR) 4399 return 0; 4400 4401 return AMD_FMT_MOD_GET(TILE, modifier); 4402 } 4403 4404 static const struct drm_format_info * 4405 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 4406 { 4407 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 4408 } 4409 4410 static void 4411 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 4412 union dc_tiling_info *tiling_info, 4413 uint64_t modifier) 4414 { 4415 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 4416 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 4417 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 4418 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits); 4419 4420 fill_gfx9_tiling_info_from_device(adev, tiling_info); 4421 4422 if (!IS_AMD_FMT_MOD(modifier)) 4423 return; 4424 4425 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 4426 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 4427 4428 if (adev->family >= AMDGPU_FAMILY_NV) { 4429 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 4430 } else { 4431 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 4432 4433 /* for DCC we know it isn't rb aligned, so 
rb_per_se doesn't matter. */ 4434 } 4435 } 4436 4437 enum dm_micro_swizzle { 4438 MICRO_SWIZZLE_Z = 0, 4439 MICRO_SWIZZLE_S = 1, 4440 MICRO_SWIZZLE_D = 2, 4441 MICRO_SWIZZLE_R = 3 4442 }; 4443 4444 static bool dm_plane_format_mod_supported(struct drm_plane *plane, 4445 uint32_t format, 4446 uint64_t modifier) 4447 { 4448 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4449 const struct drm_format_info *info = drm_format_info(format); 4450 int i; 4451 4452 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4453 4454 if (!info) 4455 return false; 4456 4457 /* 4458 * We always have to allow these modifiers: 4459 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 4460 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 4461 */ 4462 if (modifier == DRM_FORMAT_MOD_LINEAR || 4463 modifier == DRM_FORMAT_MOD_INVALID) { 4464 return true; 4465 } 4466 4467 /* Check that the modifier is on the list of the plane's supported modifiers. */ 4468 for (i = 0; i < plane->modifier_count; i++) { 4469 if (modifier == plane->modifiers[i]) 4470 break; 4471 } 4472 if (i == plane->modifier_count) 4473 return false; 4474 4475 /* 4476 * For D swizzle the canonical modifier depends on the bpp, so check 4477 * it here. 4478 */ 4479 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 4480 adev->family >= AMDGPU_FAMILY_NV) { 4481 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 4482 return false; 4483 } 4484 4485 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 4486 info->cpp[0] < 8) 4487 return false; 4488 4489 if (modifier_has_dcc(modifier)) { 4490 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4491 if (info->cpp[0] != 4) 4492 return false; 4493 /* We support multi-planar formats, but not when combined with 4494 * additional DCC metadata planes. 
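	 * (e.g. NV12 with DCC would need metadata for each data plane,
	 * while the plane address setup only carries a single meta
	 * address and pitch).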
*/ 4495 if (info->num_planes > 1) 4496 return false; 4497 } 4498 4499 return true; 4500 } 4501 4502 static void 4503 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 4504 { 4505 if (!*mods) 4506 return; 4507 4508 if (*cap - *size < 1) { 4509 uint64_t new_cap = *cap * 2; 4510 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 4511 4512 if (!new_mods) { 4513 kfree(*mods); 4514 *mods = NULL; 4515 return; 4516 } 4517 4518 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 4519 kfree(*mods); 4520 *mods = new_mods; 4521 *cap = new_cap; 4522 } 4523 4524 (*mods)[*size] = mod; 4525 *size += 1; 4526 } 4527 4528 static void 4529 add_gfx9_modifiers(const struct amdgpu_device *adev, 4530 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4531 { 4532 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4533 int pipe_xor_bits = min(8, pipes + 4534 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 4535 int bank_xor_bits = min(8 - pipe_xor_bits, 4536 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 4537 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 4538 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 4539 4540 4541 if (adev->family == AMDGPU_FAMILY_RV) { 4542 /* Raven2 and later */ 4543 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 4544 4545 /* 4546 * No _D DCC swizzles yet because we only allow 32bpp, which 4547 * doesn't support _D on DCN 4548 */ 4549 4550 if (has_constant_encode) { 4551 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4552 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4553 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4554 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4555 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4556 AMD_FMT_MOD_SET(DCC, 1) | 4557 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4558 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4559 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 4560 } 4561 4562 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4563 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4564 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4565 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4566 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4567 AMD_FMT_MOD_SET(DCC, 1) | 4568 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4569 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4570 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 4571 4572 if (has_constant_encode) { 4573 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4574 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4575 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4576 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4577 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4578 AMD_FMT_MOD_SET(DCC, 1) | 4579 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4580 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4581 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4582 4583 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4584 AMD_FMT_MOD_SET(RB, rb) | 4585 AMD_FMT_MOD_SET(PIPE, pipes)); 4586 } 4587 4588 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4589 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4590 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4591 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4592 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4593 AMD_FMT_MOD_SET(DCC, 1) | 4594 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4595 
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4596 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4597 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 4598 AMD_FMT_MOD_SET(RB, rb) | 4599 AMD_FMT_MOD_SET(PIPE, pipes)); 4600 } 4601 4602 /* 4603 * Only supported for 64bpp on Raven, will be filtered on format in 4604 * dm_plane_format_mod_supported. 4605 */ 4606 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4607 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 4608 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4609 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4610 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 4611 4612 if (adev->family == AMDGPU_FAMILY_RV) { 4613 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4614 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4615 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4616 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4617 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 4618 } 4619 4620 /* 4621 * Only supported for 64bpp on Raven, will be filtered on format in 4622 * dm_plane_format_mod_supported. 4623 */ 4624 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4625 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 4626 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4627 4628 if (adev->family == AMDGPU_FAMILY_RV) { 4629 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4630 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 4631 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4632 } 4633 } 4634 4635 static void 4636 add_gfx10_1_modifiers(const struct amdgpu_device *adev, 4637 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4638 { 4639 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4640 4641 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4642 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4643 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4644 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4645 AMD_FMT_MOD_SET(DCC, 1) | 4646 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4647 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4648 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4649 4650 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4651 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4652 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4653 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4654 AMD_FMT_MOD_SET(DCC, 1) | 4655 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4656 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4657 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4658 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4659 4660 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4661 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4662 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4663 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 4664 4665 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4666 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4667 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4668 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 4669 4670 4671 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 4672 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4673 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 4674 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4675 4676 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4677 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 4678 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4679 } 4680 4681 static void 4682 add_gfx10_3_modifiers(const struct amdgpu_device *adev, 4683 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4684 { 4685 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4686 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 4687 4688 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4689 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4690 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4691 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4692 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4693 AMD_FMT_MOD_SET(DCC, 1) | 4694 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4695 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4696 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4697 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4698 4699 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4700 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4701 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4702 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4703 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4704 AMD_FMT_MOD_SET(DCC, 1) | 4705 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4706 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4707 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4708 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4709 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4710 4711 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4712 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4713 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4714 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4715 AMD_FMT_MOD_SET(PACKERS, pkrs)); 4716 4717 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4718 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4719 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4720 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4721 AMD_FMT_MOD_SET(PACKERS, pkrs)); 4722 4723 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 4724 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4725 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 4726 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4727 4728 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4729 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 4730 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4731 } 4732 4733 static int 4734 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 4735 { 4736 uint64_t size = 0, capacity = 128; 4737 *mods = NULL; 4738 4739 /* We have not hooked up any pre-GFX9 modifiers. */ 4740 if (adev->family < AMDGPU_FAMILY_AI) 4741 return 0; 4742 4743 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 4744 4745 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 4746 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 4747 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 4748 return *mods ? 
0 : -ENOMEM; 4749 } 4750 4751 switch (adev->family) { 4752 case AMDGPU_FAMILY_AI: 4753 case AMDGPU_FAMILY_RV: 4754 add_gfx9_modifiers(adev, mods, &size, &capacity); 4755 break; 4756 case AMDGPU_FAMILY_NV: 4757 case AMDGPU_FAMILY_VGH: 4758 case AMDGPU_FAMILY_YC: 4759 if (adev->asic_type >= CHIP_SIENNA_CICHLID) 4760 add_gfx10_3_modifiers(adev, mods, &size, &capacity); 4761 else 4762 add_gfx10_1_modifiers(adev, mods, &size, &capacity); 4763 break; 4764 } 4765 4766 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 4767 4768 /* INVALID marks the end of the list. */ 4769 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 4770 4771 if (!*mods) 4772 return -ENOMEM; 4773 4774 return 0; 4775 } 4776 4777 static int 4778 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 4779 const struct amdgpu_framebuffer *afb, 4780 const enum surface_pixel_format format, 4781 const enum dc_rotation_angle rotation, 4782 const struct plane_size *plane_size, 4783 union dc_tiling_info *tiling_info, 4784 struct dc_plane_dcc_param *dcc, 4785 struct dc_plane_address *address, 4786 const bool force_disable_dcc) 4787 { 4788 const uint64_t modifier = afb->base.modifier; 4789 int ret = 0; 4790 4791 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 4792 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); 4793 4794 if (modifier_has_dcc(modifier) && !force_disable_dcc) { 4795 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 4796 4797 dcc->enable = 1; 4798 dcc->meta_pitch = afb->base.pitches[1]; 4799 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 4800 4801 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 4802 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 4803 } 4804 4805 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 4806 if (ret) 4807 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); 4808 4809 return ret; 4810 } 4811 4812 static int 4813 fill_plane_buffer_attributes(struct amdgpu_device *adev, 4814 const struct amdgpu_framebuffer *afb, 4815 const enum surface_pixel_format format, 4816 const enum dc_rotation_angle rotation, 4817 const uint64_t tiling_flags, 4818 union dc_tiling_info *tiling_info, 4819 struct plane_size *plane_size, 4820 struct dc_plane_dcc_param *dcc, 4821 struct dc_plane_address *address, 4822 bool tmz_surface, 4823 bool force_disable_dcc) 4824 { 4825 const struct drm_framebuffer *fb = &afb->base; 4826 int ret; 4827 4828 memset(tiling_info, 0, sizeof(*tiling_info)); 4829 memset(plane_size, 0, sizeof(*plane_size)); 4830 memset(dcc, 0, sizeof(*dcc)); 4831 memset(address, 0, sizeof(*address)); 4832 4833 address->tmz_surface = tmz_surface; 4834 4835 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 4836 uint64_t addr = afb->address + fb->offsets[0]; 4837 4838 plane_size->surface_size.x = 0; 4839 plane_size->surface_size.y = 0; 4840 plane_size->surface_size.width = fb->width; 4841 plane_size->surface_size.height = fb->height; 4842 plane_size->surface_pitch = 4843 fb->pitches[0] / fb->format->cpp[0]; 4844 4845 address->type = PLN_ADDR_TYPE_GRAPHICS; 4846 address->grph.addr.low_part = lower_32_bits(addr); 4847 address->grph.addr.high_part = upper_32_bits(addr); 4848 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 4849 uint64_t luma_addr = afb->address + fb->offsets[0]; 4850 uint64_t chroma_addr = afb->address + fb->offsets[1]; 4851 4852 plane_size->surface_size.x = 0; 4853 plane_size->surface_size.y = 0; 4854 
plane_size->surface_size.width = fb->width; 4855 plane_size->surface_size.height = fb->height; 4856 plane_size->surface_pitch = 4857 fb->pitches[0] / fb->format->cpp[0]; 4858 4859 plane_size->chroma_size.x = 0; 4860 plane_size->chroma_size.y = 0; 4861 /* TODO: set these based on surface format */ 4862 plane_size->chroma_size.width = fb->width / 2; 4863 plane_size->chroma_size.height = fb->height / 2; 4864 4865 plane_size->chroma_pitch = 4866 fb->pitches[1] / fb->format->cpp[1]; 4867 4868 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 4869 address->video_progressive.luma_addr.low_part = 4870 lower_32_bits(luma_addr); 4871 address->video_progressive.luma_addr.high_part = 4872 upper_32_bits(luma_addr); 4873 address->video_progressive.chroma_addr.low_part = 4874 lower_32_bits(chroma_addr); 4875 address->video_progressive.chroma_addr.high_part = 4876 upper_32_bits(chroma_addr); 4877 } 4878 4879 if (adev->family >= AMDGPU_FAMILY_AI) { 4880 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 4881 rotation, plane_size, 4882 tiling_info, dcc, 4883 address, 4884 force_disable_dcc); 4885 if (ret) 4886 return ret; 4887 } else { 4888 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 4889 } 4890 4891 return 0; 4892 } 4893 4894 static void 4895 fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 4896 bool *per_pixel_alpha, bool *global_alpha, 4897 int *global_alpha_value) 4898 { 4899 *per_pixel_alpha = false; 4900 *global_alpha = false; 4901 *global_alpha_value = 0xff; 4902 4903 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) 4904 return; 4905 4906 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 4907 static const uint32_t alpha_formats[] = { 4908 DRM_FORMAT_ARGB8888, 4909 DRM_FORMAT_RGBA8888, 4910 DRM_FORMAT_ABGR8888, 4911 }; 4912 uint32_t format = plane_state->fb->format->format; 4913 unsigned int i; 4914 4915 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 4916 if (format == alpha_formats[i]) { 4917 *per_pixel_alpha = true; 4918 break; 4919 } 4920 } 4921 } 4922 4923 if (plane_state->alpha < 0xffff) { 4924 *global_alpha = true; 4925 *global_alpha_value = plane_state->alpha >> 8; 4926 } 4927 } 4928 4929 static int 4930 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 4931 const enum surface_pixel_format format, 4932 enum dc_color_space *color_space) 4933 { 4934 bool full_range; 4935 4936 *color_space = COLOR_SPACE_SRGB; 4937 4938 /* DRM color properties only affect non-RGB formats. 
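	 * RGB surfaces keep the sRGB default assigned above; only the
	 * YCbCr video formats consult color_encoding and color_range
	 * from the plane state.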
*/ 4939 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 4940 return 0; 4941 4942 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 4943 4944 switch (plane_state->color_encoding) { 4945 case DRM_COLOR_YCBCR_BT601: 4946 if (full_range) 4947 *color_space = COLOR_SPACE_YCBCR601; 4948 else 4949 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 4950 break; 4951 4952 case DRM_COLOR_YCBCR_BT709: 4953 if (full_range) 4954 *color_space = COLOR_SPACE_YCBCR709; 4955 else 4956 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 4957 break; 4958 4959 case DRM_COLOR_YCBCR_BT2020: 4960 if (full_range) 4961 *color_space = COLOR_SPACE_2020_YCBCR; 4962 else 4963 return -EINVAL; 4964 break; 4965 4966 default: 4967 return -EINVAL; 4968 } 4969 4970 return 0; 4971 } 4972 4973 static int 4974 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 4975 const struct drm_plane_state *plane_state, 4976 const uint64_t tiling_flags, 4977 struct dc_plane_info *plane_info, 4978 struct dc_plane_address *address, 4979 bool tmz_surface, 4980 bool force_disable_dcc) 4981 { 4982 const struct drm_framebuffer *fb = plane_state->fb; 4983 const struct amdgpu_framebuffer *afb = 4984 to_amdgpu_framebuffer(plane_state->fb); 4985 int ret; 4986 4987 memset(plane_info, 0, sizeof(*plane_info)); 4988 4989 switch (fb->format->format) { 4990 case DRM_FORMAT_C8: 4991 plane_info->format = 4992 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 4993 break; 4994 case DRM_FORMAT_RGB565: 4995 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 4996 break; 4997 case DRM_FORMAT_XRGB8888: 4998 case DRM_FORMAT_ARGB8888: 4999 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5000 break; 5001 case DRM_FORMAT_XRGB2101010: 5002 case DRM_FORMAT_ARGB2101010: 5003 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5004 break; 5005 case DRM_FORMAT_XBGR2101010: 5006 case DRM_FORMAT_ABGR2101010: 5007 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5008 break; 5009 case DRM_FORMAT_XBGR8888: 5010 case DRM_FORMAT_ABGR8888: 5011 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5012 break; 5013 case DRM_FORMAT_NV21: 5014 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5015 break; 5016 case DRM_FORMAT_NV12: 5017 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5018 break; 5019 case DRM_FORMAT_P010: 5020 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5021 break; 5022 case DRM_FORMAT_XRGB16161616F: 5023 case DRM_FORMAT_ARGB16161616F: 5024 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5025 break; 5026 case DRM_FORMAT_XBGR16161616F: 5027 case DRM_FORMAT_ABGR16161616F: 5028 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5029 break; 5030 case DRM_FORMAT_XRGB16161616: 5031 case DRM_FORMAT_ARGB16161616: 5032 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5033 break; 5034 case DRM_FORMAT_XBGR16161616: 5035 case DRM_FORMAT_ABGR16161616: 5036 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5037 break; 5038 default: 5039 DRM_ERROR( 5040 "Unsupported screen format %p4cc\n", 5041 &fb->format->format); 5042 return -EINVAL; 5043 } 5044 5045 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5046 case DRM_MODE_ROTATE_0: 5047 plane_info->rotation = ROTATION_ANGLE_0; 5048 break; 5049 case DRM_MODE_ROTATE_90: 5050 plane_info->rotation = ROTATION_ANGLE_90; 5051 break; 5052 case DRM_MODE_ROTATE_180: 5053 plane_info->rotation = ROTATION_ANGLE_180; 5054 break; 5055 case DRM_MODE_ROTATE_270: 5056 plane_info->rotation = ROTATION_ANGLE_270; 5057 
break; 5058 default: 5059 plane_info->rotation = ROTATION_ANGLE_0; 5060 break; 5061 } 5062 5063 plane_info->visible = true; 5064 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5065 5066 plane_info->layer_index = 0; 5067 5068 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5069 &plane_info->color_space); 5070 if (ret) 5071 return ret; 5072 5073 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, 5074 plane_info->rotation, tiling_flags, 5075 &plane_info->tiling_info, 5076 &plane_info->plane_size, 5077 &plane_info->dcc, address, tmz_surface, 5078 force_disable_dcc); 5079 if (ret) 5080 return ret; 5081 5082 fill_blending_from_plane_state( 5083 plane_state, &plane_info->per_pixel_alpha, 5084 &plane_info->global_alpha, &plane_info->global_alpha_value); 5085 5086 return 0; 5087 } 5088 5089 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5090 struct dc_plane_state *dc_plane_state, 5091 struct drm_plane_state *plane_state, 5092 struct drm_crtc_state *crtc_state) 5093 { 5094 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5095 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5096 struct dc_scaling_info scaling_info; 5097 struct dc_plane_info plane_info; 5098 int ret; 5099 bool force_disable_dcc = false; 5100 5101 ret = fill_dc_scaling_info(plane_state, &scaling_info); 5102 if (ret) 5103 return ret; 5104 5105 dc_plane_state->src_rect = scaling_info.src_rect; 5106 dc_plane_state->dst_rect = scaling_info.dst_rect; 5107 dc_plane_state->clip_rect = scaling_info.clip_rect; 5108 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5109 5110 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5111 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5112 afb->tiling_flags, 5113 &plane_info, 5114 &dc_plane_state->address, 5115 afb->tmz_surface, 5116 force_disable_dcc); 5117 if (ret) 5118 return ret; 5119 5120 dc_plane_state->format = plane_info.format; 5121 dc_plane_state->color_space = plane_info.color_space; 5123 dc_plane_state->plane_size = plane_info.plane_size; 5124 dc_plane_state->rotation = plane_info.rotation; 5125 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; 5126 dc_plane_state->stereo_format = plane_info.stereo_format; 5127 dc_plane_state->tiling_info = plane_info.tiling_info; 5128 dc_plane_state->visible = plane_info.visible; 5129 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; 5130 dc_plane_state->global_alpha = plane_info.global_alpha; 5131 dc_plane_state->global_alpha_value = plane_info.global_alpha_value; 5132 dc_plane_state->dcc = plane_info.dcc; 5133 dc_plane_state->layer_index = plane_info.layer_index; // Currently always 0 5134 dc_plane_state->flip_int_enabled = true; 5135 5136 /* 5137 * Always set input transfer function, since plane state is refreshed 5138 * every time. 5139 */ 5140 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); 5141 if (ret) 5142 return ret; 5143 5144 return 0; 5145 } 5146 5147 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5148 const struct dm_connector_state *dm_state, 5149 struct dc_stream_state *stream) 5150 { 5151 enum amdgpu_rmx_type rmx_type; 5152 5153 struct rect src = { 0 }; /* viewport in composition space */ 5154 struct rect dst = { 0 }; /* stream addressable area */ 5155 5156 /* no mode.
nothing to be done */ 5157 if (!mode) 5158 return; 5159 5160 /* Full screen scaling by default */ 5161 src.width = mode->hdisplay; 5162 src.height = mode->vdisplay; 5163 dst.width = stream->timing.h_addressable; 5164 dst.height = stream->timing.v_addressable; 5165 5166 if (dm_state) { 5167 rmx_type = dm_state->scaling; 5168 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5169 if (src.width * dst.height < 5170 src.height * dst.width) { 5171 /* height needs less upscaling/more downscaling */ 5172 dst.width = src.width * 5173 dst.height / src.height; 5174 } else { 5175 /* width needs less upscaling/more downscaling */ 5176 dst.height = src.height * 5177 dst.width / src.width; 5178 } 5179 } else if (rmx_type == RMX_CENTER) { 5180 dst = src; 5181 } 5182 5183 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5184 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5185 5186 if (dm_state->underscan_enable) { 5187 dst.x += dm_state->underscan_hborder / 2; 5188 dst.y += dm_state->underscan_vborder / 2; 5189 dst.width -= dm_state->underscan_hborder; 5190 dst.height -= dm_state->underscan_vborder; 5191 } 5192 } 5193 5194 stream->src = src; 5195 stream->dst = dst; 5196 5197 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5198 dst.x, dst.y, dst.width, dst.height); 5199 5200 } 5201 5202 static enum dc_color_depth 5203 convert_color_depth_from_display_info(const struct drm_connector *connector, 5204 bool is_y420, int requested_bpc) 5205 { 5206 uint8_t bpc; 5207 5208 if (is_y420) { 5209 bpc = 8; 5210 5211 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5212 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5213 bpc = 16; 5214 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5215 bpc = 12; 5216 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5217 bpc = 10; 5218 } else { 5219 bpc = (uint8_t)connector->display_info.bpc; 5220 /* Assume 8 bpc by default if no bpc is specified. */ 5221 bpc = bpc ? bpc : 8; 5222 } 5223 5224 if (requested_bpc > 0) { 5225 /* 5226 * Cap display bpc based on the user requested value. 5227 * 5228 * The value for state->max_bpc may not be correctly updated 5229 * depending on when the connector gets added to the state 5230 * or if this was called outside of atomic check, so it 5231 * can't be used directly. 5232 */ 5233 bpc = min_t(u8, bpc, requested_bpc); 5234 5235 /* Round down to the nearest even number. */ 5236 bpc = bpc - (bpc & 1); 5237 } 5238 5239 switch (bpc) { 5240 case 0: 5241 /* 5242 * Temporary workaround: DRM doesn't parse color depth for 5243 * EDID revisions before 1.4 5244 * TODO: Fix edid parsing 5245 */ 5246 return COLOR_DEPTH_888; 5247 case 6: 5248 return COLOR_DEPTH_666; 5249 case 8: 5250 return COLOR_DEPTH_888; 5251 case 10: 5252 return COLOR_DEPTH_101010; 5253 case 12: 5254 return COLOR_DEPTH_121212; 5255 case 14: 5256 return COLOR_DEPTH_141414; 5257 case 16: 5258 return COLOR_DEPTH_161616; 5259 default: 5260 return COLOR_DEPTH_UNDEFINED; 5261 } 5262 } 5263 5264 static enum dc_aspect_ratio 5265 get_aspect_ratio(const struct drm_display_mode *mode_in) 5266 { 5267 /* 1-1 mapping, since both enums follow the HDMI spec.
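	 * Both enumerate no-data, 4:3, 16:9, 64:27 and 256:135 in the
	 * same order, so the cast below is safe.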
*/ 5268 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 5269 } 5270 5271 static enum dc_color_space 5272 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) 5273 { 5274 enum dc_color_space color_space = COLOR_SPACE_SRGB; 5275 5276 switch (dc_crtc_timing->pixel_encoding) { 5277 case PIXEL_ENCODING_YCBCR422: 5278 case PIXEL_ENCODING_YCBCR444: 5279 case PIXEL_ENCODING_YCBCR420: 5280 { 5281 /* 5282 * 27030 kHz (27.03 MHz) is the separation point between HDTV and 5283 * SDTV according to the HDMI spec; we use YCbCr709 and YCbCr601 5284 * respectively 5285 */ 5286 if (dc_crtc_timing->pix_clk_100hz > 270300) { 5287 if (dc_crtc_timing->flags.Y_ONLY) 5288 color_space = 5289 COLOR_SPACE_YCBCR709_LIMITED; 5290 else 5291 color_space = COLOR_SPACE_YCBCR709; 5292 } else { 5293 if (dc_crtc_timing->flags.Y_ONLY) 5294 color_space = 5295 COLOR_SPACE_YCBCR601_LIMITED; 5296 else 5297 color_space = COLOR_SPACE_YCBCR601; 5298 } 5299 5300 } 5301 break; 5302 case PIXEL_ENCODING_RGB: 5303 color_space = COLOR_SPACE_SRGB; 5304 break; 5305 5306 default: 5307 WARN_ON(1); 5308 break; 5309 } 5310 5311 return color_space; 5312 } 5313 5314 static bool adjust_colour_depth_from_display_info( 5315 struct dc_crtc_timing *timing_out, 5316 const struct drm_display_info *info) 5317 { 5318 enum dc_color_depth depth = timing_out->display_color_depth; 5319 int normalized_clk; 5320 do { 5321 normalized_clk = timing_out->pix_clk_100hz / 10; 5322 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 5323 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 5324 normalized_clk /= 2; 5325 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */ 5326 switch (depth) { 5327 case COLOR_DEPTH_888: 5328 break; 5329 case COLOR_DEPTH_101010: 5330 normalized_clk = (normalized_clk * 30) / 24; 5331 break; 5332 case COLOR_DEPTH_121212: 5333 normalized_clk = (normalized_clk * 36) / 24; 5334 break; 5335 case COLOR_DEPTH_161616: 5336 normalized_clk = (normalized_clk * 48) / 24; 5337 break; 5338 default: 5339 /* The above depths are the only ones valid for HDMI.
*/ 5340 return false; 5341 } 5342 if (normalized_clk <= info->max_tmds_clock) { 5343 timing_out->display_color_depth = depth; 5344 return true; 5345 } 5346 } while (--depth > COLOR_DEPTH_666); 5347 return false; 5348 } 5349 5350 static void fill_stream_properties_from_drm_display_mode( 5351 struct dc_stream_state *stream, 5352 const struct drm_display_mode *mode_in, 5353 const struct drm_connector *connector, 5354 const struct drm_connector_state *connector_state, 5355 const struct dc_stream_state *old_stream, 5356 int requested_bpc) 5357 { 5358 struct dc_crtc_timing *timing_out = &stream->timing; 5359 const struct drm_display_info *info = &connector->display_info; 5360 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5361 struct hdmi_vendor_infoframe hv_frame; 5362 struct hdmi_avi_infoframe avi_frame; 5363 5364 memset(&hv_frame, 0, sizeof(hv_frame)); 5365 memset(&avi_frame, 0, sizeof(avi_frame)); 5366 5367 timing_out->h_border_left = 0; 5368 timing_out->h_border_right = 0; 5369 timing_out->v_border_top = 0; 5370 timing_out->v_border_bottom = 0; 5371 /* TODO: un-hardcode */ 5372 if (drm_mode_is_420_only(info, mode_in) 5373 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5374 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5375 else if (drm_mode_is_420_also(info, mode_in) 5376 && aconnector->force_yuv420_output) 5377 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5378 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 5379 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5380 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 5381 else 5382 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 5383 5384 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 5385 timing_out->display_color_depth = convert_color_depth_from_display_info( 5386 connector, 5387 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 5388 requested_bpc); 5389 timing_out->scan_type = SCANNING_TYPE_NODATA; 5390 timing_out->hdmi_vic = 0; 5391 5392 if (old_stream) { 5393 timing_out->vic = old_stream->timing.vic; 5394 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 5395 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 5396 } else { 5397 timing_out->vic = drm_match_cea_mode(mode_in); 5398 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 5399 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 5400 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 5401 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 5402 } 5403 5404 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5405 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 5406 timing_out->vic = avi_frame.video_code; 5407 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 5408 timing_out->hdmi_vic = hv_frame.vic; 5409 } 5410 5411 if (is_freesync_video_mode(mode_in, aconnector)) { 5412 timing_out->h_addressable = mode_in->hdisplay; 5413 timing_out->h_total = mode_in->htotal; 5414 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 5415 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 5416 timing_out->v_total = mode_in->vtotal; 5417 timing_out->v_addressable = mode_in->vdisplay; 5418 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 5419 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 5420 timing_out->pix_clk_100hz = mode_in->clock * 10; 5421 } else {
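		/*
		 * Otherwise use the crtc_* timings, which
		 * drm_mode_set_crtcinfo() has already adjusted for things
		 * like interlace and double-scan.
		 */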
timing_out->h_addressable = mode_in->crtc_hdisplay; 5423 timing_out->h_total = mode_in->crtc_htotal; 5424 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 5425 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 5426 timing_out->v_total = mode_in->crtc_vtotal; 5427 timing_out->v_addressable = mode_in->crtc_vdisplay; 5428 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 5429 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 5430 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 5431 } 5432 5433 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5434 5435 stream->output_color_space = get_output_color_space(timing_out); 5436 5437 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5438 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5439 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5440 if (!adjust_colour_depth_from_display_info(timing_out, info) && 5441 drm_mode_is_420_also(info, mode_in) && 5442 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 5443 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5444 adjust_colour_depth_from_display_info(timing_out, info); 5445 } 5446 } 5447 } 5448 5449 static void fill_audio_info(struct audio_info *audio_info, 5450 const struct drm_connector *drm_connector, 5451 const struct dc_sink *dc_sink) 5452 { 5453 int i = 0; 5454 int cea_revision = 0; 5455 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 5456 5457 audio_info->manufacture_id = edid_caps->manufacturer_id; 5458 audio_info->product_id = edid_caps->product_id; 5459 5460 cea_revision = drm_connector->display_info.cea_rev; 5461 5462 #ifdef __linux__ 5463 strscpy(audio_info->display_name, 5464 edid_caps->display_name, 5465 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 5466 #else 5467 strncpy(audio_info->display_name, 5468 edid_caps->display_name, 5469 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1); 5470 #endif 5471 5472 if (cea_revision >= 3) { 5473 audio_info->mode_count = edid_caps->audio_mode_count; 5474 5475 for (i = 0; i < audio_info->mode_count; ++i) { 5476 audio_info->modes[i].format_code = 5477 (enum audio_format_code) 5478 (edid_caps->audio_modes[i].format_code); 5479 audio_info->modes[i].channel_count = 5480 edid_caps->audio_modes[i].channel_count; 5481 audio_info->modes[i].sample_rates.all = 5482 edid_caps->audio_modes[i].sample_rate; 5483 audio_info->modes[i].sample_size = 5484 edid_caps->audio_modes[i].sample_size; 5485 } 5486 } 5487 5488 audio_info->flags.all = edid_caps->speaker_flags; 5489 5490 /* TODO: We only check for the progressive mode, check for interlace mode too */ 5491 if (drm_connector->latency_present[0]) { 5492 audio_info->video_latency = drm_connector->video_latency[0]; 5493 audio_info->audio_latency = drm_connector->audio_latency[0]; 5494 } 5495 5496 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 5497 5498 } 5499 5500 static void 5501 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 5502 struct drm_display_mode *dst_mode) 5503 { 5504 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 5505 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 5506 dst_mode->crtc_clock = src_mode->crtc_clock; 5507 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 5508 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 5509 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 5510 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 5511 dst_mode->crtc_htotal = 
src_mode->crtc_htotal; 5512 dst_mode->crtc_hskew = src_mode->crtc_hskew; 5513 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 5514 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 5515 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 5516 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 5517 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 5518 } 5519 5520 static void 5521 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 5522 const struct drm_display_mode *native_mode, 5523 bool scale_enabled) 5524 { 5525 if (scale_enabled) { 5526 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5527 } else if (native_mode->clock == drm_mode->clock && 5528 native_mode->htotal == drm_mode->htotal && 5529 native_mode->vtotal == drm_mode->vtotal) { 5530 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5531 } else { 5532 /* no scaling requested and no amdgpu-inserted mode; no need to patch */ 5533 } 5534 } 5535 5536 static struct dc_sink * 5537 create_fake_sink(struct amdgpu_dm_connector *aconnector) 5538 { 5539 struct dc_sink_init_data sink_init_data = { 0 }; 5540 struct dc_sink *sink = NULL; 5541 sink_init_data.link = aconnector->dc_link; 5542 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 5543 5544 sink = dc_sink_create(&sink_init_data); 5545 if (!sink) { 5546 DRM_ERROR("Failed to create sink!\n"); 5547 return NULL; 5548 } 5549 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 5550 5551 return sink; 5552 } 5553 5554 static void set_multisync_trigger_params( 5555 struct dc_stream_state *stream) 5556 { 5557 struct dc_stream_state *master = NULL; 5558 5559 if (stream->triggered_crtc_reset.enabled) { 5560 master = stream->triggered_crtc_reset.event_source; 5561 stream->triggered_crtc_reset.event = 5562 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5563 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING; 5564 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL; 5565 } 5566 } 5567 5568 static void set_master_stream(struct dc_stream_state *stream_set[], 5569 int stream_count) 5570 { 5571 int j, highest_rfr = 0, master_stream = 0; 5572 5573 for (j = 0; j < stream_count; j++) { 5574 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 5575 int refresh_rate = 0; 5576 5577 refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) / 5578 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total); 5579 if (refresh_rate > highest_rfr) { 5580 highest_rfr = refresh_rate; 5581 master_stream = j; 5582 } 5583 } 5584 } 5585 for (j = 0; j < stream_count; j++) { 5586 if (stream_set[j]) 5587 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 5588 } 5589 }
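/*
 * Editorial worked example for the integer math above: with the CEA
 * 1920x1080@60 timing, pix_clk_100hz = 1485000, h_total = 2200 and
 * v_total = 1125, so
 *
 *   refresh_rate = (1485000 * 100) / (2200 * 1125) = 60
 *
 * i.e. the division yields the nominal refresh rate in Hz, and the stream
 * with the highest rate becomes the reset event source for every stream
 * in the set.
 */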
5590 5591 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) 5592 { 5593 int i = 0; 5594 struct dc_stream_state *stream; 5595 5596 if (context->stream_count < 2) 5597 return; 5598 for (i = 0; i < context->stream_count; i++) { 5599 if (!context->streams[i]) 5600 continue; 5601 /* 5602 * TODO: add a function to read AMD VSDB bits and set 5603 * crtc_sync_master.multi_sync_enabled flag 5604 * For now it's set to false 5605 */ 5606 } 5607 5608 set_master_stream(context->streams, context->stream_count); 5609 5610 for (i = 0; i < context->stream_count; i++) { 5611 stream = context->streams[i]; 5612 5613 if (!stream) 5614 continue; 5615 5616 set_multisync_trigger_params(stream); 5617 } 5618 } 5619 5620 #if defined(CONFIG_DRM_AMD_DC_DCN) 5621 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 5622 struct dc_sink *sink, struct dc_stream_state *stream, 5623 struct dsc_dec_dpcd_caps *dsc_caps) 5624 { 5625 stream->timing.flags.DSC = 0; 5626 dsc_caps->is_dsc_supported = false; 5627 5628 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 5629 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 5630 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 5631 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 5632 dsc_caps); 5633 } 5634 } 5635 5636 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 5637 struct dc_sink *sink, struct dc_stream_state *stream, 5638 struct dsc_dec_dpcd_caps *dsc_caps) 5639 { 5640 struct drm_connector *drm_connector = &aconnector->base; 5641 uint32_t link_bandwidth_kbps; 5642 5643 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 5644 dc_link_get_link_cap(aconnector->dc_link)); 5645 /* Set DSC policy according to dsc_clock_en */ 5646 dc_dsc_policy_set_enable_dsc_when_not_needed( 5647 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 5648 5649 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 5650 5651 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 5652 dsc_caps, 5653 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, 5654 0, 5655 link_bandwidth_kbps, 5656 &stream->timing, 5657 &stream->timing.dsc_cfg)) { 5658 stream->timing.flags.DSC = 1; 5659 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); 5660 } 5661 } 5662 5663 /* Overwrite the stream flag if DSC is enabled through debugfs */ 5664 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 5665 stream->timing.flags.DSC = 1; 5666 5667 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 5668 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 5669 5670 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 5671 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 5672 5673 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 5674 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 5675 } 5676 #endif 5677 5678 /** 5679 * DOC: FreeSync Video 5680 * 5681 * When a userspace application wants to play a video, the content follows a 5682 * standard format definition that usually specifies the FPS for that format. 5683 * The list below illustrates some video formats and their expected FPS, 5684 * respectively: 5685 * 5686 * - TV/NTSC (23.976 FPS) 5687 * - Cinema (24 FPS) 5688 * - TV/PAL (25 FPS) 5689 * - TV/NTSC (29.97 FPS) 5690 * - TV/NTSC (30 FPS) 5691 * - Cinema HFR (48 FPS) 5692 * - TV/PAL (50 FPS) 5693 * - Commonly used (60 FPS) 5694 * - Multiples of 24 (48,72,96 FPS) 5695 * 5696 * The list of standard video formats is not huge, so these modes can be added 5697 * to the connector's mode list beforehand. With that, userspace can leverage 5698 * FreeSync to extend the front porch in order to attain the target refresh 5699 * rate. Such a switch happens seamlessly, without screen blanking or 5700 * reprogramming of the output in any other way. If userspace requests a 5701 * modeset that is compatible with a FreeSync mode and differs only in the 5702 * refresh rate, DC will skip the full update and avoid a blink during the 5703 * transition. For example, a video player can switch the mode from 5704 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen, without 5705 * causing any display blink. The same concept applies to any such mode 5706 * setting change. 5707 */
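/*
 * Editorial worked example (not driver code): a 1920x1080 base mode with
 * v_total = 1125 and a 148.5 MHz pixel clock refreshes at 60 Hz. A
 * seamless 48 Hz variant only needs a longer vertical front porch, so
 * that approximately
 *
 *   new_v_total = 1125 * 60 / 48 = 1406
 *
 * while the pixel clock and every horizontal timing stay untouched. Only
 * the vertical front porch differing is exactly the condition that
 * is_freesync_video_mode() below checks for.
 */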
5708 static struct drm_display_mode * 5709 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, 5710 bool use_probed_modes) 5711 { 5712 struct drm_display_mode *m, *m_pref = NULL; 5713 u16 current_refresh, highest_refresh; 5714 struct list_head *list_head = use_probed_modes ? 5715 &aconnector->base.probed_modes : 5716 &aconnector->base.modes; 5717 5718 if (aconnector->freesync_vid_base.clock != 0) 5719 return &aconnector->freesync_vid_base; 5720 5721 /* Find the preferred mode */ 5722 list_for_each_entry(m, list_head, head) { 5723 if (m->type & DRM_MODE_TYPE_PREFERRED) { 5724 m_pref = m; 5725 break; 5726 } 5727 } 5728 5729 if (!m_pref) { 5730 /* Probably an EDID with no preferred mode. Fall back to the first entry */ 5731 m_pref = list_first_entry_or_null( 5732 &aconnector->base.modes, struct drm_display_mode, head); 5733 if (!m_pref) { 5734 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 5735 return NULL; 5736 } 5737 } 5738 5739 highest_refresh = drm_mode_vrefresh(m_pref); 5740 5741 /* 5742 * Find the mode with the highest refresh rate at the same resolution. 5743 * For some monitors, the preferred mode is not the mode with the 5744 * highest supported refresh rate. 5745 */ 5746 list_for_each_entry(m, list_head, head) { 5747 current_refresh = drm_mode_vrefresh(m); 5748 5749 if (m->hdisplay == m_pref->hdisplay && 5750 m->vdisplay == m_pref->vdisplay && 5751 highest_refresh < current_refresh) { 5752 highest_refresh = current_refresh; 5753 m_pref = m; 5754 } 5755 } 5756 5757 aconnector->freesync_vid_base = *m_pref; 5758 return m_pref; 5759 } 5760 5761 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 5762 struct amdgpu_dm_connector *aconnector) 5763 { 5764 struct drm_display_mode *high_mode; 5765 int timing_diff; 5766 5767 high_mode = get_highest_refresh_rate_mode(aconnector, false); 5768 if (!high_mode || !mode) 5769 return false; 5770 5771 timing_diff = high_mode->vtotal - mode->vtotal; 5772 5773 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 5774 high_mode->hdisplay != mode->hdisplay || 5775 high_mode->vdisplay != mode->vdisplay || 5776 high_mode->hsync_start != mode->hsync_start || 5777 high_mode->hsync_end != mode->hsync_end || 5778 high_mode->htotal != mode->htotal || 5779 high_mode->hskew != mode->hskew || 5780 high_mode->vscan != mode->vscan || 5781 high_mode->vsync_start - mode->vsync_start != timing_diff || 5782 high_mode->vsync_end - mode->vsync_end != timing_diff) 5783 return false; 5784 else 5785 return true; 5786 } 5787 5788 static struct dc_stream_state * 5789 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 5790 const struct drm_display_mode *drm_mode, 5791 const struct dm_connector_state *dm_state, 5792 const struct dc_stream_state *old_stream, 5793 int requested_bpc) 5794 { 5795 struct drm_display_mode *preferred_mode = NULL; 5796 struct drm_connector *drm_connector; 5797 const struct drm_connector_state *con_state = 5798 dm_state ? &dm_state->base : NULL; 5799 struct dc_stream_state *stream = NULL; 5800 struct drm_display_mode mode = *drm_mode; 5801 struct drm_display_mode saved_mode; 5802 struct drm_display_mode *freesync_mode = NULL; 5803 bool native_mode_found = false; 5804 bool recalculate_timing = false; 5805 bool scale = dm_state ?
(dm_state->scaling != RMX_OFF) : false; 5806 int mode_refresh; 5807 int preferred_refresh = 0; 5808 #if defined(CONFIG_DRM_AMD_DC_DCN) 5809 struct dsc_dec_dpcd_caps dsc_caps; 5810 #endif 5811 struct dc_sink *sink = NULL; 5812 5813 memset(&saved_mode, 0, sizeof(saved_mode)); 5814 5815 if (aconnector == NULL) { 5816 DRM_ERROR("aconnector is NULL!\n"); 5817 return stream; 5818 } 5819 5820 drm_connector = &aconnector->base; 5821 5822 if (!aconnector->dc_sink) { 5823 sink = create_fake_sink(aconnector); 5824 if (!sink) 5825 return stream; 5826 } else { 5827 sink = aconnector->dc_sink; 5828 dc_sink_retain(sink); 5829 } 5830 5831 stream = dc_create_stream_for_sink(sink); 5832 5833 if (stream == NULL) { 5834 DRM_ERROR("Failed to create stream for sink!\n"); 5835 goto finish; 5836 } 5837 5838 stream->dm_stream_context = aconnector; 5839 5840 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 5841 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 5842 5843 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 5844 /* Search for preferred mode */ 5845 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 5846 native_mode_found = true; 5847 break; 5848 } 5849 } 5850 if (!native_mode_found) 5851 preferred_mode = list_first_entry_or_null( 5852 &aconnector->base.modes, 5853 struct drm_display_mode, 5854 head); 5855 5856 mode_refresh = drm_mode_vrefresh(&mode); 5857 5858 if (preferred_mode == NULL) { 5859 /* 5860 * This may not be an error, the use case is when we have no 5861 * usermode calls to reset and set mode upon hotplug. In this 5862 * case, we call set mode ourselves to restore the previous mode 5863 * and the modelist may not be filled in in time. 5864 */ 5865 DRM_DEBUG_DRIVER("No preferred mode found\n"); 5866 } else { 5867 recalculate_timing = amdgpu_freesync_vid_mode && 5868 is_freesync_video_mode(&mode, aconnector); 5869 if (recalculate_timing) { 5870 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 5871 saved_mode = mode; 5872 mode = *freesync_mode; 5873 } else { 5874 decide_crtc_timing_for_drm_display_mode( 5875 &mode, preferred_mode, scale); 5876 5877 preferred_refresh = drm_mode_vrefresh(preferred_mode); 5878 } 5879 } 5880 5881 if (recalculate_timing) 5882 drm_mode_set_crtcinfo(&saved_mode, 0); 5883 else if (!dm_state) 5884 drm_mode_set_crtcinfo(&mode, 0); 5885 5886 /* 5887 * If scaling is enabled and refresh rate didn't change 5888 * we copy the vic and polarities of the old timings 5889 */ 5890 if (!scale || mode_refresh != preferred_refresh) 5891 fill_stream_properties_from_drm_display_mode( 5892 stream, &mode, &aconnector->base, con_state, NULL, 5893 requested_bpc); 5894 else 5895 fill_stream_properties_from_drm_display_mode( 5896 stream, &mode, &aconnector->base, con_state, old_stream, 5897 requested_bpc); 5898 5899 #if defined(CONFIG_DRM_AMD_DC_DCN) 5900 /* SST DSC determination policy */ 5901 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 5902 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 5903 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 5904 #endif 5905 5906 update_stream_scaling_settings(&mode, dm_state, stream); 5907 5908 fill_audio_info( 5909 &stream->audio_info, 5910 drm_connector, 5911 sink); 5912 5913 update_stream_signal(stream, sink); 5914 5915 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5916 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 5917 5918 if (stream->link->psr_settings.psr_feature_enabled) { 5919 // 5920 // should decide stream 
support for VSC SDP colorimetry 5921 // before building the VSC info packet 5922 // 5923 stream->use_vsc_sdp_for_colorimetry = false; 5924 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 5925 stream->use_vsc_sdp_for_colorimetry = 5926 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; 5927 } else { 5928 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) 5929 stream->use_vsc_sdp_for_colorimetry = true; 5930 } 5931 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); 5932 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 5933 5934 } 5935 finish: 5936 dc_sink_release(sink); 5937 5938 return stream; 5939 } 5940 5941 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) 5942 { 5943 drm_crtc_cleanup(crtc); 5944 kfree(crtc); 5945 } 5946 5947 static void dm_crtc_destroy_state(struct drm_crtc *crtc, 5948 struct drm_crtc_state *state) 5949 { 5950 struct dm_crtc_state *cur = to_dm_crtc_state(state); 5951 5952 /* TODO: Destroy dc_stream objects once the stream object is flattened */ 5953 if (cur->stream) 5954 dc_stream_release(cur->stream); 5955 5956 5957 __drm_atomic_helper_crtc_destroy_state(state); 5958 5959 5960 kfree(state); 5961 } 5962 5963 static void dm_crtc_reset_state(struct drm_crtc *crtc) 5964 { 5965 struct dm_crtc_state *state; 5966 5967 if (crtc->state) 5968 dm_crtc_destroy_state(crtc, crtc->state); 5969 5970 state = kzalloc(sizeof(*state), GFP_KERNEL); 5971 if (WARN_ON(!state)) 5972 return; 5973 5974 __drm_atomic_helper_crtc_reset(crtc, &state->base); 5975 } 5976 5977 static struct drm_crtc_state * 5978 dm_crtc_duplicate_state(struct drm_crtc *crtc) 5979 { 5980 struct dm_crtc_state *state, *cur; 5981 5982 if (WARN_ON(!crtc->state)) 5983 return NULL; 5984 5985 cur = to_dm_crtc_state(crtc->state); 5986 5987 state = kzalloc(sizeof(*state), GFP_KERNEL); 5988 if (!state) 5989 return NULL; 5990 5991 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); 5992 5993 if (cur->stream) { 5994 state->stream = cur->stream; 5995 dc_stream_retain(state->stream); 5996 } 5997 5998 state->active_planes = cur->active_planes; 5999 state->vrr_infopacket = cur->vrr_infopacket; 6000 state->abm_level = cur->abm_level; 6001 state->vrr_supported = cur->vrr_supported; 6002 state->freesync_config = cur->freesync_config; 6003 state->cm_has_degamma = cur->cm_has_degamma; 6004 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; 6005 /* TODO: Duplicate dc_stream once the stream object is flattened */ 6006 6007 return &state->base; 6008 } 6009 6010 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 6011 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) 6012 { 6013 crtc_debugfs_init(crtc); 6014 6015 return 0; 6016 } 6017 #endif 6018 6019 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) 6020 { 6021 enum dc_irq_source irq_source; 6022 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6023 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6024 int rc; 6025 6026 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; 6027 6028 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 6029 6030 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 6031 acrtc->crtc_id, enable ? "en" : "dis", rc); 6032 return rc; 6033 }
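/*
 * Editorial note on the pairing below: dm_set_vblank() requests the
 * VUPDATE interrupt above only while variable refresh rate is active,
 * because with VRR the vertical blank is stretched at runtime and the
 * driver tracks the additional VUPDATE event for frame timing (see the
 * VRR handling in the interrupt handlers elsewhere in this file). A
 * minimal usage sketch from the DRM side (illustrative):
 *
 *   if (drm_crtc_vblank_get(crtc) == 0) {  // ends up in dm_enable_vblank()
 *           ...wait for or count vblanks...
 *           drm_crtc_vblank_put(crtc);     // ends up in dm_disable_vblank()
 *   }
 */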
"en" : "dis", rc); 6032 return rc; 6033 } 6034 6035 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) 6036 { 6037 enum dc_irq_source irq_source; 6038 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6039 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6040 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); 6041 #if defined(CONFIG_DRM_AMD_DC_DCN) 6042 struct amdgpu_display_manager *dm = &adev->dm; 6043 struct vblank_control_work *work; 6044 #endif 6045 int rc = 0; 6046 6047 if (enable) { 6048 /* vblank irq on -> Only need vupdate irq in vrr mode */ 6049 if (amdgpu_dm_vrr_active(acrtc_state)) 6050 rc = dm_set_vupdate_irq(crtc, true); 6051 } else { 6052 /* vblank irq off -> vupdate irq off */ 6053 rc = dm_set_vupdate_irq(crtc, false); 6054 } 6055 6056 if (rc) 6057 return rc; 6058 6059 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 6060 6061 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 6062 return -EBUSY; 6063 6064 if (amdgpu_in_reset(adev)) 6065 return 0; 6066 6067 #if defined(CONFIG_DRM_AMD_DC_DCN) 6068 if (dm->vblank_control_workqueue) { 6069 work = kzalloc(sizeof(*work), GFP_ATOMIC); 6070 if (!work) 6071 return -ENOMEM; 6072 6073 INIT_WORK(&work->work, vblank_control_worker); 6074 work->dm = dm; 6075 work->acrtc = acrtc; 6076 work->enable = enable; 6077 6078 if (acrtc_state->stream) { 6079 dc_stream_retain(acrtc_state->stream); 6080 work->stream = acrtc_state->stream; 6081 } 6082 6083 queue_work(dm->vblank_control_workqueue, &work->work); 6084 } 6085 #endif 6086 6087 return 0; 6088 } 6089 6090 static int dm_enable_vblank(struct drm_crtc *crtc) 6091 { 6092 return dm_set_vblank(crtc, true); 6093 } 6094 6095 static void dm_disable_vblank(struct drm_crtc *crtc) 6096 { 6097 dm_set_vblank(crtc, false); 6098 } 6099 6100 /* Implemented only the options currently availible for the driver */ 6101 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 6102 .reset = dm_crtc_reset_state, 6103 .destroy = amdgpu_dm_crtc_destroy, 6104 .set_config = drm_atomic_helper_set_config, 6105 .page_flip = drm_atomic_helper_page_flip, 6106 .atomic_duplicate_state = dm_crtc_duplicate_state, 6107 .atomic_destroy_state = dm_crtc_destroy_state, 6108 .set_crc_source = amdgpu_dm_crtc_set_crc_source, 6109 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, 6110 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, 6111 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 6112 .enable_vblank = dm_enable_vblank, 6113 .disable_vblank = dm_disable_vblank, 6114 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 6115 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 6116 .late_register = amdgpu_dm_crtc_late_register, 6117 #endif 6118 }; 6119 6120 static enum drm_connector_status 6121 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6122 { 6123 bool connected; 6124 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6125 6126 /* 6127 * Notes: 6128 * 1. This interface is NOT called in context of HPD irq. 6129 * 2. This interface *is called* in context of user-mode ioctl. Which 6130 * makes it a bad place for *any* MST-related activity. 6131 */ 6132 6133 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6134 !aconnector->fake_enable) 6135 connected = (aconnector->dc_sink != NULL); 6136 else 6137 connected = (aconnector->base.force == DRM_FORCE_ON); 6138 6139 update_subconnector_property(aconnector); 6140 6141 return (connected ? 
connector_status_connected : 6142 connector_status_disconnected); 6143 } 6144 6145 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6146 struct drm_connector_state *connector_state, 6147 struct drm_property *property, 6148 uint64_t val) 6149 { 6150 struct drm_device *dev = connector->dev; 6151 struct amdgpu_device *adev = drm_to_adev(dev); 6152 struct dm_connector_state *dm_old_state = 6153 to_dm_connector_state(connector->state); 6154 struct dm_connector_state *dm_new_state = 6155 to_dm_connector_state(connector_state); 6156 6157 int ret = -EINVAL; 6158 6159 if (property == dev->mode_config.scaling_mode_property) { 6160 enum amdgpu_rmx_type rmx_type; 6161 6162 switch (val) { 6163 case DRM_MODE_SCALE_CENTER: 6164 rmx_type = RMX_CENTER; 6165 break; 6166 case DRM_MODE_SCALE_ASPECT: 6167 rmx_type = RMX_ASPECT; 6168 break; 6169 case DRM_MODE_SCALE_FULLSCREEN: 6170 rmx_type = RMX_FULL; 6171 break; 6172 case DRM_MODE_SCALE_NONE: 6173 default: 6174 rmx_type = RMX_OFF; 6175 break; 6176 } 6177 6178 if (dm_old_state->scaling == rmx_type) 6179 return 0; 6180 6181 dm_new_state->scaling = rmx_type; 6182 ret = 0; 6183 } else if (property == adev->mode_info.underscan_hborder_property) { 6184 dm_new_state->underscan_hborder = val; 6185 ret = 0; 6186 } else if (property == adev->mode_info.underscan_vborder_property) { 6187 dm_new_state->underscan_vborder = val; 6188 ret = 0; 6189 } else if (property == adev->mode_info.underscan_property) { 6190 dm_new_state->underscan_enable = val; 6191 ret = 0; 6192 } else if (property == adev->mode_info.abm_level_property) { 6193 dm_new_state->abm_level = val; 6194 ret = 0; 6195 } 6196 6197 return ret; 6198 } 6199 6200 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6201 const struct drm_connector_state *state, 6202 struct drm_property *property, 6203 uint64_t *val) 6204 { 6205 struct drm_device *dev = connector->dev; 6206 struct amdgpu_device *adev = drm_to_adev(dev); 6207 struct dm_connector_state *dm_state = 6208 to_dm_connector_state(state); 6209 int ret = -EINVAL; 6210 6211 if (property == dev->mode_config.scaling_mode_property) { 6212 switch (dm_state->scaling) { 6213 case RMX_CENTER: 6214 *val = DRM_MODE_SCALE_CENTER; 6215 break; 6216 case RMX_ASPECT: 6217 *val = DRM_MODE_SCALE_ASPECT; 6218 break; 6219 case RMX_FULL: 6220 *val = DRM_MODE_SCALE_FULLSCREEN; 6221 break; 6222 case RMX_OFF: 6223 default: 6224 *val = DRM_MODE_SCALE_NONE; 6225 break; 6226 } 6227 ret = 0; 6228 } else if (property == adev->mode_info.underscan_hborder_property) { 6229 *val = dm_state->underscan_hborder; 6230 ret = 0; 6231 } else if (property == adev->mode_info.underscan_vborder_property) { 6232 *val = dm_state->underscan_vborder; 6233 ret = 0; 6234 } else if (property == adev->mode_info.underscan_property) { 6235 *val = dm_state->underscan_enable; 6236 ret = 0; 6237 } else if (property == adev->mode_info.abm_level_property) { 6238 *val = dm_state->abm_level; 6239 ret = 0; 6240 } 6241 6242 return ret; 6243 } 6244 6245 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 6246 { 6247 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 6248 6249 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 6250 } 6251 6252 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 6253 { 6254 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6255 const struct dc_link *link = aconnector->dc_link; 6256 struct amdgpu_device *adev = 
drm_to_adev(connector->dev); 6257 struct amdgpu_display_manager *dm = &adev->dm; 6258 int i; 6259 6260 /* 6261 * Call only if mst_mgr was initialized before, since it's not done 6262 * for all connector types. 6263 */ 6264 if (aconnector->mst_mgr.dev) 6265 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 6266 6267 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 6268 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 6269 for (i = 0; i < dm->num_of_edps; i++) { 6270 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { 6271 backlight_device_unregister(dm->backlight_dev[i]); 6272 dm->backlight_dev[i] = NULL; 6273 } 6274 } 6275 #endif 6276 6277 if (aconnector->dc_em_sink) 6278 dc_sink_release(aconnector->dc_em_sink); 6279 aconnector->dc_em_sink = NULL; 6280 if (aconnector->dc_sink) 6281 dc_sink_release(aconnector->dc_sink); 6282 aconnector->dc_sink = NULL; 6283 6284 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 6285 drm_connector_unregister(connector); 6286 drm_connector_cleanup(connector); 6287 if (aconnector->i2c) { 6288 i2c_del_adapter(&aconnector->i2c->base); 6289 kfree(aconnector->i2c); 6290 } 6291 kfree(aconnector->dm_dp_aux.aux.name); 6292 6293 kfree(connector); 6294 } 6295 6296 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 6297 { 6298 struct dm_connector_state *state = 6299 to_dm_connector_state(connector->state); 6300 6301 if (connector->state) 6302 __drm_atomic_helper_connector_destroy_state(connector->state); 6303 6304 kfree(state); 6305 6306 state = kzalloc(sizeof(*state), GFP_KERNEL); 6307 6308 if (state) { 6309 state->scaling = RMX_OFF; 6310 state->underscan_enable = false; 6311 state->underscan_hborder = 0; 6312 state->underscan_vborder = 0; 6313 state->base.max_requested_bpc = 8; 6314 state->vcpi_slots = 0; 6315 state->pbn = 0; 6316 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 6317 state->abm_level = amdgpu_dm_abm_level; 6318 6319 __drm_atomic_helper_connector_reset(connector, &state->base); 6320 } 6321 } 6322 6323 struct drm_connector_state * 6324 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 6325 { 6326 struct dm_connector_state *state = 6327 to_dm_connector_state(connector->state); 6328 6329 struct dm_connector_state *new_state = 6330 kmemdup(state, sizeof(*state), GFP_KERNEL); 6331 6332 if (!new_state) 6333 return NULL; 6334 6335 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 6336 6337 new_state->freesync_capable = state->freesync_capable; 6338 new_state->abm_level = state->abm_level; 6339 new_state->scaling = state->scaling; 6340 new_state->underscan_enable = state->underscan_enable; 6341 new_state->underscan_hborder = state->underscan_hborder; 6342 new_state->underscan_vborder = state->underscan_vborder; 6343 new_state->vcpi_slots = state->vcpi_slots; 6344 new_state->pbn = state->pbn; 6345 return &new_state->base; 6346 } 6347 6348 static int 6349 amdgpu_dm_connector_late_register(struct drm_connector *connector) 6350 { 6351 struct amdgpu_dm_connector *amdgpu_dm_connector = 6352 to_amdgpu_dm_connector(connector); 6353 int r; 6354 6355 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 6356 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 6357 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 6358 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 6359 if (r) 6360 return r; 6361 } 6362 6363 #if defined(CONFIG_DEBUG_FS) 6364 connector_debugfs_init(amdgpu_dm_connector); 6365 #endif 6366 6367 return 0; 6368 } 6369
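/*
 * Editorial sketch: the reset/duplicate callbacks above follow the usual
 * DRM pattern of subclassing the core state object. Assuming the layout
 * declared in amdgpu_dm.h, roughly:
 *
 *   struct dm_connector_state {
 *           struct drm_connector_state base;  // embedded core state
 *           enum amdgpu_rmx_type scaling;
 *           ...more driver-private fields...
 *   };
 *
 *   #define to_dm_connector_state(x) \
 *           container_of((x), struct dm_connector_state, base)
 *
 * The __drm_atomic_helper_connector_*() helpers only manage the embedded
 * base, so every driver-private field must be copied by hand, which is
 * why amdgpu_dm_connector_atomic_duplicate_state() lists them one by one.
 */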
6370 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 6371 .reset = amdgpu_dm_connector_funcs_reset, 6372 .detect = amdgpu_dm_connector_detect, 6373 .fill_modes = drm_helper_probe_single_connector_modes, 6374 .destroy = amdgpu_dm_connector_destroy, 6375 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 6376 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6377 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 6378 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 6379 .late_register = amdgpu_dm_connector_late_register, 6380 .early_unregister = amdgpu_dm_connector_unregister 6381 }; 6382 6383 static int get_modes(struct drm_connector *connector) 6384 { 6385 return amdgpu_dm_connector_get_modes(connector); 6386 } 6387 6388 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 6389 { 6390 struct dc_sink_init_data init_params = { 6391 .link = aconnector->dc_link, 6392 .sink_signal = SIGNAL_TYPE_VIRTUAL 6393 }; 6394 struct edid *edid; 6395 6396 if (!aconnector->base.edid_blob_ptr) { 6397 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n", 6398 aconnector->base.name); 6399 6400 aconnector->base.force = DRM_FORCE_OFF; 6401 aconnector->base.override_edid = false; 6402 return; 6403 } 6404 6405 edid = (struct edid *)aconnector->base.edid_blob_ptr->data; 6406 6407 aconnector->edid = edid; 6408 6409 aconnector->dc_em_sink = dc_link_add_remote_sink( 6410 aconnector->dc_link, 6411 (uint8_t *)edid, 6412 (edid->extensions + 1) * EDID_LENGTH, 6413 &init_params); 6414 6415 if (aconnector->base.force == DRM_FORCE_ON) { 6416 aconnector->dc_sink = aconnector->dc_link->local_sink ? 6417 aconnector->dc_link->local_sink : 6418 aconnector->dc_em_sink; 6419 dc_sink_retain(aconnector->dc_sink); 6420 } 6421 } 6422 6423 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 6424 { 6425 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 6426 6427 /* 6428 * In case of headless boot with force on for a DP managed connector, 6429 * those settings have to be != 0 to get an initial modeset. 6430 */ 6431 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6432 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 6433 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 6434 } 6435 6436 6437 aconnector->base.override_edid = true; 6438 create_eml_sink(aconnector); 6439 }
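/*
 * Editorial worked example of the fallback ladder implemented below: with
 * max_requested_bpc = 10 the helper attempts stream validation at 10,
 * then 8, then 6 bpc, stopping at the first depth that passes
 * dc_validate_stream(). If even 6 bpc fails with DC_FAIL_ENC_VALIDATE,
 * the whole ladder is retried once with YCbCr 4:2:0 forced, since 4:2:0
 * subsampling roughly halves the bandwidth needed compared to 4:4:4.
 */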
6440 6441 static struct dc_stream_state * 6442 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6443 const struct drm_display_mode *drm_mode, 6444 const struct dm_connector_state *dm_state, 6445 const struct dc_stream_state *old_stream) 6446 { 6447 struct drm_connector *connector = &aconnector->base; 6448 struct amdgpu_device *adev = drm_to_adev(connector->dev); 6449 struct dc_stream_state *stream; 6450 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 6451 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; 6452 enum dc_status dc_result = DC_OK; 6453 6454 do { 6455 stream = create_stream_for_sink(aconnector, drm_mode, 6456 dm_state, old_stream, 6457 requested_bpc); 6458 if (stream == NULL) { 6459 DRM_ERROR("Failed to create stream for sink!\n"); 6460 break; 6461 } 6462 6463 dc_result = dc_validate_stream(adev->dm.dc, stream); 6464 6465 if (dc_result != DC_OK) { 6466 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", 6467 drm_mode->hdisplay, 6468 drm_mode->vdisplay, 6469 drm_mode->clock, 6470 dc_result, 6471 dc_status_to_str(dc_result)); 6472 6473 dc_stream_release(stream); 6474 stream = NULL; 6475 requested_bpc -= 2; /* lower bpc to retry validation */ 6476 } 6477 6478 } while (stream == NULL && requested_bpc >= 6); 6479 6480 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { 6481 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); 6482 6483 aconnector->force_yuv420_output = true; 6484 stream = create_validate_stream_for_sink(aconnector, drm_mode, 6485 dm_state, old_stream); 6486 aconnector->force_yuv420_output = false; 6487 } 6488 6489 return stream; 6490 } 6491 6492 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 6493 struct drm_display_mode *mode) 6494 { 6495 int result = MODE_ERROR; 6496 struct dc_sink *dc_sink; 6497 /* TODO: Unhardcode stream count */ 6498 struct dc_stream_state *stream; 6499 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6500 6501 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 6502 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 6503 return result; 6504 6505 /* 6506 * Only run this the first time mode_valid is called to initialize 6507 * EDID mgmt 6508 */ 6509 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 6510 !aconnector->dc_em_sink) 6511 handle_edid_mgmt(aconnector); 6512 6513 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 6514 6515 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 6516 aconnector->base.force != DRM_FORCE_ON) { 6517 DRM_ERROR("dc_sink is NULL!\n"); 6518 goto fail; 6519 } 6520 6521 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); 6522 if (stream) { 6523 dc_stream_release(stream); 6524 result = MODE_OK; 6525 } 6526 6527 fail: 6528 /* TODO: error handling */ 6529 return result; 6530 } 6531 6532 static int fill_hdr_info_packet(const struct drm_connector_state *state, 6533 struct dc_info_packet *out) 6534 { 6535 struct hdmi_drm_infoframe frame; 6536 unsigned char buf[30]; /* 26 + 4 */ 6537 ssize_t len; 6538 int ret, i; 6539 6540 memset(out, 0, sizeof(*out)); 6541 6542 if (!state->hdr_output_metadata) 6543 return 0; 6544 6545 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 6546 if (ret) 6547 return ret; 6548 6549 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 6550 if (len < 0) 6551 return (int)len; 6552 6553 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 6554 if (len != 30) 6555 return -EINVAL; 6556 6557 /* Prepare the infopacket for DC.
*/ 6558 switch (state->connector->connector_type) { 6559 case DRM_MODE_CONNECTOR_HDMIA: 6560 out->hb0 = 0x87; /* type */ 6561 out->hb1 = 0x01; /* version */ 6562 out->hb2 = 0x1A; /* length */ 6563 out->sb[0] = buf[3]; /* checksum */ 6564 i = 1; 6565 break; 6566 6567 case DRM_MODE_CONNECTOR_DisplayPort: 6568 case DRM_MODE_CONNECTOR_eDP: 6569 out->hb0 = 0x00; /* sdp id, zero */ 6570 out->hb1 = 0x87; /* type */ 6571 out->hb2 = 0x1D; /* payload len - 1 */ 6572 out->hb3 = (0x13 << 2); /* sdp version */ 6573 out->sb[0] = 0x01; /* version */ 6574 out->sb[1] = 0x1A; /* length */ 6575 i = 2; 6576 break; 6577 6578 default: 6579 return -EINVAL; 6580 } 6581 6582 memcpy(&out->sb[i], &buf[4], 26); 6583 out->valid = true; 6584 6585 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 6586 sizeof(out->sb), false); 6587 6588 return 0; 6589 }
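/*
 * Editorial recap of the packing above: both connector types carry the
 * same 26 bytes of CTA-861 static HDR metadata (buf[4..29]); only the
 * wrapper differs. HDMI keeps the native infoframe header,
 *
 *   hb0 = 0x87 (type), hb1 = 0x01 (version), hb2 = 0x1A (26-byte payload),
 *   checksum in sb[0], metadata in sb[1..26]
 *
 * while DP re-wraps it as a secondary data packet: the 0x87 type moves to
 * hb1, hb2 holds the payload length minus one, the version/length pair
 * moves into sb[0]/sb[1], and the metadata lands in sb[2..27].
 */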
6590 6591 static int 6592 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 6593 struct drm_atomic_state *state) 6594 { 6595 struct drm_connector_state *new_con_state = 6596 drm_atomic_get_new_connector_state(state, conn); 6597 struct drm_connector_state *old_con_state = 6598 drm_atomic_get_old_connector_state(state, conn); 6599 struct drm_crtc *crtc = new_con_state->crtc; 6600 struct drm_crtc_state *new_crtc_state; 6601 int ret; 6602 6603 trace_amdgpu_dm_connector_atomic_check(new_con_state); 6604 6605 if (!crtc) 6606 return 0; 6607 6608 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 6609 struct dc_info_packet hdr_infopacket; 6610 6611 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 6612 if (ret) 6613 return ret; 6614 6615 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6616 if (IS_ERR(new_crtc_state)) 6617 return PTR_ERR(new_crtc_state); 6618 6619 /* 6620 * DC considers the stream backends changed if the 6621 * static metadata changes. Forcing the modeset also 6622 * gives a simple way for userspace to switch from 6623 * 8bpc to 10bpc when setting the metadata to enter 6624 * or exit HDR. 6625 * 6626 * Changing the static metadata after it's been 6627 * set is permissible, however. So only force a 6628 * modeset if we're entering or exiting HDR. 6629 */ 6630 new_crtc_state->mode_changed = 6631 !old_con_state->hdr_output_metadata || 6632 !new_con_state->hdr_output_metadata; 6633 } 6634 6635 return 0; 6636 } 6637 6638 static const struct drm_connector_helper_funcs 6639 amdgpu_dm_connector_helper_funcs = { 6640 /* 6641 * If hotplugging a second bigger display in FB Con mode, bigger resolution 6642 * modes will be filtered by drm_mode_validate_size(), and those modes 6643 * are missing after the user starts lightdm. So we need to renew the 6644 * modes list in the get_modes callback, not just return the modes count 6645 */ 6646 .get_modes = get_modes, 6647 .mode_valid = amdgpu_dm_connector_mode_valid, 6648 .atomic_check = amdgpu_dm_connector_atomic_check, 6649 }; 6650 6651 static void dm_crtc_helper_disable(struct drm_crtc *crtc) 6652 { 6653 } 6654 6655 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) 6656 { 6657 struct drm_atomic_state *state = new_crtc_state->state; 6658 struct drm_plane *plane; 6659 int num_active = 0; 6660 6661 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { 6662 struct drm_plane_state *new_plane_state; 6663 6664 /* Cursor planes are "fake". */ 6665 if (plane->type == DRM_PLANE_TYPE_CURSOR) 6666 continue; 6667 6668 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 6669 6670 if (!new_plane_state) { 6671 /* 6672 * The plane is enabled on the CRTC and hasn't changed 6673 * state. This means that it previously passed 6674 * validation and is therefore enabled. 6675 */ 6676 num_active += 1; 6677 continue; 6678 } 6679 6680 /* We need a framebuffer to be considered enabled. */ 6681 num_active += (new_plane_state->fb != NULL); 6682 } 6683 6684 return num_active; 6685 } 6686 6687 static void dm_update_crtc_active_planes(struct drm_crtc *crtc, 6688 struct drm_crtc_state *new_crtc_state) 6689 { 6690 struct dm_crtc_state *dm_new_crtc_state = 6691 to_dm_crtc_state(new_crtc_state); 6692 6693 dm_new_crtc_state->active_planes = 0; 6694 6695 if (!dm_new_crtc_state->stream) 6696 return; 6697 6698 dm_new_crtc_state->active_planes = 6699 count_crtc_active_planes(new_crtc_state); 6700 } 6701 6702 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, 6703 struct drm_atomic_state *state) 6704 { 6705 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 6706 crtc); 6707 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6708 struct dc *dc = adev->dm.dc; 6709 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 6710 int ret = -EINVAL; 6711 6712 trace_amdgpu_dm_crtc_atomic_check(crtc_state); 6713 6714 dm_update_crtc_active_planes(crtc, crtc_state); 6715 6716 if (WARN_ON(unlikely(!dm_crtc_state->stream && 6717 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { 6718 return ret; 6719 } 6720 6721 /* 6722 * We require the primary plane to be enabled whenever the CRTC is, otherwise 6723 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other 6724 * planes are disabled, which is not supported by the hardware. And there is legacy 6725 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6726 */ 6727 if (crtc_state->enable && 6728 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { 6729 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); 6730 return -EINVAL; 6731 } 6732 6733 /* In some use cases, like reset, no stream is attached */ 6734 if (!dm_crtc_state->stream) 6735 return 0; 6736 6737 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 6738 return 0; 6739 6740 DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); 6741 return ret; 6742 } 6743 6744 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 6745 const struct drm_display_mode *mode, 6746 struct drm_display_mode *adjusted_mode) 6747 { 6748 return true; 6749 } 6750 6751 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 6752 .disable = dm_crtc_helper_disable, 6753 .atomic_check = dm_crtc_helper_atomic_check, 6754 .mode_fixup = dm_crtc_helper_mode_fixup, 6755 .get_scanout_position = amdgpu_crtc_get_scanout_position, 6756 }; 6757 6758 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 6759 { 6760 6761 } 6762 6763 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 6764 { 6765 switch (display_color_depth) { 6766 case COLOR_DEPTH_666: 6767 return 6; 6768 case COLOR_DEPTH_888: 6769 return 8; 6770 case COLOR_DEPTH_101010: 6771 return 10; 6772 case COLOR_DEPTH_121212: 6773 return 12; 6774 case COLOR_DEPTH_141414: 6775 return 14; 6776 case COLOR_DEPTH_161616: 6777 return 16; 6778 default: 6779 break; 6780 } 6781 return 0; 6782 } 6783 6784 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 6785 struct drm_crtc_state *crtc_state, 6786 struct drm_connector_state *conn_state) 6787 { 6788 struct drm_atomic_state *state = crtc_state->state; 6789 struct drm_connector *connector = conn_state->connector; 6790 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6791 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 6792 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 6793 struct drm_dp_mst_topology_mgr *mst_mgr; 6794 struct drm_dp_mst_port *mst_port; 6795 enum dc_color_depth color_depth; 6796 int clock, bpp = 0; 6797 bool is_y420 = false; 6798 6799 if (!aconnector->port || !aconnector->dc_sink) 6800 return 0; 6801 6802 mst_port = aconnector->port; 6803 mst_mgr = &aconnector->mst_port->mst_mgr; 6804 6805 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 6806 return 0; 6807 6808 if (!state->duplicated) { 6809 int max_bpc = conn_state->max_requested_bpc; 6810 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 6811 aconnector->force_yuv420_output; 6812 color_depth = convert_color_depth_from_display_info(connector, 6813 is_y420, 6814 max_bpc); 6815 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 6816 clock = adjusted_mode->clock; 6817 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); 6818 } 6819 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 6820 mst_mgr, 6821 mst_port, 6822 dm_new_connector_state->pbn, 6823 dm_mst_get_pbn_divider(aconnector->dc_link)); 6824 if (dm_new_connector_state->vcpi_slots < 0) { 6825 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 6826 return dm_new_connector_state->vcpi_slots; 6827 } 6828 return 0; 6829 }
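/*
 * Editorial worked example for the MST budget math above, assuming
 * drm_dp_calc_pbn_mode() implements the DP-spec formula
 * PBN = clock_kHz * bpp / 8 * 64 / 54 * 1.006 / 1000: a 1920x1080@60
 * stream (clock = 148500 kHz) at 8 bpc (bpp = 24) needs
 *
 *   148500 * 24 / 8 * 64 / 54 * 1.006 / 1000 ~= 532 PBN
 *
 * which drm_dp_atomic_find_vcpi_slots() then divides by the link's
 * PBN-per-slot value from dm_mst_get_pbn_divider() to arrive at a number
 * of MST time slots.
 */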
6830 6831 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 6832 .disable = dm_encoder_helper_disable, 6833 .atomic_check = dm_encoder_helper_atomic_check 6834 }; 6835 6836 #if defined(CONFIG_DRM_AMD_DC_DCN) 6837 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 6838 struct dc_state *dc_state, 6839 struct dsc_mst_fairness_vars *vars) 6840 { 6841 struct dc_stream_state *stream = NULL; 6842 struct drm_connector *connector; 6843 struct drm_connector_state *new_con_state; 6844 struct amdgpu_dm_connector *aconnector; 6845 struct dm_connector_state *dm_conn_state; 6846 int i, j, clock; 6847 int vcpi, pbn_div, pbn = 0; 6848 6849 for_each_new_connector_in_state(state, connector, new_con_state, i) { 6850 6851 aconnector = to_amdgpu_dm_connector(connector); 6852 6853 if (!aconnector->port) 6854 continue; 6855 6856 if (!new_con_state || !new_con_state->crtc) 6857 continue; 6858 6859 dm_conn_state = to_dm_connector_state(new_con_state); 6860 6861 for (j = 0; j < dc_state->stream_count; j++) { 6862 stream = dc_state->streams[j]; 6863 if (!stream) 6864 continue; 6865 6866 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 6867 break; 6868 6869 stream = NULL; 6870 } 6871 6872 if (!stream) 6873 continue; 6874 6875 if (stream->timing.flags.DSC != 1) { 6876 drm_dp_mst_atomic_enable_dsc(state, 6877 aconnector->port, 6878 dm_conn_state->pbn, 6879 0, 6880 false); 6881 continue; 6882 } 6883 6884 pbn_div = dm_mst_get_pbn_divider(stream->link); 6885 clock = stream->timing.pix_clk_100hz / 10; 6886 /* pbn is calculated by compute_mst_dsc_configs_for_state */ 6887 for (j = 0; j < dc_state->stream_count; j++) { 6888 if (vars[j].aconnector == aconnector) { 6889 pbn = vars[j].pbn; 6890 break; 6891 } 6892 } 6893 6894 vcpi = drm_dp_mst_atomic_enable_dsc(state, 6895 aconnector->port, 6896 pbn, pbn_div, 6897 true); 6898 if (vcpi < 0) 6899 return vcpi; 6900 6901 dm_conn_state->pbn = pbn; 6902 dm_conn_state->vcpi_slots = vcpi; 6903 } 6904 return 0; 6905 } 6906 #endif 6907 6908 static void dm_drm_plane_reset(struct drm_plane *plane) 6909 { 6910 struct dm_plane_state *amdgpu_state = NULL; 6911 6912 if (plane->state) 6913 plane->funcs->atomic_destroy_state(plane, plane->state); 6914 6915 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 6916 WARN_ON(amdgpu_state == NULL); 6917 6918 if (amdgpu_state) 6919 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 6920 } 6921 6922 static struct drm_plane_state * 6923 dm_drm_plane_duplicate_state(struct drm_plane *plane) 6924 { 6925 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 6926 6927 old_dm_plane_state = to_dm_plane_state(plane->state); 6928 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 6929 if (!dm_plane_state) 6930 return NULL; 6931 6932 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 6933 6934 if (old_dm_plane_state->dc_state) { 6935 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 6936 dc_plane_state_retain(dm_plane_state->dc_state); 6937 } 6938 6939 return &dm_plane_state->base; 6940 } 6941 6942 static void dm_drm_plane_destroy_state(struct drm_plane *plane, 6943 struct drm_plane_state *state) 6944 { 6945 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 6946 6947 if (dm_plane_state->dc_state) 6948 dc_plane_state_release(dm_plane_state->dc_state); 6949 6950 drm_atomic_helper_plane_destroy_state(plane, state); 6951 } 6952 6953 static const struct drm_plane_funcs dm_plane_funcs = { 6954 .update_plane = drm_atomic_helper_update_plane, 6955 .disable_plane = drm_atomic_helper_disable_plane, 6956 .destroy = drm_primary_helper_destroy, 6957 .reset =
dm_drm_plane_reset, 6958 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 6959 .atomic_destroy_state = dm_drm_plane_destroy_state, 6960 .format_mod_supported = dm_plane_format_mod_supported, 6961 }; 6962 6963 static int dm_plane_helper_prepare_fb(struct drm_plane *plane, 6964 struct drm_plane_state *new_state) 6965 { 6966 struct amdgpu_framebuffer *afb; 6967 struct drm_gem_object *obj; 6968 struct amdgpu_device *adev; 6969 struct amdgpu_bo *rbo; 6970 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 6971 struct list_head list; 6972 struct ttm_validate_buffer tv; 6973 struct ww_acquire_ctx ticket; 6974 uint32_t domain; 6975 int r; 6976 6977 if (!new_state->fb) { 6978 DRM_DEBUG_KMS("No FB bound\n"); 6979 return 0; 6980 } 6981 6982 afb = to_amdgpu_framebuffer(new_state->fb); 6983 obj = new_state->fb->obj[0]; 6984 rbo = gem_to_amdgpu_bo(obj); 6985 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 6986 INIT_LIST_HEAD(&list); 6987 6988 tv.bo = &rbo->tbo; 6989 tv.num_shared = 1; 6990 list_add(&tv.head, &list); 6991 6992 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); 6993 if (r) { 6994 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 6995 return r; 6996 } 6997 6998 if (plane->type != DRM_PLANE_TYPE_CURSOR) 6999 domain = amdgpu_display_supported_domains(adev, rbo->flags); 7000 else 7001 domain = AMDGPU_GEM_DOMAIN_VRAM; 7002 7003 r = amdgpu_bo_pin(rbo, domain); 7004 if (unlikely(r != 0)) { 7005 if (r != -ERESTARTSYS) 7006 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 7007 ttm_eu_backoff_reservation(&ticket, &list); 7008 return r; 7009 } 7010 7011 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 7012 if (unlikely(r != 0)) { 7013 amdgpu_bo_unpin(rbo); 7014 ttm_eu_backoff_reservation(&ticket, &list); 7015 DRM_ERROR("%p bind failed\n", rbo); 7016 return r; 7017 } 7018 7019 ttm_eu_backoff_reservation(&ticket, &list); 7020 7021 afb->address = amdgpu_bo_gpu_offset(rbo); 7022 7023 amdgpu_bo_ref(rbo); 7024 7025 /** 7026 * We don't do surface updates on planes that have been newly created, 7027 * but we also don't have the afb->address during atomic check. 7028 * 7029 * Fill in buffer attributes depending on the address here, but only on 7030 * newly created planes since they're not being used by DC yet and this 7031 * won't modify global state. 
7032 */ 7033 dm_plane_state_old = to_dm_plane_state(plane->state); 7034 dm_plane_state_new = to_dm_plane_state(new_state); 7035 7036 if (dm_plane_state_new->dc_state && 7037 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 7038 struct dc_plane_state *plane_state = 7039 dm_plane_state_new->dc_state; 7040 bool force_disable_dcc = !plane_state->dcc.enable; 7041 7042 fill_plane_buffer_attributes( 7043 adev, afb, plane_state->format, plane_state->rotation, 7044 afb->tiling_flags, 7045 &plane_state->tiling_info, &plane_state->plane_size, 7046 &plane_state->dcc, &plane_state->address, 7047 afb->tmz_surface, force_disable_dcc); 7048 } 7049 7050 return 0; 7051 } 7052 7053 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 7054 struct drm_plane_state *old_state) 7055 { 7056 struct amdgpu_bo *rbo; 7057 int r; 7058 7059 if (!old_state->fb) 7060 return; 7061 7062 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 7063 r = amdgpu_bo_reserve(rbo, false); 7064 if (unlikely(r)) { 7065 DRM_ERROR("failed to reserve rbo before unpin\n"); 7066 return; 7067 } 7068 7069 amdgpu_bo_unpin(rbo); 7070 amdgpu_bo_unreserve(rbo); 7071 amdgpu_bo_unref(&rbo); 7072 } 7073 7074 static int dm_plane_helper_check_state(struct drm_plane_state *state, 7075 struct drm_crtc_state *new_crtc_state) 7076 { 7077 struct drm_framebuffer *fb = state->fb; 7078 int min_downscale, max_upscale; 7079 int min_scale = 0; 7080 int max_scale = INT_MAX; 7081 7082 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 7083 if (fb && state->crtc) { 7084 /* Validate viewport to cover the case when only the position changes */ 7085 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 7086 int viewport_width = state->crtc_w; 7087 int viewport_height = state->crtc_h; 7088 7089 if (state->crtc_x < 0) 7090 viewport_width += state->crtc_x; 7091 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 7092 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 7093 7094 if (state->crtc_y < 0) 7095 viewport_height += state->crtc_y; 7096 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 7097 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 7098 7099 if (viewport_width < 0 || viewport_height < 0) { 7100 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 7101 return -EINVAL; 7102 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 7103 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 7104 return -EINVAL; 7105 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 7106 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 7107 return -EINVAL; 7108 } 7109 7110 } 7111 7112 /* Get min/max allowed scaling factors from plane caps. */ 7113 get_min_max_dc_plane_scaling(state->crtc->dev, fb, 7114 &min_downscale, &max_upscale); 7115 /* 7116 * Convert to drm convention: 16.16 fixed point, instead of dc's 7117 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 7118 * dst/src, so min_scale = 1.0 / max_upscale, etc. 7119 */
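/*
 * Editorial worked example (values illustrative): if the plane caps
 * report max_upscale = 16000 (16x in dc's 1000-based convention) and
 * min_downscale = 250 (1/4x), the conversions below give
 *
 *   min_scale = (1000 << 16) / 16000 = 4096     (1/16 in 16.16 fixed point)
 *   max_scale = (1000 << 16) / 250   = 262144   (4x in 16.16 fixed point)
 *
 * matching drm's convention where 1 << 16 represents a 1:1 src/dst ratio.
 */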
7120 min_scale = (1000 << 16) / max_upscale; 7121 max_scale = (1000 << 16) / min_downscale; 7122 } 7123 7124 return drm_atomic_helper_check_plane_state( 7125 state, new_crtc_state, min_scale, max_scale, true, true); 7126 } 7127 7128 static int dm_plane_atomic_check(struct drm_plane *plane, 7129 struct drm_atomic_state *state) 7130 { 7131 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 7132 plane); 7133 struct amdgpu_device *adev = drm_to_adev(plane->dev); 7134 struct dc *dc = adev->dm.dc; 7135 struct dm_plane_state *dm_plane_state; 7136 struct dc_scaling_info scaling_info; 7137 struct drm_crtc_state *new_crtc_state; 7138 int ret; 7139 7140 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 7141 7142 dm_plane_state = to_dm_plane_state(new_plane_state); 7143 7144 if (!dm_plane_state->dc_state) 7145 return 0; 7146 7147 new_crtc_state = 7148 drm_atomic_get_new_crtc_state(state, 7149 new_plane_state->crtc); 7150 if (!new_crtc_state) 7151 return -EINVAL; 7152 7153 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 7154 if (ret) 7155 return ret; 7156 7157 ret = fill_dc_scaling_info(new_plane_state, &scaling_info); 7158 if (ret) 7159 return ret; 7160 7161 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 7162 return 0; 7163 7164 return -EINVAL; 7165 } 7166 7167 static int dm_plane_atomic_async_check(struct drm_plane *plane, 7168 struct drm_atomic_state *state) 7169 { 7170 /* Only support async updates on cursor planes. */ 7171 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7172 return -EINVAL; 7173 7174 return 0; 7175 } 7176 7177 static void dm_plane_atomic_async_update(struct drm_plane *plane, 7178 struct drm_atomic_state *state) 7179 { 7180 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 7181 plane); 7182 struct drm_plane_state *old_state = 7183 drm_atomic_get_old_plane_state(state, plane); 7184 7185 trace_amdgpu_dm_atomic_update_cursor(new_state); 7186 7187 swap(plane->state->fb, new_state->fb); 7188 7189 plane->state->src_x = new_state->src_x; 7190 plane->state->src_y = new_state->src_y; 7191 plane->state->src_w = new_state->src_w; 7192 plane->state->src_h = new_state->src_h; 7193 plane->state->crtc_x = new_state->crtc_x; 7194 plane->state->crtc_y = new_state->crtc_y; 7195 plane->state->crtc_w = new_state->crtc_w; 7196 plane->state->crtc_h = new_state->crtc_h; 7197 7198 handle_cursor_update(plane, old_state); 7199 } 7200 7201 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 7202 .prepare_fb = dm_plane_helper_prepare_fb, 7203 .cleanup_fb = dm_plane_helper_cleanup_fb, 7204 .atomic_check = dm_plane_atomic_check, 7205 .atomic_async_check = dm_plane_atomic_async_check, 7206 .atomic_async_update = dm_plane_atomic_async_update 7207 }; 7208 7209 /* 7210 * TODO: these are currently initialized to rgb formats only.
7211 * For future use cases we should either initialize them dynamically based on 7212 * plane capabilities, or initialize this array to all formats, so internal drm 7213 * check will succeed, and let DC implement proper check 7214 */ 7215 static const uint32_t rgb_formats[] = { 7216 DRM_FORMAT_XRGB8888, 7217 DRM_FORMAT_ARGB8888, 7218 DRM_FORMAT_RGBA8888, 7219 DRM_FORMAT_XRGB2101010, 7220 DRM_FORMAT_XBGR2101010, 7221 DRM_FORMAT_ARGB2101010, 7222 DRM_FORMAT_ABGR2101010, 7223 DRM_FORMAT_XRGB16161616, 7224 DRM_FORMAT_XBGR16161616, 7225 DRM_FORMAT_ARGB16161616, 7226 DRM_FORMAT_ABGR16161616, 7227 DRM_FORMAT_XBGR8888, 7228 DRM_FORMAT_ABGR8888, 7229 DRM_FORMAT_RGB565, 7230 }; 7231 7232 static const uint32_t overlay_formats[] = { 7233 DRM_FORMAT_XRGB8888, 7234 DRM_FORMAT_ARGB8888, 7235 DRM_FORMAT_RGBA8888, 7236 DRM_FORMAT_XBGR8888, 7237 DRM_FORMAT_ABGR8888, 7238 DRM_FORMAT_RGB565 7239 }; 7240 7241 static const u32 cursor_formats[] = { 7242 DRM_FORMAT_ARGB8888 7243 }; 7244 7245 static int get_plane_formats(const struct drm_plane *plane, 7246 const struct dc_plane_cap *plane_cap, 7247 uint32_t *formats, int max_formats) 7248 { 7249 int i, num_formats = 0; 7250 7251 /* 7252 * TODO: Query support for each group of formats directly from 7253 * DC plane caps. This will require adding more formats to the 7254 * caps list. 7255 */ 7256 7257 switch (plane->type) { 7258 case DRM_PLANE_TYPE_PRIMARY: 7259 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 7260 if (num_formats >= max_formats) 7261 break; 7262 7263 formats[num_formats++] = rgb_formats[i]; 7264 } 7265 7266 if (plane_cap && plane_cap->pixel_format_support.nv12) 7267 formats[num_formats++] = DRM_FORMAT_NV12; 7268 if (plane_cap && plane_cap->pixel_format_support.p010) 7269 formats[num_formats++] = DRM_FORMAT_P010; 7270 if (plane_cap && plane_cap->pixel_format_support.fp16) { 7271 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 7272 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 7273 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 7274 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 7275 } 7276 break; 7277 7278 case DRM_PLANE_TYPE_OVERLAY: 7279 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 7280 if (num_formats >= max_formats) 7281 break; 7282 7283 formats[num_formats++] = overlay_formats[i]; 7284 } 7285 break; 7286 7287 case DRM_PLANE_TYPE_CURSOR: 7288 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 7289 if (num_formats >= max_formats) 7290 break; 7291 7292 formats[num_formats++] = cursor_formats[i]; 7293 } 7294 break; 7295 } 7296 7297 return num_formats; 7298 } 7299 7300 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 7301 struct drm_plane *plane, 7302 unsigned long possible_crtcs, 7303 const struct dc_plane_cap *plane_cap) 7304 { 7305 uint32_t formats[32]; 7306 int num_formats; 7307 int res = -EPERM; 7308 unsigned int supported_rotations; 7309 uint64_t *modifiers = NULL; 7310 7311 num_formats = get_plane_formats(plane, plane_cap, formats, 7312 ARRAY_SIZE(formats)); 7313 7314 res = get_plane_modifiers(dm->adev, plane->type, &modifiers); 7315 if (res) 7316 return res; 7317 7318 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 7319 &dm_plane_funcs, formats, num_formats, 7320 modifiers, plane->type, NULL); 7321 kfree(modifiers); 7322 if (res) 7323 return res; 7324 7325 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 7326 plane_cap && plane_cap->per_pixel_alpha) { 7327 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 7328 BIT(DRM_MODE_BLEND_PREMULTI); 7329 7330 
drm_plane_create_alpha_property(plane); 7331 drm_plane_create_blend_mode_property(plane, blend_caps); 7332 } 7333 7334 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 7335 plane_cap && 7336 (plane_cap->pixel_format_support.nv12 || 7337 plane_cap->pixel_format_support.p010)) { 7338 /* This only affects YUV formats. */ 7339 drm_plane_create_color_properties( 7340 plane, 7341 BIT(DRM_COLOR_YCBCR_BT601) | 7342 BIT(DRM_COLOR_YCBCR_BT709) | 7343 BIT(DRM_COLOR_YCBCR_BT2020), 7344 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 7345 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 7346 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 7347 } 7348 7349 supported_rotations = 7350 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 7351 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 7352 7353 if (dm->adev->asic_type >= CHIP_BONAIRE && 7354 plane->type != DRM_PLANE_TYPE_CURSOR) 7355 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 7356 supported_rotations); 7357 7358 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 7359 7360 /* Create (reset) the plane state */ 7361 if (plane->funcs->reset) 7362 plane->funcs->reset(plane); 7363 7364 return 0; 7365 } 7366 7367 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 7368 struct drm_plane *plane, 7369 uint32_t crtc_index) 7370 { 7371 struct amdgpu_crtc *acrtc = NULL; 7372 struct drm_plane *cursor_plane; 7373 7374 int res = -ENOMEM; 7375 7376 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); 7377 if (!cursor_plane) 7378 goto fail; 7379 7380 cursor_plane->type = DRM_PLANE_TYPE_CURSOR; 7381 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); 7382 7383 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); 7384 if (!acrtc) 7385 goto fail; 7386 7387 res = drm_crtc_init_with_planes( 7388 dm->ddev, 7389 &acrtc->base, 7390 plane, 7391 cursor_plane, 7392 &amdgpu_dm_crtc_funcs, NULL); 7393 7394 if (res) 7395 goto fail; 7396 7397 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 7398 7399 /* Create (reset) the plane state */ 7400 if (acrtc->base.funcs->reset) 7401 acrtc->base.funcs->reset(&acrtc->base); 7402 7403 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; 7404 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; 7405 7406 acrtc->crtc_id = crtc_index; 7407 acrtc->base.enabled = false; 7408 acrtc->otg_inst = -1; 7409 7410 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 7411 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, 7412 true, MAX_COLOR_LUT_ENTRIES); 7413 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); 7414 7415 return 0; 7416 7417 fail: 7418 kfree(acrtc); 7419 kfree(cursor_plane); 7420 return res; 7421 } 7422 7423 7424 static int to_drm_connector_type(enum amd_signal_type st) 7425 { 7426 switch (st) { 7427 case SIGNAL_TYPE_HDMI_TYPE_A: 7428 return DRM_MODE_CONNECTOR_HDMIA; 7429 case SIGNAL_TYPE_EDP: 7430 return DRM_MODE_CONNECTOR_eDP; 7431 case SIGNAL_TYPE_LVDS: 7432 return DRM_MODE_CONNECTOR_LVDS; 7433 case SIGNAL_TYPE_RGB: 7434 return DRM_MODE_CONNECTOR_VGA; 7435 case SIGNAL_TYPE_DISPLAY_PORT: 7436 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7437 return DRM_MODE_CONNECTOR_DisplayPort; 7438 case SIGNAL_TYPE_DVI_DUAL_LINK: 7439 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7440 return DRM_MODE_CONNECTOR_DVID; 7441 case SIGNAL_TYPE_VIRTUAL: 7442 return DRM_MODE_CONNECTOR_VIRTUAL; 7443 7444 default: 7445 return DRM_MODE_CONNECTOR_Unknown; 7446 } 7447 } 7448 7449 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7450 { 7451 struct drm_encoder *encoder; 
7452 7453 /* There is only one encoder per connector */ 7454 drm_connector_for_each_possible_encoder(connector, encoder) 7455 return encoder; 7456 7457 return NULL; 7458 } 7459 7460 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7461 { 7462 struct drm_encoder *encoder; 7463 struct amdgpu_encoder *amdgpu_encoder; 7464 7465 encoder = amdgpu_dm_connector_to_encoder(connector); 7466 7467 if (encoder == NULL) 7468 return; 7469 7470 amdgpu_encoder = to_amdgpu_encoder(encoder); 7471 7472 amdgpu_encoder->native_mode.clock = 0; 7473 7474 if (!list_empty(&connector->probed_modes)) { 7475 struct drm_display_mode *preferred_mode = NULL; 7476 7477 list_for_each_entry(preferred_mode, 7478 &connector->probed_modes, 7479 head) { 7480 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7481 amdgpu_encoder->native_mode = *preferred_mode; 7482 7483 break; 7484 } 7485 7486 } 7487 } 7488 7489 static struct drm_display_mode * 7490 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7491 char *name, 7492 int hdisplay, int vdisplay) 7493 { 7494 struct drm_device *dev = encoder->dev; 7495 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7496 struct drm_display_mode *mode = NULL; 7497 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7498 7499 mode = drm_mode_duplicate(dev, native_mode); 7500 7501 if (mode == NULL) 7502 return NULL; 7503 7504 mode->hdisplay = hdisplay; 7505 mode->vdisplay = vdisplay; 7506 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7507 #ifdef __linux__ 7508 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7509 #else 7510 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7511 #endif 7512 7513 return mode; 7514 7515 } 7516 7517 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7518 struct drm_connector *connector) 7519 { 7520 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7521 struct drm_display_mode *mode = NULL; 7522 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7523 struct amdgpu_dm_connector *amdgpu_dm_connector = 7524 to_amdgpu_dm_connector(connector); 7525 int i; 7526 int n; 7527 struct mode_size { 7528 char name[DRM_DISPLAY_MODE_LEN]; 7529 int w; 7530 int h; 7531 } common_modes[] = { 7532 { "640x480", 640, 480}, 7533 { "800x600", 800, 600}, 7534 { "1024x768", 1024, 768}, 7535 { "1280x720", 1280, 720}, 7536 { "1280x800", 1280, 800}, 7537 {"1280x1024", 1280, 1024}, 7538 { "1440x900", 1440, 900}, 7539 {"1680x1050", 1680, 1050}, 7540 {"1600x1200", 1600, 1200}, 7541 {"1920x1080", 1920, 1080}, 7542 {"1920x1200", 1920, 1200} 7543 }; 7544 7545 n = ARRAY_SIZE(common_modes); 7546 7547 for (i = 0; i < n; i++) { 7548 struct drm_display_mode *curmode = NULL; 7549 bool mode_existed = false; 7550 7551 if (common_modes[i].w > native_mode->hdisplay || 7552 common_modes[i].h > native_mode->vdisplay || 7553 (common_modes[i].w == native_mode->hdisplay && 7554 common_modes[i].h == native_mode->vdisplay)) 7555 continue; 7556 7557 list_for_each_entry(curmode, &connector->probed_modes, head) { 7558 if (common_modes[i].w == curmode->hdisplay && 7559 common_modes[i].h == curmode->vdisplay) { 7560 mode_existed = true; 7561 break; 7562 } 7563 } 7564 7565 if (mode_existed) 7566 continue; 7567 7568 mode = amdgpu_dm_create_common_mode(encoder, 7569 common_modes[i].name, common_modes[i].w, 7570 common_modes[i].h); 7571 if (!mode) 7572 continue; 7573 7574 drm_mode_probed_add(connector, mode); 7575 amdgpu_dm_connector->num_modes++; 7576 } 7577 } 7578 7579 static void 
amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can carry more
		 * than one preferred mode. Modes later in the probed list may
		 * have a higher, preferred resolution; for example, 3840x2160
		 * in the base EDID preferred timing and 4096x2160 in a DID
		 * extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976   - TV/NTSC
	 * 24       - Cinema
	 * 25       - TV/PAL
	 * 29.97    - TV/NTSC
	 * 30       - TV/NTSC
	 * 48       - Cinema HFR
	 * 50       - TV/PAL
	 * 60       - Commonly used
	 * 48,72,96 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000
	};

	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. Some monitors report a preferred mode with a
	 * lower resolution than the highest refresh rate they support.
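	 * Each mode synthesized below keeps the base mode's pixel clock and
	 * htotal and only stretches vtotal, so the resulting refresh rate is
	 * clock / (htotal * vtotal) evaluated at the target rate.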
7681 */ 7682 7683 m = get_highest_refresh_rate_mode(aconnector, true); 7684 if (!m) 7685 return 0; 7686 7687 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 7688 uint64_t target_vtotal, target_vtotal_diff; 7689 uint64_t num, den; 7690 7691 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 7692 continue; 7693 7694 if (common_rates[i] < aconnector->min_vfreq * 1000 || 7695 common_rates[i] > aconnector->max_vfreq * 1000) 7696 continue; 7697 7698 num = (unsigned long long)m->clock * 1000 * 1000; 7699 den = common_rates[i] * (unsigned long long)m->htotal; 7700 target_vtotal = div_u64(num, den); 7701 target_vtotal_diff = target_vtotal - m->vtotal; 7702 7703 /* Check for illegal modes */ 7704 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 7705 m->vsync_end + target_vtotal_diff < m->vsync_start || 7706 m->vtotal + target_vtotal_diff < m->vsync_end) 7707 continue; 7708 7709 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 7710 if (!new_mode) 7711 goto out; 7712 7713 new_mode->vtotal += (u16)target_vtotal_diff; 7714 new_mode->vsync_start += (u16)target_vtotal_diff; 7715 new_mode->vsync_end += (u16)target_vtotal_diff; 7716 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7717 new_mode->type |= DRM_MODE_TYPE_DRIVER; 7718 7719 if (!is_duplicate_mode(aconnector, new_mode)) { 7720 drm_mode_probed_add(&aconnector->base, new_mode); 7721 new_modes_count += 1; 7722 } else 7723 drm_mode_destroy(aconnector->base.dev, new_mode); 7724 } 7725 out: 7726 return new_modes_count; 7727 } 7728 7729 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 7730 struct edid *edid) 7731 { 7732 struct amdgpu_dm_connector *amdgpu_dm_connector = 7733 to_amdgpu_dm_connector(connector); 7734 7735 if (!(amdgpu_freesync_vid_mode && edid)) 7736 return; 7737 7738 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 7739 amdgpu_dm_connector->num_modes += 7740 add_fs_modes(amdgpu_dm_connector); 7741 } 7742 7743 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 7744 { 7745 struct amdgpu_dm_connector *amdgpu_dm_connector = 7746 to_amdgpu_dm_connector(connector); 7747 struct drm_encoder *encoder; 7748 struct edid *edid = amdgpu_dm_connector->edid; 7749 7750 encoder = amdgpu_dm_connector_to_encoder(connector); 7751 7752 if (!drm_edid_is_valid(edid)) { 7753 amdgpu_dm_connector->num_modes = 7754 drm_add_modes_noedid(connector, 640, 480); 7755 } else { 7756 amdgpu_dm_connector_ddc_get_modes(connector, edid); 7757 amdgpu_dm_connector_add_common_modes(encoder, connector); 7758 amdgpu_dm_connector_add_freesync_modes(connector, edid); 7759 } 7760 amdgpu_dm_fbc_init(connector); 7761 7762 return amdgpu_dm_connector->num_modes; 7763 } 7764 7765 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 7766 struct amdgpu_dm_connector *aconnector, 7767 int connector_type, 7768 struct dc_link *link, 7769 int link_index) 7770 { 7771 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 7772 7773 /* 7774 * Some of the properties below require access to state, like bpc. 7775 * Allocate some default initial connector state with our reset helper. 
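	 * Without it, the max_bpc default applied a few lines below would
	 * dereference a NULL aconnector->base.state.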
 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	rw_init(&aconnector->hpd_lock, "dmhpd");

	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ?
16 : 8; 7831 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 7832 7833 if (connector_type == DRM_MODE_CONNECTOR_eDP && 7834 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { 7835 drm_object_attach_property(&aconnector->base.base, 7836 adev->mode_info.abm_level_property, 0); 7837 } 7838 7839 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 7840 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 7841 connector_type == DRM_MODE_CONNECTOR_eDP) { 7842 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 7843 7844 if (!aconnector->mst_port) 7845 drm_connector_attach_vrr_capable_property(&aconnector->base); 7846 7847 #ifdef CONFIG_DRM_AMD_DC_HDCP 7848 if (adev->dm.hdcp_workqueue) 7849 drm_connector_attach_content_protection_property(&aconnector->base, true); 7850 #endif 7851 } 7852 } 7853 7854 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 7855 struct i2c_msg *msgs, int num) 7856 { 7857 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 7858 struct ddc_service *ddc_service = i2c->ddc_service; 7859 struct i2c_command cmd; 7860 int i; 7861 int result = -EIO; 7862 7863 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 7864 7865 if (!cmd.payloads) 7866 return result; 7867 7868 cmd.number_of_payloads = num; 7869 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 7870 cmd.speed = 100; 7871 7872 for (i = 0; i < num; i++) { 7873 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 7874 cmd.payloads[i].address = msgs[i].addr; 7875 cmd.payloads[i].length = msgs[i].len; 7876 cmd.payloads[i].data = msgs[i].buf; 7877 } 7878 7879 if (dc_submit_i2c( 7880 ddc_service->ctx->dc, 7881 ddc_service->ddc_pin->hw_info.ddc_channel, 7882 &cmd)) 7883 result = num; 7884 7885 kfree(cmd.payloads); 7886 return result; 7887 } 7888 7889 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 7890 { 7891 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 7892 } 7893 7894 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 7895 .master_xfer = amdgpu_dm_i2c_xfer, 7896 .functionality = amdgpu_dm_i2c_func, 7897 }; 7898 7899 static struct amdgpu_i2c_adapter * 7900 create_i2c(struct ddc_service *ddc_service, 7901 int link_index, 7902 int *res) 7903 { 7904 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 7905 struct amdgpu_i2c_adapter *i2c; 7906 7907 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 7908 if (!i2c) 7909 return NULL; 7910 #ifdef notyet 7911 i2c->base.owner = THIS_MODULE; 7912 i2c->base.class = I2C_CLASS_DDC; 7913 i2c->base.dev.parent = &adev->pdev->dev; 7914 #endif 7915 i2c->base.algo = &amdgpu_dm_i2c_algo; 7916 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 7917 i2c_set_adapdata(&i2c->base, i2c); 7918 i2c->ddc_service = ddc_service; 7919 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; 7920 7921 return i2c; 7922 } 7923 7924 7925 /* 7926 * Note: this function assumes that dc_link_detect() was called for the 7927 * dc_link which will be represented by this aconnector. 
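 * In particular, link->connector_signal must already be valid here, since it
 * is translated into a DRM connector type below.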
7928 */ 7929 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 7930 struct amdgpu_dm_connector *aconnector, 7931 uint32_t link_index, 7932 struct amdgpu_encoder *aencoder) 7933 { 7934 int res = 0; 7935 int connector_type; 7936 struct dc *dc = dm->dc; 7937 struct dc_link *link = dc_get_link_at_index(dc, link_index); 7938 struct amdgpu_i2c_adapter *i2c; 7939 7940 link->priv = aconnector; 7941 7942 DRM_DEBUG_DRIVER("%s()\n", __func__); 7943 7944 i2c = create_i2c(link->ddc, link->link_index, &res); 7945 if (!i2c) { 7946 DRM_ERROR("Failed to create i2c adapter data\n"); 7947 return -ENOMEM; 7948 } 7949 7950 aconnector->i2c = i2c; 7951 res = i2c_add_adapter(&i2c->base); 7952 7953 if (res) { 7954 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 7955 goto out_free; 7956 } 7957 7958 connector_type = to_drm_connector_type(link->connector_signal); 7959 7960 res = drm_connector_init_with_ddc( 7961 dm->ddev, 7962 &aconnector->base, 7963 &amdgpu_dm_connector_funcs, 7964 connector_type, 7965 &i2c->base); 7966 7967 if (res) { 7968 DRM_ERROR("connector_init failed\n"); 7969 aconnector->connector_id = -1; 7970 goto out_free; 7971 } 7972 7973 drm_connector_helper_add( 7974 &aconnector->base, 7975 &amdgpu_dm_connector_helper_funcs); 7976 7977 amdgpu_dm_connector_init_helper( 7978 dm, 7979 aconnector, 7980 connector_type, 7981 link, 7982 link_index); 7983 7984 drm_connector_attach_encoder( 7985 &aconnector->base, &aencoder->base); 7986 7987 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 7988 || connector_type == DRM_MODE_CONNECTOR_eDP) 7989 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 7990 7991 out_free: 7992 if (res) { 7993 kfree(i2c); 7994 aconnector->i2c = NULL; 7995 } 7996 return res; 7997 } 7998 7999 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8000 { 8001 switch (adev->mode_info.num_crtc) { 8002 case 1: 8003 return 0x1; 8004 case 2: 8005 return 0x3; 8006 case 3: 8007 return 0x7; 8008 case 4: 8009 return 0xf; 8010 case 5: 8011 return 0x1f; 8012 case 6: 8013 default: 8014 return 0x3f; 8015 } 8016 } 8017 8018 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8019 struct amdgpu_encoder *aencoder, 8020 uint32_t link_index) 8021 { 8022 struct amdgpu_device *adev = drm_to_adev(dev); 8023 8024 int res = drm_encoder_init(dev, 8025 &aencoder->base, 8026 &amdgpu_dm_encoder_funcs, 8027 DRM_MODE_ENCODER_TMDS, 8028 NULL); 8029 8030 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8031 8032 if (!res) 8033 aencoder->encoder_id = link_index; 8034 else 8035 aencoder->encoder_id = -1; 8036 8037 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8038 8039 return res; 8040 } 8041 8042 static void manage_dm_interrupts(struct amdgpu_device *adev, 8043 struct amdgpu_crtc *acrtc, 8044 bool enable) 8045 { 8046 /* 8047 * We have no guarantee that the frontend index maps to the same 8048 * backend index - some even map to more than one. 8049 * 8050 * TODO: Use a different interrupt or check DC itself for the mapping. 
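 *
 * Note the ordering below: the enable path turns vblank handling on before
 * enabling the pageflip IRQ, and the disable path tears them down in the
 * reverse order.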
 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this
	 *
	 * Handles: ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED)
	 * and the restored state will be ENABLED
	 *
	 * Handles: UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
8153 * 8154 * Handles: DESIRED -> DESIRED (Special case) 8155 */ 8156 if (!(old_state->crtc && old_state->crtc->enabled) && 8157 state->crtc && state->crtc->enabled && 8158 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8159 dm_con_state->update_hdcp = false; 8160 return true; 8161 } 8162 8163 /* Hot-plug, headless s3, dpms 8164 * 8165 * Only start HDCP if the display is connected/enabled. 8166 * update_hdcp flag will be set to false until the next 8167 * HPD comes in. 8168 * 8169 * Handles: DESIRED -> DESIRED (Special case) 8170 */ 8171 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8172 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8173 dm_con_state->update_hdcp = false; 8174 return true; 8175 } 8176 8177 /* 8178 * Handles: UNDESIRED -> UNDESIRED 8179 * DESIRED -> DESIRED 8180 * ENABLED -> ENABLED 8181 */ 8182 if (old_state->content_protection == state->content_protection) 8183 return false; 8184 8185 /* 8186 * Handles: UNDESIRED -> DESIRED 8187 * DESIRED -> UNDESIRED 8188 * ENABLED -> UNDESIRED 8189 */ 8190 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) 8191 return true; 8192 8193 /* 8194 * Handles: DESIRED -> ENABLED 8195 */ 8196 return false; 8197 } 8198 8199 #endif 8200 static void remove_stream(struct amdgpu_device *adev, 8201 struct amdgpu_crtc *acrtc, 8202 struct dc_stream_state *stream) 8203 { 8204 /* this is the update mode case */ 8205 8206 acrtc->otg_inst = -1; 8207 acrtc->enabled = false; 8208 } 8209 8210 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 8211 struct dc_cursor_position *position) 8212 { 8213 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8214 int x, y; 8215 int xorigin = 0, yorigin = 0; 8216 8217 if (!crtc || !plane->state->fb) 8218 return 0; 8219 8220 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 8221 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 8222 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 8223 __func__, 8224 plane->state->crtc_w, 8225 plane->state->crtc_h); 8226 return -EINVAL; 8227 } 8228 8229 x = plane->state->crtc_x; 8230 y = plane->state->crtc_y; 8231 8232 if (x <= -amdgpu_crtc->max_cursor_width || 8233 y <= -amdgpu_crtc->max_cursor_height) 8234 return 0; 8235 8236 if (x < 0) { 8237 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 8238 x = 0; 8239 } 8240 if (y < 0) { 8241 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 8242 y = 0; 8243 } 8244 position->enable = true; 8245 position->translate_by_source = true; 8246 position->x = x; 8247 position->y = y; 8248 position->x_hotspot = xorigin; 8249 position->y_hotspot = yorigin; 8250 8251 return 0; 8252 } 8253 8254 static void handle_cursor_update(struct drm_plane *plane, 8255 struct drm_plane_state *old_plane_state) 8256 { 8257 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8258 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8259 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8260 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8261 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8262 uint64_t address = afb ? 
afb->address : 0; 8263 struct dc_cursor_position position = {0}; 8264 struct dc_cursor_attributes attributes; 8265 int ret; 8266 8267 if (!plane->state->fb && !old_plane_state->fb) 8268 return; 8269 8270 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", 8271 __func__, 8272 amdgpu_crtc->crtc_id, 8273 plane->state->crtc_w, 8274 plane->state->crtc_h); 8275 8276 ret = get_cursor_position(plane, crtc, &position); 8277 if (ret) 8278 return; 8279 8280 if (!position.enable) { 8281 /* turn off cursor */ 8282 if (crtc_state && crtc_state->stream) { 8283 mutex_lock(&adev->dm.dc_lock); 8284 dc_stream_set_cursor_position(crtc_state->stream, 8285 &position); 8286 mutex_unlock(&adev->dm.dc_lock); 8287 } 8288 return; 8289 } 8290 8291 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8292 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8293 8294 memset(&attributes, 0, sizeof(attributes)); 8295 attributes.address.high_part = upper_32_bits(address); 8296 attributes.address.low_part = lower_32_bits(address); 8297 attributes.width = plane->state->crtc_w; 8298 attributes.height = plane->state->crtc_h; 8299 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8300 attributes.rotation_angle = 0; 8301 attributes.attribute_flags.value = 0; 8302 8303 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8304 8305 if (crtc_state->stream) { 8306 mutex_lock(&adev->dm.dc_lock); 8307 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8308 &attributes)) 8309 DRM_ERROR("DC failed to set cursor attributes\n"); 8310 8311 if (!dc_stream_set_cursor_position(crtc_state->stream, 8312 &position)) 8313 DRM_ERROR("DC failed to set cursor position\n"); 8314 mutex_unlock(&adev->dm.dc_lock); 8315 } 8316 } 8317 8318 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8319 { 8320 8321 assert_spin_locked(&acrtc->base.dev->event_lock); 8322 WARN_ON(acrtc->event); 8323 8324 acrtc->event = acrtc->base.state->event; 8325 8326 /* Set the flip status */ 8327 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8328 8329 /* Mark this event as consumed */ 8330 acrtc->base.state->event = NULL; 8331 8332 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8333 acrtc->crtc_id); 8334 } 8335 8336 static void update_freesync_state_on_stream( 8337 struct amdgpu_display_manager *dm, 8338 struct dm_crtc_state *new_crtc_state, 8339 struct dc_stream_state *new_stream, 8340 struct dc_plane_state *surface, 8341 u32 flip_timestamp_in_us) 8342 { 8343 struct mod_vrr_params vrr_params; 8344 struct dc_info_packet vrr_infopacket = {0}; 8345 struct amdgpu_device *adev = dm->adev; 8346 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8347 unsigned long flags; 8348 bool pack_sdp_v1_3 = false; 8349 8350 if (!new_stream) 8351 return; 8352 8353 /* 8354 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8355 * For now it's sufficient to just guard against these conditions. 8356 */ 8357 8358 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8359 return; 8360 8361 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8362 vrr_params = acrtc->dm_irq_params.vrr_params; 8363 8364 if (surface) { 8365 mod_freesync_handle_preflip( 8366 dm->freesync_module, 8367 surface, 8368 new_stream, 8369 flip_timestamp_in_us, 8370 &vrr_params); 8371 8372 if (adev->family < AMDGPU_FAMILY_AI && 8373 amdgpu_dm_vrr_active(new_crtc_state)) { 8374 mod_freesync_handle_v_update(dm->freesync_module, 8375 new_stream, &vrr_params); 8376 8377 /* Need to call this before the frame ends. 
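			 * An adjustment applied after the frame ends would
			 * only take effect on the following frame.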
*/ 8378 dc_stream_adjust_vmin_vmax(dm->dc, 8379 new_crtc_state->stream, 8380 &vrr_params.adjust); 8381 } 8382 } 8383 8384 mod_freesync_build_vrr_infopacket( 8385 dm->freesync_module, 8386 new_stream, 8387 &vrr_params, 8388 PACKET_TYPE_VRR, 8389 TRANSFER_FUNC_UNKNOWN, 8390 &vrr_infopacket, 8391 pack_sdp_v1_3); 8392 8393 new_crtc_state->freesync_timing_changed |= 8394 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 8395 &vrr_params.adjust, 8396 sizeof(vrr_params.adjust)) != 0); 8397 8398 new_crtc_state->freesync_vrr_info_changed |= 8399 (memcmp(&new_crtc_state->vrr_infopacket, 8400 &vrr_infopacket, 8401 sizeof(vrr_infopacket)) != 0); 8402 8403 acrtc->dm_irq_params.vrr_params = vrr_params; 8404 new_crtc_state->vrr_infopacket = vrr_infopacket; 8405 8406 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; 8407 new_stream->vrr_infopacket = vrr_infopacket; 8408 8409 if (new_crtc_state->freesync_vrr_info_changed) 8410 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8411 new_crtc_state->base.crtc->base.id, 8412 (int)new_crtc_state->base.vrr_enabled, 8413 (int)vrr_params.state); 8414 8415 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8416 } 8417 8418 static void update_stream_irq_parameters( 8419 struct amdgpu_display_manager *dm, 8420 struct dm_crtc_state *new_crtc_state) 8421 { 8422 struct dc_stream_state *new_stream = new_crtc_state->stream; 8423 struct mod_vrr_params vrr_params; 8424 struct mod_freesync_config config = new_crtc_state->freesync_config; 8425 struct amdgpu_device *adev = dm->adev; 8426 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8427 unsigned long flags; 8428 8429 if (!new_stream) 8430 return; 8431 8432 /* 8433 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8434 * For now it's sufficient to just guard against these conditions. 8435 */ 8436 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8437 return; 8438 8439 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8440 vrr_params = acrtc->dm_irq_params.vrr_params; 8441 8442 if (new_crtc_state->vrr_supported && 8443 config.min_refresh_in_uhz && 8444 config.max_refresh_in_uhz) { 8445 /* 8446 * if freesync compatible mode was set, config.state will be set 8447 * in atomic check 8448 */ 8449 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8450 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8451 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8452 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8453 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8454 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8455 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8456 } else { 8457 config.state = new_crtc_state->base.vrr_enabled ? 
8458 VRR_STATE_ACTIVE_VARIABLE : 8459 VRR_STATE_INACTIVE; 8460 } 8461 } else { 8462 config.state = VRR_STATE_UNSUPPORTED; 8463 } 8464 8465 mod_freesync_build_vrr_params(dm->freesync_module, 8466 new_stream, 8467 &config, &vrr_params); 8468 8469 new_crtc_state->freesync_timing_changed |= 8470 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 8471 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); 8472 8473 new_crtc_state->freesync_config = config; 8474 /* Copy state for access from DM IRQ handler */ 8475 acrtc->dm_irq_params.freesync_config = config; 8476 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8477 acrtc->dm_irq_params.vrr_params = vrr_params; 8478 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8479 } 8480 8481 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8482 struct dm_crtc_state *new_state) 8483 { 8484 bool old_vrr_active = amdgpu_dm_vrr_active(old_state); 8485 bool new_vrr_active = amdgpu_dm_vrr_active(new_state); 8486 8487 if (!old_vrr_active && new_vrr_active) { 8488 /* Transition VRR inactive -> active: 8489 * While VRR is active, we must not disable vblank irq, as a 8490 * reenable after disable would compute bogus vblank/pflip 8491 * timestamps if it likely happened inside display front-porch. 8492 * 8493 * We also need vupdate irq for the actual core vblank handling 8494 * at end of vblank. 8495 */ 8496 dm_set_vupdate_irq(new_state->base.crtc, true); 8497 drm_crtc_vblank_get(new_state->base.crtc); 8498 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8499 __func__, new_state->base.crtc->base.id); 8500 } else if (old_vrr_active && !new_vrr_active) { 8501 /* Transition VRR active -> inactive: 8502 * Allow vblank irq disable again for fixed refresh rate. 8503 */ 8504 dm_set_vupdate_irq(new_state->base.crtc, false); 8505 drm_crtc_vblank_put(new_state->base.crtc); 8506 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8507 __func__, new_state->base.crtc->base.id); 8508 } 8509 } 8510 8511 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8512 { 8513 struct drm_plane *plane; 8514 struct drm_plane_state *old_plane_state; 8515 int i; 8516 8517 /* 8518 * TODO: Make this per-stream so we don't issue redundant updates for 8519 * commits with multiple streams. 
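	 * As written, every cursor plane in the atomic state is walked, even
	 * those on streams that did not otherwise change.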
8520 */ 8521 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8522 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8523 handle_cursor_update(plane, old_plane_state); 8524 } 8525 8526 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8527 struct dc_state *dc_state, 8528 struct drm_device *dev, 8529 struct amdgpu_display_manager *dm, 8530 struct drm_crtc *pcrtc, 8531 bool wait_for_vblank) 8532 { 8533 uint32_t i; 8534 uint64_t timestamp_ns; 8535 struct drm_plane *plane; 8536 struct drm_plane_state *old_plane_state, *new_plane_state; 8537 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8538 struct drm_crtc_state *new_pcrtc_state = 8539 drm_atomic_get_new_crtc_state(state, pcrtc); 8540 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8541 struct dm_crtc_state *dm_old_crtc_state = 8542 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8543 int planes_count = 0, vpos, hpos; 8544 long r; 8545 unsigned long flags; 8546 struct amdgpu_bo *abo; 8547 uint32_t target_vblank, last_flip_vblank; 8548 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 8549 bool pflip_present = false; 8550 struct { 8551 struct dc_surface_update surface_updates[MAX_SURFACES]; 8552 struct dc_plane_info plane_infos[MAX_SURFACES]; 8553 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8554 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8555 struct dc_stream_update stream_update; 8556 } *bundle; 8557 8558 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8559 8560 if (!bundle) { 8561 dm_error("Failed to allocate update bundle\n"); 8562 goto cleanup; 8563 } 8564 8565 /* 8566 * Disable the cursor first if we're disabling all the planes. 8567 * It'll remain on the screen after the planes are re-enabled 8568 * if we don't. 
 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
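		 * Such changes push the update type past UPDATE_TYPE_FAST,
		 * which is why flip_immediate below also requires a fast
		 * update.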
8647 */ 8648 bundle->flip_addrs[planes_count].flip_immediate = 8649 crtc->state->async_flip && 8650 acrtc_state->update_type == UPDATE_TYPE_FAST; 8651 8652 timestamp_ns = ktime_get_ns(); 8653 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 8654 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 8655 bundle->surface_updates[planes_count].surface = dc_plane; 8656 8657 if (!bundle->surface_updates[planes_count].surface) { 8658 DRM_ERROR("No surface for CRTC: id=%d\n", 8659 acrtc_attach->crtc_id); 8660 continue; 8661 } 8662 8663 if (plane == pcrtc->primary) 8664 update_freesync_state_on_stream( 8665 dm, 8666 acrtc_state, 8667 acrtc_state->stream, 8668 dc_plane, 8669 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 8670 8671 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n", 8672 __func__, 8673 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 8674 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 8675 8676 planes_count += 1; 8677 8678 } 8679 8680 if (pflip_present) { 8681 if (!vrr_active) { 8682 /* Use old throttling in non-vrr fixed refresh rate mode 8683 * to keep flip scheduling based on target vblank counts 8684 * working in a backwards compatible way, e.g., for 8685 * clients using the GLX_OML_sync_control extension or 8686 * DRI3/Present extension with defined target_msc. 8687 */ 8688 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 8689 } 8690 else { 8691 /* For variable refresh rate mode only: 8692 * Get vblank of last completed flip to avoid > 1 vrr 8693 * flips per video frame by use of throttling, but allow 8694 * flip programming anywhere in the possibly large 8695 * variable vrr vblank interval for fine-grained flip 8696 * timing control and more opportunity to avoid stutter 8697 * on late submission of flips. 8698 */ 8699 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8700 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 8701 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8702 } 8703 8704 target_vblank = last_flip_vblank + wait_for_vblank; 8705 8706 /* 8707 * Wait until we're out of the vertical blank period before the one 8708 * targeted by the flip 8709 */ 8710 while ((acrtc_attach->enabled && 8711 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 8712 0, &vpos, &hpos, NULL, 8713 NULL, &pcrtc->hwmode) 8714 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 8715 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 8716 (int)(target_vblank - 8717 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 8718 usleep_range(1000, 1100); 8719 } 8720 8721 /** 8722 * Prepare the flip event for the pageflip interrupt to handle. 8723 * 8724 * This only works in the case where we've already turned on the 8725 * appropriate hardware blocks (eg. HUBP) so in the transition case 8726 * from 0 -> n planes we have to skip a hardware generated event 8727 * and rely on sending it from software. 
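	 * That is why the event is only armed below when active_planes > 0.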
8728 */ 8729 if (acrtc_attach->base.state->event && 8730 acrtc_state->active_planes > 0) { 8731 drm_crtc_vblank_get(pcrtc); 8732 8733 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8734 8735 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 8736 prepare_flip_isr(acrtc_attach); 8737 8738 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8739 } 8740 8741 if (acrtc_state->stream) { 8742 if (acrtc_state->freesync_vrr_info_changed) 8743 bundle->stream_update.vrr_infopacket = 8744 &acrtc_state->stream->vrr_infopacket; 8745 } 8746 } 8747 8748 /* Update the planes if changed or disable if we don't have any. */ 8749 if ((planes_count || acrtc_state->active_planes == 0) && 8750 acrtc_state->stream) { 8751 #if defined(CONFIG_DRM_AMD_DC_DCN) 8752 /* 8753 * If PSR or idle optimizations are enabled then flush out 8754 * any pending work before hardware programming. 8755 */ 8756 if (dm->vblank_control_workqueue) 8757 flush_workqueue(dm->vblank_control_workqueue); 8758 #endif 8759 8760 bundle->stream_update.stream = acrtc_state->stream; 8761 if (new_pcrtc_state->mode_changed) { 8762 bundle->stream_update.src = acrtc_state->stream->src; 8763 bundle->stream_update.dst = acrtc_state->stream->dst; 8764 } 8765 8766 if (new_pcrtc_state->color_mgmt_changed) { 8767 /* 8768 * TODO: This isn't fully correct since we've actually 8769 * already modified the stream in place. 8770 */ 8771 bundle->stream_update.gamut_remap = 8772 &acrtc_state->stream->gamut_remap_matrix; 8773 bundle->stream_update.output_csc_transform = 8774 &acrtc_state->stream->csc_color_matrix; 8775 bundle->stream_update.out_transfer_func = 8776 acrtc_state->stream->out_transfer_func; 8777 } 8778 8779 acrtc_state->stream->abm_level = acrtc_state->abm_level; 8780 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 8781 bundle->stream_update.abm_level = &acrtc_state->abm_level; 8782 8783 /* 8784 * If FreeSync state on the stream has changed then we need to 8785 * re-adjust the min/max bounds now that DC doesn't handle this 8786 * as part of commit. 8787 */ 8788 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 8789 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8790 dc_stream_adjust_vmin_vmax( 8791 dm->dc, acrtc_state->stream, 8792 &acrtc_attach->dm_irq_params.vrr_params.adjust); 8793 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8794 } 8795 mutex_lock(&dm->dc_lock); 8796 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8797 acrtc_state->stream->link->psr_settings.psr_allow_active) 8798 amdgpu_dm_psr_disable(acrtc_state->stream); 8799 8800 dc_commit_updates_for_stream(dm->dc, 8801 bundle->surface_updates, 8802 planes_count, 8803 acrtc_state->stream, 8804 &bundle->stream_update, 8805 dc_state); 8806 8807 /** 8808 * Enable or disable the interrupts on the backend. 8809 * 8810 * Most pipes are put into power gating when unused. 8811 * 8812 * When power gating is enabled on a pipe we lose the 8813 * interrupt enablement state when power gating is disabled. 8814 * 8815 * So we need to update the IRQ control state in hardware 8816 * whenever the pipe turns on (since it could be previously 8817 * power gated) or off (since some pipes can't be power gated 8818 * on some ASICs). 
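		 * A change in active_planes serves as the on/off trigger for
		 * reapplying the pageflip IRQ state here.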
8819 */ 8820 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 8821 dm_update_pflip_irq_state(drm_to_adev(dev), 8822 acrtc_attach); 8823 8824 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8825 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 8826 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 8827 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8828 8829 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 8830 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8831 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 8832 struct amdgpu_dm_connector *aconn = 8833 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8834 8835 if (aconn->psr_skip_count > 0) 8836 aconn->psr_skip_count--; 8837 8838 /* Allow PSR when skip count is 0. */ 8839 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 8840 } else { 8841 acrtc_attach->dm_irq_params.allow_psr_entry = false; 8842 } 8843 8844 mutex_unlock(&dm->dc_lock); 8845 } 8846 8847 /* 8848 * Update cursor state *after* programming all the planes. 8849 * This avoids redundant programming in the case where we're going 8850 * to be disabling a single plane - those pipes are being disabled. 8851 */ 8852 if (acrtc_state->active_planes) 8853 amdgpu_dm_commit_cursors(state); 8854 8855 cleanup: 8856 kfree(bundle); 8857 } 8858 8859 static void amdgpu_dm_commit_audio(struct drm_device *dev, 8860 struct drm_atomic_state *state) 8861 { 8862 struct amdgpu_device *adev = drm_to_adev(dev); 8863 struct amdgpu_dm_connector *aconnector; 8864 struct drm_connector *connector; 8865 struct drm_connector_state *old_con_state, *new_con_state; 8866 struct drm_crtc_state *new_crtc_state; 8867 struct dm_crtc_state *new_dm_crtc_state; 8868 const struct dc_stream_status *status; 8869 int i, inst; 8870 8871 /* Notify device removals. */ 8872 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8873 if (old_con_state->crtc != new_con_state->crtc) { 8874 /* CRTC changes require notification. */ 8875 goto notify; 8876 } 8877 8878 if (!new_con_state->crtc) 8879 continue; 8880 8881 new_crtc_state = drm_atomic_get_new_crtc_state( 8882 state, new_con_state->crtc); 8883 8884 if (!new_crtc_state) 8885 continue; 8886 8887 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8888 continue; 8889 8890 notify: 8891 aconnector = to_amdgpu_dm_connector(connector); 8892 8893 mutex_lock(&adev->dm.audio_lock); 8894 inst = aconnector->audio_inst; 8895 aconnector->audio_inst = -1; 8896 mutex_unlock(&adev->dm.audio_lock); 8897 8898 amdgpu_dm_audio_eld_notify(adev, inst); 8899 } 8900 8901 /* Notify audio device additions. 
 */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
				state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state.
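		 * Build a scratch copy of the current DC state so the commit
		 * path below still has a valid context to program.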
 */
		dc_state_temp = dc_create_state(dm->dc);
		ASSERT(dc_state_temp);
		dc_state = dc_state_temp;
		dc_resource_state_copy_construct_current(dm->dc, dc_state);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (old_crtc_state->active &&
		    (!new_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			manage_dm_interrupts(adev, acrtc, false);
			dc_stream_release(dm_old_crtc_state->stream);
		}
	}

	drm_atomic_helper_calc_timestamping_constants(state);

	/* update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_ATOMIC(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Disable cursor if disabling crtc */
		if (old_crtc_state->active && !new_crtc_state->active) {
			struct dc_cursor_position position;

			memset(&position, 0, sizeof(position));
			mutex_lock(&dm->dc_lock);
			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
			mutex_unlock(&dm->dc_lock);
		}

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/* handles headless hotplug case, updating new_state and
		 * aconnector as needed
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode
				 * on a display which is in fact disconnected.
				 * dc_sink is NULL in this case on aconnector.
				 * We expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			pm_runtime_get_noresume(dev->dev);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
			mode_set_reset_required = true;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			mode_set_reset_required = true;
		}
	} /* for_each_crtc_in_state() */

	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR */
		if (mode_set_reset_required) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (dm->vblank_control_workqueue)
				flush_workqueue(dm->vblank_control_workqueue);
#endif
			amdgpu_dm_psr_disable_all(dm);
		}

		dm_enable_per_frame_crtc_master_sync(dc_state);
		mutex_lock(&dm->dc_lock);
		WARN_ON(!dc_commit_state(dm->dc, dc_state));
#if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
#endif
		mutex_unlock(&dm->dc_lock);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream != NULL) {
			const struct dc_stream_status *status =
					dc_stream_get_status(dm_new_crtc_state->stream);

			if (!status)
				status = dc_stream_get_status_from_state(dc_state,
									 dm_new_crtc_state->stream);
			if (!status)
				DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
			else
				acrtc->otg_inst = status->primary_otg_inst;
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		new_crtc_state = NULL;

		if (acrtc)
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type,
				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
	}
#endif

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update dummy_updates[MAX_SURFACES];
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&dummy_updates, 0, sizeof(dummy_updates));
		memset(&stream_update,
9169
9170 if (acrtc) {
9171 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9172 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9173 }
9174
9175 /* Skip any modesets/resets */
9176 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9177 continue;
9178
9179 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9180 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9181
9182 scaling_changed = is_scaling_state_different(dm_new_con_state,
9183 dm_old_con_state);
9184
9185 abm_changed = dm_new_crtc_state->abm_level !=
9186 dm_old_crtc_state->abm_level;
9187
9188 hdr_changed =
9189 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9190
9191 if (!scaling_changed && !abm_changed && !hdr_changed)
9192 continue;
9193
9194 stream_update.stream = dm_new_crtc_state->stream;
9195 if (scaling_changed) {
9196 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9197 dm_new_con_state, dm_new_crtc_state->stream);
9198
9199 stream_update.src = dm_new_crtc_state->stream->src;
9200 stream_update.dst = dm_new_crtc_state->stream->dst;
9201 }
9202
9203 if (abm_changed) {
9204 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9205
9206 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9207 }
9208
9209 if (hdr_changed) {
9210 fill_hdr_info_packet(new_con_state, &hdr_packet);
9211 stream_update.hdr_static_metadata = &hdr_packet;
9212 }
9213
9214 status = dc_stream_get_status(dm_new_crtc_state->stream);
9215
9216 if (WARN_ON(!status))
9217 continue;
9218
9219 WARN_ON(!status->plane_count);
9220
9221 /*
9222 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9223 * Here we create an empty update on each plane.
9224 * To fix this, DC should permit updating only stream properties.
9225 */
9226 for (j = 0; j < status->plane_count; j++)
9227 dummy_updates[j].surface = status->plane_states[0];
9228
9229
9230 mutex_lock(&dm->dc_lock);
9231 dc_commit_updates_for_stream(dm->dc,
9232 dummy_updates,
9233 status->plane_count,
9234 dm_new_crtc_state->stream,
9235 &stream_update,
9236 dc_state);
9237 mutex_unlock(&dm->dc_lock);
9238 }
9239
9240 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9241 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9242 new_crtc_state, i) {
9243 if (old_crtc_state->active && !new_crtc_state->active)
9244 crtc_disable_count++;
9245
9246 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9247 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9248
9249 /* Update the freesync config on the crtc state and the parameters used by the irq handlers */
9250 update_stream_irq_parameters(dm, dm_new_crtc_state);
9251
9252 /* Handle vrr on->off / off->on transitions */
9253 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9254 dm_new_crtc_state);
9255 }
9256
9257 /*
9258 * Enable interrupts for CRTCs that are newly enabled or went through
9259 * a modeset. This is intentionally deferred until after the front-end
9260 * state has been modified, so that the OTG is already on and the IRQ
9261 * handlers never see stale or invalid state.
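* In particular, manage_dm_interrupts() below runs only after
* dc_commit_state() above has programmed the new streams.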
9262 */
9263 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9264 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9265 #ifdef CONFIG_DEBUG_FS
9266 bool configure_crc = false;
9267 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9268 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9269 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9270 #endif
9271 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9272 cur_crc_src = acrtc->dm_irq_params.crc_src;
9273 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9274 #endif
9275 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9276
9277 if (new_crtc_state->active &&
9278 (!old_crtc_state->active ||
9279 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9280 dc_stream_retain(dm_new_crtc_state->stream);
9281 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9282 manage_dm_interrupts(adev, acrtc, true);
9283
9284 #ifdef CONFIG_DEBUG_FS
9285 /*
9286 * Frontend may have changed so reapply the CRC capture
9287 * settings for the stream.
9288 */
9289 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9290
9291 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9292 configure_crc = true;
9293 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9294 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9295 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9296 acrtc->dm_irq_params.crc_window.update_win = true;
9297 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9298 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9299 crc_rd_wrk->crtc = crtc;
9300 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9301 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9302 }
9303 #endif
9304 }
9305
9306 if (configure_crc)
9307 if (amdgpu_dm_crtc_configure_crc_source(
9308 crtc, dm_new_crtc_state, cur_crc_src))
9309 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9310 #endif
9311 }
9312 }
9313
9314 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9315 if (new_crtc_state->async_flip)
9316 wait_for_vblank = false;
9317
9318 /* Update planes when needed, per CRTC */
9319 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9320 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9321
9322 if (dm_new_crtc_state->stream)
9323 amdgpu_dm_commit_planes(state, dc_state, dev,
9324 dm, crtc, wait_for_vblank);
9325 }
9326
9327 /* Update audio instances for each connector.
*/ 9328 amdgpu_dm_commit_audio(dev, state); 9329 9330 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ 9331 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 9332 /* restore the backlight level */ 9333 for (i = 0; i < dm->num_of_edps; i++) { 9334 if (dm->backlight_dev[i] && 9335 (dm->actual_brightness[i] != dm->brightness[i])) 9336 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 9337 } 9338 #endif 9339 /* 9340 * send vblank event on all events not handled in flip and 9341 * mark consumed event for drm_atomic_helper_commit_hw_done 9342 */ 9343 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9344 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9345 9346 if (new_crtc_state->event) 9347 drm_send_event_locked(dev, &new_crtc_state->event->base); 9348 9349 new_crtc_state->event = NULL; 9350 } 9351 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9352 9353 /* Signal HW programming completion */ 9354 drm_atomic_helper_commit_hw_done(state); 9355 9356 if (wait_for_vblank) 9357 drm_atomic_helper_wait_for_flip_done(dev, state); 9358 9359 drm_atomic_helper_cleanup_planes(dev, state); 9360 9361 /* return the stolen vga memory back to VRAM */ 9362 if (!adev->mman.keep_stolen_vga_memory) 9363 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 9364 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 9365 9366 /* 9367 * Finally, drop a runtime PM reference for each newly disabled CRTC, 9368 * so we can put the GPU into runtime suspend if we're not driving any 9369 * displays anymore 9370 */ 9371 for (i = 0; i < crtc_disable_count; i++) 9372 pm_runtime_put_autosuspend(dev->dev); 9373 pm_runtime_mark_last_busy(dev->dev); 9374 9375 if (dc_state_temp) 9376 dc_release_state(dc_state_temp); 9377 } 9378 9379 9380 static int dm_force_atomic_commit(struct drm_connector *connector) 9381 { 9382 int ret = 0; 9383 struct drm_device *ddev = connector->dev; 9384 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 9385 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9386 struct drm_plane *plane = disconnected_acrtc->base.primary; 9387 struct drm_connector_state *conn_state; 9388 struct drm_crtc_state *crtc_state; 9389 struct drm_plane_state *plane_state; 9390 9391 if (!state) 9392 return -ENOMEM; 9393 9394 state->acquire_ctx = ddev->mode_config.acquire_ctx; 9395 9396 /* Construct an atomic state to restore previous display setting */ 9397 9398 /* 9399 * Attach connectors to drm_atomic_state 9400 */ 9401 conn_state = drm_atomic_get_connector_state(state, connector); 9402 9403 ret = PTR_ERR_OR_ZERO(conn_state); 9404 if (ret) 9405 goto out; 9406 9407 /* Attach crtc to drm_atomic_state*/ 9408 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 9409 9410 ret = PTR_ERR_OR_ZERO(crtc_state); 9411 if (ret) 9412 goto out; 9413 9414 /* force a restore */ 9415 crtc_state->mode_changed = true; 9416 9417 /* Attach plane to drm_atomic_state */ 9418 plane_state = drm_atomic_get_plane_state(state, plane); 9419 9420 ret = PTR_ERR_OR_ZERO(plane_state); 9421 if (ret) 9422 goto out; 9423 9424 /* Call commit internally with the state we just constructed */ 9425 ret = drm_atomic_commit(state); 9426 9427 out: 9428 drm_atomic_state_put(state); 9429 if (ret) 9430 DRM_ERROR("Restoring old state failed with %i\n", ret); 9431 9432 return ret; 9433 } 9434 9435 /* 9436 * This function handles all cases when set mode does not come upon hotplug. 
9437 * This includes when a display is unplugged and then plugged back into the
9438 * same port, and when running without usermode desktop manager support
9439 */
9440 void dm_restore_drm_connector_state(struct drm_device *dev,
9441 struct drm_connector *connector)
9442 {
9443 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9444 struct amdgpu_crtc *disconnected_acrtc;
9445 struct dm_crtc_state *acrtc_state;
9446
9447 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9448 return;
9449
9450 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9451 if (!disconnected_acrtc)
9452 return;
9453
9454 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9455 if (!acrtc_state->stream)
9456 return;
9457
9458 /*
9459 * If the previous sink is not released and different from the current,
9460 * we deduce we are in a state where we cannot rely on a usermode call
9461 * to turn on the display, so we do it here
9462 */
9463 if (acrtc_state->stream->sink != aconnector->dc_sink)
9464 dm_force_atomic_commit(&aconnector->base);
9465 }
9466
9467 /*
9468 * Grabs all modesetting locks to serialize against any blocking commits,
9469 * and waits for completion of all non-blocking commits.
9470 */
9471 static int do_aquire_global_lock(struct drm_device *dev,
9472 struct drm_atomic_state *state)
9473 {
9474 struct drm_crtc *crtc;
9475 struct drm_crtc_commit *commit;
9476 long ret;
9477
9478 /*
9479 * Adding all modeset locks to acquire_ctx ensures that
9480 * when the framework releases them, the extra locks we
9481 * are taking here will be released too.
9482 */
9483 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9484 if (ret)
9485 return ret;
9486
9487 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9488 spin_lock(&crtc->commit_lock);
9489 commit = list_first_entry_or_null(&crtc->commit_list,
9490 struct drm_crtc_commit, commit_entry);
9491 if (commit)
9492 drm_crtc_commit_get(commit);
9493 spin_unlock(&crtc->commit_lock);
9494
9495 if (!commit)
9496 continue;
9497
9498 /*
9499 * Make sure all pending HW programming completed and
9500 * page flips done
9501 */
9502 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9503
9504 if (ret > 0)
9505 ret = wait_for_completion_interruptible_timeout(
9506 &commit->flip_done, 10*HZ);
9507
9508 if (ret == 0)
9509 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9510 crtc->base.id, crtc->name);
9511
9512 drm_crtc_commit_put(commit);
9513 }
9514
9515 return ret < 0 ? ret : 0;
9516 }
9517
9518 static void get_freesync_config_for_crtc(
9519 struct dm_crtc_state *new_crtc_state,
9520 struct dm_connector_state *new_con_state)
9521 {
9522 struct mod_freesync_config config = {0};
9523 struct amdgpu_dm_connector *aconnector =
9524 to_amdgpu_dm_connector(new_con_state->base.connector);
9525 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9526 int vrefresh = drm_mode_vrefresh(mode);
9527 bool fs_vid_mode = false;
9528
9529 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9530 vrefresh >= aconnector->min_vfreq &&
9531 vrefresh <= aconnector->max_vfreq;
9532
9533 if (new_crtc_state->vrr_supported) {
9534 new_crtc_state->stream->ignore_msa_timing_param = true;
9535 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9536
9537 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9538 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9539 config.vsif_supported = true;
9540 config.btr = true;
9541
9542 if (fs_vid_mode) {
9543 config.state = VRR_STATE_ACTIVE_FIXED;
9544 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9545 goto out;
9546 } else if (new_crtc_state->base.vrr_enabled) {
9547 config.state = VRR_STATE_ACTIVE_VARIABLE;
9548 } else {
9549 config.state = VRR_STATE_INACTIVE;
9550 }
9551 }
9552 out:
9553 new_crtc_state->freesync_config = config;
9554 }
9555
9556 static void reset_freesync_config_for_crtc(
9557 struct dm_crtc_state *new_crtc_state)
9558 {
9559 new_crtc_state->vrr_supported = false;
9560
9561 memset(&new_crtc_state->vrr_infopacket, 0,
9562 sizeof(new_crtc_state->vrr_infopacket));
9563 }
9564
/*
 * Returns true only for a pure vertical front porch change: the pixel clock,
 * all horizontal timings and the vsync width must match, while vtotal and
 * the vsync position must both differ.
 */
9565 static bool
9566 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9567 struct drm_crtc_state *new_crtc_state)
9568 {
9569 struct drm_display_mode old_mode, new_mode;
9570
9571 if (!old_crtc_state || !new_crtc_state)
9572 return false;
9573
9574 old_mode = old_crtc_state->mode;
9575 new_mode = new_crtc_state->mode;
9576
9577 if (old_mode.clock == new_mode.clock &&
9578 old_mode.hdisplay == new_mode.hdisplay &&
9579 old_mode.vdisplay == new_mode.vdisplay &&
9580 old_mode.htotal == new_mode.htotal &&
9581 old_mode.vtotal != new_mode.vtotal &&
9582 old_mode.hsync_start == new_mode.hsync_start &&
9583 old_mode.vsync_start != new_mode.vsync_start &&
9584 old_mode.hsync_end == new_mode.hsync_end &&
9585 old_mode.vsync_end != new_mode.vsync_end &&
9586 old_mode.hskew == new_mode.hskew &&
9587 old_mode.vscan == new_mode.vscan &&
9588 (old_mode.vsync_end - old_mode.vsync_start) ==
9589 (new_mode.vsync_end - new_mode.vsync_start))
9590 return true;
9591
9592 return false;
9593 }
9594
/*
 * fixed_refresh_in_uhz = pixel clock (kHz) * 1000 * 1000000 / (htotal * vtotal).
 * For example, a 148500 kHz mode with htotal 2200 and vtotal 1125 works out
 * to 60000000 uHz, i.e. a fixed 60 Hz refresh rate.
 */
9595 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9596 uint64_t num, den, res;
9597 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9598
9599 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9600
9601 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9602 den = (unsigned long long)new_crtc_state->mode.htotal *
9603 (unsigned long long)new_crtc_state->mode.vtotal;
9604
9605 res = div_u64(num, den);
9606 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9607 }
9608
9609 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9610 struct drm_atomic_state *state,
9611 struct drm_crtc *crtc,
9612 struct drm_crtc_state *old_crtc_state,
9613 struct drm_crtc_state *new_crtc_state,
9614 bool enable,
9615 bool *lock_and_validation_needed)
9616 {
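/*
 * Called from amdgpu_dm_atomic_check() in two passes: first with
 * enable == false to remove the streams of disabled or reconfigured
 * CRTCs from the dc context, then with enable == true to add the new
 * streams back in. *lock_and_validation_needed is set whenever the dc
 * context was changed and global validation is required.
 */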
9617 struct dm_atomic_state *dm_state = NULL; 9618 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9619 struct dc_stream_state *new_stream; 9620 int ret = 0; 9621 9622 /* 9623 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 9624 * update changed items 9625 */ 9626 struct amdgpu_crtc *acrtc = NULL; 9627 struct amdgpu_dm_connector *aconnector = NULL; 9628 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 9629 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 9630 9631 new_stream = NULL; 9632 9633 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9634 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9635 acrtc = to_amdgpu_crtc(crtc); 9636 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 9637 9638 /* TODO This hack should go away */ 9639 if (aconnector && enable) { 9640 /* Make sure fake sink is created in plug-in scenario */ 9641 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 9642 &aconnector->base); 9643 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 9644 &aconnector->base); 9645 9646 if (IS_ERR(drm_new_conn_state)) { 9647 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 9648 goto fail; 9649 } 9650 9651 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 9652 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 9653 9654 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9655 goto skip_modeset; 9656 9657 new_stream = create_validate_stream_for_sink(aconnector, 9658 &new_crtc_state->mode, 9659 dm_new_conn_state, 9660 dm_old_crtc_state->stream); 9661 9662 /* 9663 * we can have no stream on ACTION_SET if a display 9664 * was disconnected during S3, in this case it is not an 9665 * error, the OS will be updated after detection, and 9666 * will do the right thing on next atomic commit 9667 */ 9668 9669 if (!new_stream) { 9670 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9671 __func__, acrtc->base.base.id); 9672 ret = -ENOMEM; 9673 goto fail; 9674 } 9675 9676 /* 9677 * TODO: Check VSDB bits to decide whether this should 9678 * be enabled or not. 9679 */ 9680 new_stream->triggered_crtc_reset.enabled = 9681 dm->force_timing_sync; 9682 9683 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 9684 9685 ret = fill_hdr_info_packet(drm_new_conn_state, 9686 &new_stream->hdr_static_metadata); 9687 if (ret) 9688 goto fail; 9689 9690 /* 9691 * If we already removed the old stream from the context 9692 * (and set the new stream to NULL) then we can't reuse 9693 * the old stream even if the stream and scaling are unchanged. 9694 * We'll hit the BUG_ON and black screen. 9695 * 9696 * TODO: Refactor this function to allow this check to work 9697 * in all conditions. 
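* That is why the freesync check directly below also requires
* dm_new_crtc_state->stream to still be non-NULL.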
9698 */ 9699 if (amdgpu_freesync_vid_mode && 9700 dm_new_crtc_state->stream && 9701 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 9702 goto skip_modeset; 9703 9704 if (dm_new_crtc_state->stream && 9705 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9706 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 9707 new_crtc_state->mode_changed = false; 9708 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 9709 new_crtc_state->mode_changed); 9710 } 9711 } 9712 9713 /* mode_changed flag may get updated above, need to check again */ 9714 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9715 goto skip_modeset; 9716 9717 DRM_DEBUG_ATOMIC( 9718 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 9719 "planes_changed:%d, mode_changed:%d,active_changed:%d," 9720 "connectors_changed:%d\n", 9721 acrtc->crtc_id, 9722 new_crtc_state->enable, 9723 new_crtc_state->active, 9724 new_crtc_state->planes_changed, 9725 new_crtc_state->mode_changed, 9726 new_crtc_state->active_changed, 9727 new_crtc_state->connectors_changed); 9728 9729 /* Remove stream for any changed/disabled CRTC */ 9730 if (!enable) { 9731 9732 if (!dm_old_crtc_state->stream) 9733 goto skip_modeset; 9734 9735 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 9736 is_timing_unchanged_for_freesync(new_crtc_state, 9737 old_crtc_state)) { 9738 new_crtc_state->mode_changed = false; 9739 DRM_DEBUG_DRIVER( 9740 "Mode change not required for front porch change, " 9741 "setting mode_changed to %d", 9742 new_crtc_state->mode_changed); 9743 9744 set_freesync_fixed_config(dm_new_crtc_state); 9745 9746 goto skip_modeset; 9747 } else if (amdgpu_freesync_vid_mode && aconnector && 9748 is_freesync_video_mode(&new_crtc_state->mode, 9749 aconnector)) { 9750 struct drm_display_mode *high_mode; 9751 9752 high_mode = get_highest_refresh_rate_mode(aconnector, false); 9753 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { 9754 set_freesync_fixed_config(dm_new_crtc_state); 9755 } 9756 } 9757 9758 ret = dm_atomic_get_state(state, &dm_state); 9759 if (ret) 9760 goto fail; 9761 9762 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 9763 crtc->base.id); 9764 9765 /* i.e. 
reset mode */
9766 if (dc_remove_stream_from_ctx(
9767 dm->dc,
9768 dm_state->context,
9769 dm_old_crtc_state->stream) != DC_OK) {
9770 ret = -EINVAL;
9771 goto fail;
9772 }
9773
9774 dc_stream_release(dm_old_crtc_state->stream);
9775 dm_new_crtc_state->stream = NULL;
9776
9777 reset_freesync_config_for_crtc(dm_new_crtc_state);
9778
9779 *lock_and_validation_needed = true;
9780
9781 } else { /* Add stream for any updated/enabled CRTC */
9782 /*
9783 * Quick fix to prevent a NULL pointer dereference on new_stream when
9784 * newly added MST connectors are not found in the existing crtc_state in chained mode.
9785 * TODO: dig out the root cause of this.
9786 */
9787 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9788 goto skip_modeset;
9789
9790 if (modereset_required(new_crtc_state))
9791 goto skip_modeset;
9792
9793 if (modeset_required(new_crtc_state, new_stream,
9794 dm_old_crtc_state->stream)) {
9795
9796 WARN_ON(dm_new_crtc_state->stream);
9797
9798 ret = dm_atomic_get_state(state, &dm_state);
9799 if (ret)
9800 goto fail;
9801
9802 dm_new_crtc_state->stream = new_stream;
9803
9804 dc_stream_retain(new_stream);
9805
9806 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9807 crtc->base.id);
9808
9809 if (dc_add_stream_to_ctx(
9810 dm->dc,
9811 dm_state->context,
9812 dm_new_crtc_state->stream) != DC_OK) {
9813 ret = -EINVAL;
9814 goto fail;
9815 }
9816
9817 *lock_and_validation_needed = true;
9818 }
9819 }
9820
9821 skip_modeset:
9822 /* Release extra reference */
9823 if (new_stream)
9824 dc_stream_release(new_stream);
9825
9826 /*
9827 * We want to do dc stream updates that do not require a
9828 * full modeset below.
9829 */
9830 if (!(enable && aconnector && new_crtc_state->active))
9831 return 0;
9832 /*
9833 * Given the above conditions, the dc state cannot be NULL because:
9834 * 1. we're in the process of enabling CRTCs (the stream has just been
9835 * added to the dc context, or was already in it),
9836 * 2. the CRTC has a valid connector attached, and
9837 * 3. it is currently active and enabled.
9838 * => The dc stream state currently exists.
9839 */
9840 BUG_ON(dm_new_crtc_state->stream == NULL);
9841
9842 /* Scaling or underscan settings */
9843 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9844 drm_atomic_crtc_needs_modeset(new_crtc_state))
9845 update_stream_scaling_settings(
9846 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9847
9848 /* ABM settings */
9849 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9850
9851 /*
9852 * Color management settings. We also update color properties
9853 * when a modeset is needed, to ensure it gets reprogrammed.
9854 */
9855 if (dm_new_crtc_state->base.color_mgmt_changed ||
9856 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9857 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9858 if (ret)
9859 goto fail;
9860 }
9861
9862 /* Update Freesync settings.
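* The config computed here is picked up again at commit time, when
* update_stream_irq_parameters() refreshes the vrr parameters.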
*/
9863 get_freesync_config_for_crtc(dm_new_crtc_state,
9864 dm_new_conn_state);
9865
9866 return ret;
9867
9868 fail:
9869 if (new_stream)
9870 dc_stream_release(new_stream);
9871 return ret;
9872 }
9873
9874 static bool should_reset_plane(struct drm_atomic_state *state,
9875 struct drm_plane *plane,
9876 struct drm_plane_state *old_plane_state,
9877 struct drm_plane_state *new_plane_state)
9878 {
9879 struct drm_plane *other;
9880 struct drm_plane_state *old_other_state, *new_other_state;
9881 struct drm_crtc_state *new_crtc_state;
9882 int i;
9883
9884 /*
9885 * TODO: Remove this hack once the checks below are
9886 * sufficient to determine when we need to reset all the planes
9887 * on the stream.
9888 */
9889 if (state->allow_modeset)
9890 return true;
9891
9892 /* Exit early if we know that we're adding or removing the plane. */
9893 if (old_plane_state->crtc != new_plane_state->crtc)
9894 return true;
9895
9896 /* old crtc == new_crtc == NULL, plane not in context. */
9897 if (!new_plane_state->crtc)
9898 return false;
9899
9900 new_crtc_state =
9901 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9902
9903 if (!new_crtc_state)
9904 return true;
9905
9906 /* CRTC Degamma changes currently require us to recreate planes. */
9907 if (new_crtc_state->color_mgmt_changed)
9908 return true;
9909
9910 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9911 return true;
9912
9913 /*
9914 * If there are any new primary or overlay planes being added or
9915 * removed then the z-order can potentially change. To ensure
9916 * correct z-order and pipe acquisition the current DC architecture
9917 * requires us to remove and recreate all existing planes.
9918 *
9919 * TODO: Come up with a more elegant solution for this.
9920 */
9921 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9922 struct amdgpu_framebuffer *old_afb, *new_afb;
9923 if (other->type == DRM_PLANE_TYPE_CURSOR)
9924 continue;
9925
9926 if (old_other_state->crtc != new_plane_state->crtc &&
9927 new_other_state->crtc != new_plane_state->crtc)
9928 continue;
9929
9930 if (old_other_state->crtc != new_other_state->crtc)
9931 return true;
9932
9933 /* Src/dst size and scaling updates. */
9934 if (old_other_state->src_w != new_other_state->src_w ||
9935 old_other_state->src_h != new_other_state->src_h ||
9936 old_other_state->crtc_w != new_other_state->crtc_w ||
9937 old_other_state->crtc_h != new_other_state->crtc_h)
9938 return true;
9939
9940 /* Rotation / mirroring updates. */
9941 if (old_other_state->rotation != new_other_state->rotation)
9942 return true;
9943
9944 /* Blending updates. */
9945 if (old_other_state->pixel_blend_mode !=
9946 new_other_state->pixel_blend_mode)
9947 return true;
9948
9949 /* Alpha updates. */
9950 if (old_other_state->alpha != new_other_state->alpha)
9951 return true;
9952
9953 /* Colorspace changes. */
9954 if (old_other_state->color_range != new_other_state->color_range ||
9955 old_other_state->color_encoding != new_other_state->color_encoding)
9956 return true;
9957
9958 /* Framebuffer checks fall at the end. */
9959 if (!old_other_state->fb || !new_other_state->fb)
9960 continue;
9961
9962 /* Pixel format changes can require bandwidth updates. */
9963 if (old_other_state->fb->format != new_other_state->fb->format)
9964 return true;
9965
9966 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9967 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9968
9969 /* Tiling and DCC changes also require bandwidth updates.
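* The tiling flags and the modifier together describe the surface
* layout that any earlier validation was based on.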
*/ 9970 if (old_afb->tiling_flags != new_afb->tiling_flags || 9971 old_afb->base.modifier != new_afb->base.modifier) 9972 return true; 9973 } 9974 9975 return false; 9976 } 9977 9978 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 9979 struct drm_plane_state *new_plane_state, 9980 struct drm_framebuffer *fb) 9981 { 9982 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 9983 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 9984 unsigned int pitch; 9985 bool linear; 9986 9987 if (fb->width > new_acrtc->max_cursor_width || 9988 fb->height > new_acrtc->max_cursor_height) { 9989 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 9990 new_plane_state->fb->width, 9991 new_plane_state->fb->height); 9992 return -EINVAL; 9993 } 9994 if (new_plane_state->src_w != fb->width << 16 || 9995 new_plane_state->src_h != fb->height << 16) { 9996 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 9997 return -EINVAL; 9998 } 9999 10000 /* Pitch in pixels */ 10001 pitch = fb->pitches[0] / fb->format->cpp[0]; 10002 10003 if (fb->width != pitch) { 10004 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10005 fb->width, pitch); 10006 return -EINVAL; 10007 } 10008 10009 switch (pitch) { 10010 case 64: 10011 case 128: 10012 case 256: 10013 /* FB pitch is supported by cursor plane */ 10014 break; 10015 default: 10016 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10017 return -EINVAL; 10018 } 10019 10020 /* Core DRM takes care of checking FB modifiers, so we only need to 10021 * check tiling flags when the FB doesn't have a modifier. */ 10022 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10023 if (adev->family < AMDGPU_FAMILY_AI) { 10024 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10025 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10026 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10027 } else { 10028 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10029 } 10030 if (!linear) { 10031 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10032 return -EINVAL; 10033 } 10034 } 10035 10036 return 0; 10037 } 10038 10039 static int dm_update_plane_state(struct dc *dc, 10040 struct drm_atomic_state *state, 10041 struct drm_plane *plane, 10042 struct drm_plane_state *old_plane_state, 10043 struct drm_plane_state *new_plane_state, 10044 bool enable, 10045 bool *lock_and_validation_needed) 10046 { 10047 10048 struct dm_atomic_state *dm_state = NULL; 10049 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10050 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10051 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10052 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10053 struct amdgpu_crtc *new_acrtc; 10054 bool needs_reset; 10055 int ret = 0; 10056 10057 10058 new_plane_crtc = new_plane_state->crtc; 10059 old_plane_crtc = old_plane_state->crtc; 10060 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10061 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10062 10063 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 10064 if (!enable || !new_plane_crtc || 10065 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10066 return 0; 10067 10068 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10069 10070 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10071 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10072 return -EINVAL; 10073 } 10074 10075 if (new_plane_state->fb) { 10076 ret = 
dm_check_cursor_fb(new_acrtc, new_plane_state, 10077 new_plane_state->fb); 10078 if (ret) 10079 return ret; 10080 } 10081 10082 return 0; 10083 } 10084 10085 needs_reset = should_reset_plane(state, plane, old_plane_state, 10086 new_plane_state); 10087 10088 /* Remove any changed/removed planes */ 10089 if (!enable) { 10090 if (!needs_reset) 10091 return 0; 10092 10093 if (!old_plane_crtc) 10094 return 0; 10095 10096 old_crtc_state = drm_atomic_get_old_crtc_state( 10097 state, old_plane_crtc); 10098 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10099 10100 if (!dm_old_crtc_state->stream) 10101 return 0; 10102 10103 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10104 plane->base.id, old_plane_crtc->base.id); 10105 10106 ret = dm_atomic_get_state(state, &dm_state); 10107 if (ret) 10108 return ret; 10109 10110 if (!dc_remove_plane_from_context( 10111 dc, 10112 dm_old_crtc_state->stream, 10113 dm_old_plane_state->dc_state, 10114 dm_state->context)) { 10115 10116 return -EINVAL; 10117 } 10118 10119 10120 dc_plane_state_release(dm_old_plane_state->dc_state); 10121 dm_new_plane_state->dc_state = NULL; 10122 10123 *lock_and_validation_needed = true; 10124 10125 } else { /* Add new planes */ 10126 struct dc_plane_state *dc_new_plane_state; 10127 10128 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10129 return 0; 10130 10131 if (!new_plane_crtc) 10132 return 0; 10133 10134 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 10135 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10136 10137 if (!dm_new_crtc_state->stream) 10138 return 0; 10139 10140 if (!needs_reset) 10141 return 0; 10142 10143 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 10144 if (ret) 10145 return ret; 10146 10147 WARN_ON(dm_new_plane_state->dc_state); 10148 10149 dc_new_plane_state = dc_create_plane_state(dc); 10150 if (!dc_new_plane_state) 10151 return -ENOMEM; 10152 10153 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 10154 plane->base.id, new_plane_crtc->base.id); 10155 10156 ret = fill_dc_plane_attributes( 10157 drm_to_adev(new_plane_crtc->dev), 10158 dc_new_plane_state, 10159 new_plane_state, 10160 new_crtc_state); 10161 if (ret) { 10162 dc_plane_state_release(dc_new_plane_state); 10163 return ret; 10164 } 10165 10166 ret = dm_atomic_get_state(state, &dm_state); 10167 if (ret) { 10168 dc_plane_state_release(dc_new_plane_state); 10169 return ret; 10170 } 10171 10172 /* 10173 * Any atomic check errors that occur after this will 10174 * not need a release. The plane state will be attached 10175 * to the stream, and therefore part of the atomic 10176 * state. It'll be released when the atomic state is 10177 * cleaned. 10178 */ 10179 if (!dc_add_plane_to_context( 10180 dc, 10181 dm_new_crtc_state->stream, 10182 dc_new_plane_state, 10183 dm_state->context)) { 10184 10185 dc_plane_state_release(dc_new_plane_state); 10186 return -EINVAL; 10187 } 10188 10189 dm_new_plane_state->dc_state = dc_new_plane_state; 10190 10191 /* Tell DC to do a full surface update every time there 10192 * is a plane change. Inefficient, but works for now. 
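* Setting update_flags.bits.full_update below makes DC treat the
* surface as fully changed instead of deriving a smaller update type.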
10193 */ 10194 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 10195 10196 *lock_and_validation_needed = true; 10197 } 10198 10199 10200 return ret; 10201 } 10202 10203 static int dm_check_crtc_cursor(struct drm_atomic_state *state, 10204 struct drm_crtc *crtc, 10205 struct drm_crtc_state *new_crtc_state) 10206 { 10207 struct drm_plane_state *new_cursor_state, *new_primary_state; 10208 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h; 10209 10210 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a 10211 * cursor per pipe but it's going to inherit the scaling and 10212 * positioning from the underlying pipe. Check the cursor plane's 10213 * blending properties match the primary plane's. */ 10214 10215 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); 10216 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary); 10217 if (!new_cursor_state || !new_primary_state || 10218 !new_cursor_state->fb || !new_primary_state->fb) { 10219 return 0; 10220 } 10221 10222 cursor_scale_w = new_cursor_state->crtc_w * 1000 / 10223 (new_cursor_state->src_w >> 16); 10224 cursor_scale_h = new_cursor_state->crtc_h * 1000 / 10225 (new_cursor_state->src_h >> 16); 10226 10227 primary_scale_w = new_primary_state->crtc_w * 1000 / 10228 (new_primary_state->src_w >> 16); 10229 primary_scale_h = new_primary_state->crtc_h * 1000 / 10230 (new_primary_state->src_h >> 16); 10231 10232 if (cursor_scale_w != primary_scale_w || 10233 cursor_scale_h != primary_scale_h) { 10234 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n"); 10235 return -EINVAL; 10236 } 10237 10238 return 0; 10239 } 10240 10241 #if defined(CONFIG_DRM_AMD_DC_DCN) 10242 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 10243 { 10244 struct drm_connector *connector; 10245 struct drm_connector_state *conn_state, *old_conn_state; 10246 struct amdgpu_dm_connector *aconnector = NULL; 10247 int i; 10248 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 10249 if (!conn_state->crtc) 10250 conn_state = old_conn_state; 10251 10252 if (conn_state->crtc != crtc) 10253 continue; 10254 10255 aconnector = to_amdgpu_dm_connector(connector); 10256 if (!aconnector->port || !aconnector->mst_port) 10257 aconnector = NULL; 10258 else 10259 break; 10260 } 10261 10262 if (!aconnector) 10263 return 0; 10264 10265 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); 10266 } 10267 #endif 10268 10269 static int validate_overlay(struct drm_atomic_state *state) 10270 { 10271 int i; 10272 struct drm_plane *plane; 10273 struct drm_plane_state *new_plane_state; 10274 struct drm_plane_state *primary_state, *overlay_state = NULL; 10275 10276 /* Check if primary plane is contained inside overlay */ 10277 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) { 10278 if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 10279 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10280 return 0; 10281 10282 overlay_state = new_plane_state; 10283 continue; 10284 } 10285 } 10286 10287 /* check if we're making changes to the overlay plane */ 10288 if (!overlay_state) 10289 return 0; 10290 10291 /* check if overlay plane is enabled */ 10292 if (!overlay_state->crtc) 10293 return 0; 10294 10295 /* find the primary plane for the CRTC that the overlay is enabled on */ 10296 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary); 10297 if 
(IS_ERR(primary_state))
10298 return PTR_ERR(primary_state);
10299
10300 /* check if primary plane is enabled */
10301 if (!primary_state->crtc)
10302 return 0;
10303
10304 /* Perform the bounds check to ensure the overlay plane covers the primary */
10305 if (primary_state->crtc_x < overlay_state->crtc_x ||
10306 primary_state->crtc_y < overlay_state->crtc_y ||
10307 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10308 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10309 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10310 return -EINVAL;
10311 }
10312
10313 return 0;
10314 }
10315
10316 /**
10317 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10318 * @dev: The DRM device
10319 * @state: The atomic state to commit
10320 *
10321 * Validate that the given atomic state is programmable by DC into hardware.
10322 * This involves constructing a &struct dc_state reflecting the new hardware
10323 * state we wish to commit, then querying DC to see if it is programmable. It's
10324 * important not to modify the existing DC state. Otherwise, atomic_check
10325 * may unexpectedly commit hardware changes.
10326 *
10327 * When validating the DC state, it's important that the right locks are
10328 * acquired. For the full-update case, which removes, adds, or updates streams
10329 * on one CRTC while flipping on another, taking the global lock guarantees
10330 * that such a commit waits for completion of any outstanding flip, using
10331 * DRM's synchronization events.
10332 *
10333 * Note that DM adds the affected connectors for all CRTCs in state, when that
10334 * might not seem necessary. This is because DC stream creation requires the
10335 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10336 * be possible but non-trivial - a possible TODO item.
10337 *
10338 * Return: 0 on success, negative error code on validation failure.
10339 */
10340 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10341 struct drm_atomic_state *state)
10342 {
10343 struct amdgpu_device *adev = drm_to_adev(dev);
10344 struct dm_atomic_state *dm_state = NULL;
10345 struct dc *dc = adev->dm.dc;
10346 struct drm_connector *connector;
10347 struct drm_connector_state *old_con_state, *new_con_state;
10348 struct drm_crtc *crtc;
10349 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10350 struct drm_plane *plane;
10351 struct drm_plane_state *old_plane_state, *new_plane_state;
10352 enum dc_status status;
10353 int ret, i;
10354 bool lock_and_validation_needed = false;
10355 struct dm_crtc_state *dm_old_crtc_state;
10356 #if defined(CONFIG_DRM_AMD_DC_DCN)
10357 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10358 #endif
10359
10360 trace_amdgpu_dm_atomic_check_begin(state);
10361
10362 ret = drm_atomic_helper_check_modeset(dev, state);
10363 if (ret)
10364 goto fail;
10365
10366 /* Check connector changes */
10367 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10368 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10369 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10370
10371 /* Skip connectors that are disabled or part of modeset already.
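* A connector with neither an old nor a new CRTC is unused; one with
* only an old CRTC is being disabled and needs no new_crtc_state here.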
*/
10372 if (!old_con_state->crtc && !new_con_state->crtc)
10373 continue;
10374
10375 if (!new_con_state->crtc)
10376 continue;
10377
10378 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10379 if (IS_ERR(new_crtc_state)) {
10380 ret = PTR_ERR(new_crtc_state);
10381 goto fail;
10382 }
10383
10384 if (dm_old_con_state->abm_level !=
10385 dm_new_con_state->abm_level)
10386 new_crtc_state->connectors_changed = true;
10387 }
10388
10389 #if defined(CONFIG_DRM_AMD_DC_DCN)
10390 if (dc_resource_is_dsc_encoding_supported(dc)) {
10391 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10392 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10393 ret = add_affected_mst_dsc_crtcs(state, crtc);
10394 if (ret)
10395 goto fail;
10396 }
10397 }
10398 }
10399 #endif
10400 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10401 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10402
10403 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10404 !new_crtc_state->color_mgmt_changed &&
10405 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10406 !dm_old_crtc_state->dsc_force_changed)
10407 continue;
10408
10409 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10410 if (ret)
10411 goto fail;
10412
10413 if (!new_crtc_state->enable)
10414 continue;
10415
10416 ret = drm_atomic_add_affected_connectors(state, crtc);
10417 if (ret)
10418 goto fail;
10419
10420 ret = drm_atomic_add_affected_planes(state, crtc);
10421 if (ret)
10422 goto fail;
10423
10424 if (dm_old_crtc_state->dsc_force_changed)
10425 new_crtc_state->mode_changed = true;
10426 }
10427
10428 /*
10429 * Add all primary and overlay planes on the CRTC to the state
10430 * whenever a plane is enabled to maintain correct z-ordering
10431 * and to enable fast surface updates.
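* This mirrors should_reset_plane(), which makes DC remove and
* recreate every plane on a stream when any one of them changes.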
10432 */
10433 drm_for_each_crtc(crtc, dev) {
10434 bool modified = false;
10435
10436 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10437 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10438 continue;
10439
10440 if (new_plane_state->crtc == crtc ||
10441 old_plane_state->crtc == crtc) {
10442 modified = true;
10443 break;
10444 }
10445 }
10446
10447 if (!modified)
10448 continue;
10449
10450 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10451 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10452 continue;
10453
10454 new_plane_state =
10455 drm_atomic_get_plane_state(state, plane);
10456
10457 if (IS_ERR(new_plane_state)) {
10458 ret = PTR_ERR(new_plane_state);
10459 goto fail;
10460 }
10461 }
10462 }
10463
10464 /* Remove existing planes if they are modified */
10465 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10466 ret = dm_update_plane_state(dc, state, plane,
10467 old_plane_state,
10468 new_plane_state,
10469 false,
10470 &lock_and_validation_needed);
10471 if (ret)
10472 goto fail;
10473 }
10474
10475 /* Disable all CRTCs that require disabling */
10476 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10477 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10478 old_crtc_state,
10479 new_crtc_state,
10480 false,
10481 &lock_and_validation_needed);
10482 if (ret)
10483 goto fail;
10484 }
10485
10486 /* Enable all CRTCs that require enabling */
10487 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10488 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10489 old_crtc_state,
10490 new_crtc_state,
10491 true,
10492 &lock_and_validation_needed);
10493 if (ret)
10494 goto fail;
10495 }
10496
10497 ret = validate_overlay(state);
10498 if (ret)
10499 goto fail;
10500
10501 /* Add new/modified planes */
10502 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10503 ret = dm_update_plane_state(dc, state, plane,
10504 old_plane_state,
10505 new_plane_state,
10506 true,
10507 &lock_and_validation_needed);
10508 if (ret)
10509 goto fail;
10510 }
10511
10512 /* Run this here since we want to validate the streams we created */
10513 ret = drm_atomic_helper_check_planes(dev, state);
10514 if (ret)
10515 goto fail;
10516
10517 /* Check cursor plane scaling */
10518 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10519 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10520 if (ret)
10521 goto fail;
10522 }
10523
10524 if (state->legacy_cursor_update) {
10525 /*
10526 * This is a fast cursor update coming from the plane update
10527 * helper, check if it can be done asynchronously for better
10528 * performance.
10529 */
10530 state->async_update =
10531 !drm_atomic_helper_async_check(dev, state);
10532
10533 /*
10534 * Skip the remaining global validation if this is an async
10535 * update. Cursor updates can be done without affecting
10536 * state or bandwidth calcs and this avoids the performance
10537 * penalty of locking the private state object and
10538 * allocating a new dc_state.
10539 */
10540 if (state->async_update)
10541 return 0;
10542 }
10543
10544 /* Check scaling and underscan changes */
10545 /* TODO Removed scaling changes validation due to inability to commit
10546 * new stream into context w/o causing full reset. Need to
10547 * decide how to handle.
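* For now such changes only mark the commit for the full
* lock-and-validation path below.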
10548 */
10549 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10550 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10551 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10552 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10553
10554 /* Skip any modesets/resets */
10555 if (!acrtc || drm_atomic_crtc_needs_modeset(
10556 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10557 continue;
10558
10559 /* Skip anything that is not a scaling or underscan change */
10560 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10561 continue;
10562
10563 lock_and_validation_needed = true;
10564 }
10565
10566 /*
10567 * Streams and planes are reset when there are changes that affect
10568 * bandwidth. Anything that affects bandwidth needs to go through
10569 * DC global validation to ensure that the configuration can be applied
10570 * to hardware.
10571 *
10572 * We have to currently stall out here in atomic_check for outstanding
10573 * commits to finish in this case because our IRQ handlers reference
10574 * DRM state directly - we can end up disabling interrupts too early
10575 * if we don't.
10576 *
10577 * TODO: Remove this stall and drop DM state private objects.
10578 */
10579 if (lock_and_validation_needed) {
10580 ret = dm_atomic_get_state(state, &dm_state);
10581 if (ret)
10582 goto fail;
10583
10584 ret = do_aquire_global_lock(dev, state);
10585 if (ret)
10586 goto fail;
10587
10588 #if defined(CONFIG_DRM_AMD_DC_DCN)
10589 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
ret = -EINVAL;
10590 goto fail;
}
10591
10592 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10593 if (ret)
10594 goto fail;
10595 #endif
10596
10597 /*
10598 * Perform validation of MST topology in the state:
10599 * We need to perform MST atomic check before calling
10600 * dc_validate_global_state(), or there is a chance
10601 * to get stuck in an infinite loop and hang eventually.
10602 */
10603 ret = drm_dp_mst_atomic_check(state);
10604 if (ret)
10605 goto fail;
10606 status = dc_validate_global_state(dc, dm_state->context, false);
10607 if (status != DC_OK) {
10608 drm_dbg_atomic(dev,
10609 "DC global validation failure: %s (%d)\n",
10610 dc_status_to_str(status), status);
10611 ret = -EINVAL;
10612 goto fail;
10613 }
10614 } else {
10615 /*
10616 * The commit is a fast update. Fast updates shouldn't change
10617 * the DC context or affect global validation, and can have their
10618 * commit work done in parallel with other commits not touching
10619 * the same resource. If we have a new DC context as part of
10620 * the DM atomic state from validation we need to free it and
10621 * retain the existing one instead.
10622 *
10623 * Furthermore, since the DM atomic state only contains the DC
10624 * context and can safely be annulled, we can free the state
10625 * and clear the associated private object now to free
10626 * some memory and avoid a possible use-after-free later.
10627 */
10628
10629 for (i = 0; i < state->num_private_objs; i++) {
10630 struct drm_private_obj *obj = state->private_objs[i].ptr;
10631
10632 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10633 int j = state->num_private_objs - 1;
10634
10635 dm_atomic_destroy_state(obj,
10636 state->private_objs[i].state);
10637
10638 /* If i is not at the end of the array then the
10639 * last element needs to be moved to where i was
10640 * before the array can safely be truncated.
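* This is the usual swap-with-last trick for removing an
* element from an unordered array.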
10641 */ 10642 if (i != j) 10643 state->private_objs[i] = 10644 state->private_objs[j]; 10645 10646 state->private_objs[j].ptr = NULL; 10647 state->private_objs[j].state = NULL; 10648 state->private_objs[j].old_state = NULL; 10649 state->private_objs[j].new_state = NULL; 10650 10651 state->num_private_objs = j; 10652 break; 10653 } 10654 } 10655 } 10656 10657 /* Store the overall update type for use later in atomic check. */ 10658 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { 10659 struct dm_crtc_state *dm_new_crtc_state = 10660 to_dm_crtc_state(new_crtc_state); 10661 10662 dm_new_crtc_state->update_type = lock_and_validation_needed ? 10663 UPDATE_TYPE_FULL : 10664 UPDATE_TYPE_FAST; 10665 } 10666 10667 /* Must be success */ 10668 WARN_ON(ret); 10669 10670 trace_amdgpu_dm_atomic_check_finish(state, ret); 10671 10672 return ret; 10673 10674 fail: 10675 if (ret == -EDEADLK) 10676 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); 10677 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 10678 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); 10679 else 10680 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); 10681 10682 trace_amdgpu_dm_atomic_check_finish(state, ret); 10683 10684 return ret; 10685 } 10686 10687 static bool is_dp_capable_without_timing_msa(struct dc *dc, 10688 struct amdgpu_dm_connector *amdgpu_dm_connector) 10689 { 10690 uint8_t dpcd_data; 10691 bool capable = false; 10692 10693 if (amdgpu_dm_connector->dc_link && 10694 dm_helpers_dp_read_dpcd( 10695 NULL, 10696 amdgpu_dm_connector->dc_link, 10697 DP_DOWN_STREAM_PORT_COUNT, 10698 &dpcd_data, 10699 sizeof(dpcd_data))) { 10700 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; 10701 } 10702 10703 return capable; 10704 } 10705 10706 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 10707 unsigned int offset, 10708 unsigned int total_length, 10709 uint8_t *data, 10710 unsigned int length, 10711 struct amdgpu_hdmi_vsdb_info *vsdb) 10712 { 10713 bool res; 10714 union dmub_rb_cmd cmd; 10715 struct dmub_cmd_send_edid_cea *input; 10716 struct dmub_cmd_edid_cea_output *output; 10717 10718 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 10719 return false; 10720 10721 memset(&cmd, 0, sizeof(cmd)); 10722 10723 input = &cmd.edid_cea.data.input; 10724 10725 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 10726 cmd.edid_cea.header.sub_type = 0; 10727 cmd.edid_cea.header.payload_bytes = 10728 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 10729 input->offset = offset; 10730 input->length = length; 10731 input->total_length = total_length; 10732 memcpy(input->payload, data, length); 10733 10734 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); 10735 if (!res) { 10736 DRM_ERROR("EDID CEA parser failed\n"); 10737 return false; 10738 } 10739 10740 output = &cmd.edid_cea.data.output; 10741 10742 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 10743 if (!output->ack.success) { 10744 DRM_ERROR("EDID CEA ack failed at offset %d\n", 10745 output->ack.offset); 10746 } 10747 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 10748 if (!output->amd_vsdb.vsdb_found) 10749 return false; 10750 10751 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 10752 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 10753 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 10754 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 10755 } else { 10756 if (output->type != 0) 10757 DRM_WARN("Unknown EDID CEA 
parser results\n"); 10758 return false; 10759 } 10760 10761 return true; 10762 } 10763 10764 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 10765 uint8_t *edid_ext, int len, 10766 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10767 { 10768 int i; 10769 10770 /* send extension block to DMCU for parsing */ 10771 for (i = 0; i < len; i += 8) { 10772 bool res; 10773 int offset; 10774 10775 /* send 8 bytes a time */ 10776 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 10777 return false; 10778 10779 if (i+8 == len) { 10780 /* EDID block sent completed, expect result */ 10781 int version, min_rate, max_rate; 10782 10783 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 10784 if (res) { 10785 /* amd vsdb found */ 10786 vsdb_info->freesync_supported = 1; 10787 vsdb_info->amd_vsdb_version = version; 10788 vsdb_info->min_refresh_rate_hz = min_rate; 10789 vsdb_info->max_refresh_rate_hz = max_rate; 10790 return true; 10791 } 10792 /* not amd vsdb */ 10793 return false; 10794 } 10795 10796 /* check for ack*/ 10797 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); 10798 if (!res) 10799 return false; 10800 } 10801 10802 return false; 10803 } 10804 10805 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 10806 uint8_t *edid_ext, int len, 10807 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10808 { 10809 int i; 10810 10811 /* send extension block to DMCU for parsing */ 10812 for (i = 0; i < len; i += 8) { 10813 /* send 8 bytes a time */ 10814 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 10815 return false; 10816 } 10817 10818 return vsdb_info->freesync_supported; 10819 } 10820 10821 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 10822 uint8_t *edid_ext, int len, 10823 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10824 { 10825 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 10826 10827 if (adev->dm.dmub_srv) 10828 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 10829 else 10830 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 10831 } 10832 10833 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 10834 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 10835 { 10836 uint8_t *edid_ext = NULL; 10837 int i; 10838 bool valid_vsdb_found = false; 10839 10840 /*----- drm_find_cea_extension() -----*/ 10841 /* No EDID or EDID extensions */ 10842 if (edid == NULL || edid->extensions == 0) 10843 return -ENODEV; 10844 10845 /* Find CEA extension */ 10846 for (i = 0; i < edid->extensions; i++) { 10847 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 10848 if (edid_ext[0] == CEA_EXT) 10849 break; 10850 } 10851 10852 if (i == edid->extensions) 10853 return -ENODEV; 10854 10855 /*----- cea_db_offsets() -----*/ 10856 if (edid_ext[0] != CEA_EXT) 10857 return -ENODEV; 10858 10859 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 10860 10861 return valid_vsdb_found ? 
i : -ENODEV; 10862 } 10863 10864 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 10865 struct edid *edid) 10866 { 10867 int i = 0; 10868 struct detailed_timing *timing; 10869 struct detailed_non_pixel *data; 10870 struct detailed_data_monitor_range *range; 10871 struct amdgpu_dm_connector *amdgpu_dm_connector = 10872 to_amdgpu_dm_connector(connector); 10873 struct dm_connector_state *dm_con_state = NULL; 10874 10875 struct drm_device *dev = connector->dev; 10876 struct amdgpu_device *adev = drm_to_adev(dev); 10877 bool freesync_capable = false; 10878 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 10879 10880 if (!connector->state) { 10881 DRM_ERROR("%s - Connector has no state", __func__); 10882 goto update; 10883 } 10884 10885 if (!edid) { 10886 dm_con_state = to_dm_connector_state(connector->state); 10887 10888 amdgpu_dm_connector->min_vfreq = 0; 10889 amdgpu_dm_connector->max_vfreq = 0; 10890 amdgpu_dm_connector->pixel_clock_mhz = 0; 10891 10892 goto update; 10893 } 10894 10895 dm_con_state = to_dm_connector_state(connector->state); 10896 10897 if (!amdgpu_dm_connector->dc_sink) { 10898 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); 10899 goto update; 10900 } 10901 if (!adev->dm.freesync_module) 10902 goto update; 10903 10904 10905 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT 10906 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { 10907 bool edid_check_required = false; 10908 10909 if (edid) { 10910 edid_check_required = is_dp_capable_without_timing_msa( 10911 adev->dm.dc, 10912 amdgpu_dm_connector); 10913 } 10914 10915 if (edid_check_required == true && (edid->version > 1 || 10916 (edid->version == 1 && edid->revision > 1))) { 10917 for (i = 0; i < 4; i++) { 10918 10919 timing = &edid->detailed_timings[i]; 10920 data = &timing->data.other_data; 10921 range = &data->data.range; 10922 /* 10923 * Check if monitor has continuous frequency mode 10924 */ 10925 if (data->type != EDID_DETAIL_MONITOR_RANGE) 10926 continue; 10927 /* 10928 * Check for flag range limits only. If flag == 1 then 10929 * no additional timing information provided. 
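* (flags == 1 is the EDID "Range Limits Only" descriptor variant.)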
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
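/*
 * Illustrative sketch only: amdgpu_dm_trigger_timing_sync() above is
 * meant to be driven by a toggle such as the force_timing_sync debugfs
 * entry. The hypothetical helper below shows the expected sequence:
 * update the flag, then re-apply it to every stream in the current
 * state.
 */
static void __maybe_unused example_set_force_timing_sync(struct amdgpu_device *adev,
							 bool enable)
{
	adev->dm.force_timing_sync = enable;
	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
}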
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
		struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);

	/* wait up to 10 seconds for the DMUB AUX reply */
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

		/* for the read case, copy the reply data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}

	return adev->dm.dmub_notify->aux_reply.length;
}
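/*
 * Illustrative sketch only: issuing a one-byte native AUX (DPCD) read
 * through the synchronous DMUB-backed helper above. The payload layout
 * follows struct aux_payload as used in this file; the helper name and
 * the choice of DPCD address 0x0 (DP_DPCD_REV) are hypothetical.
 */
static int __maybe_unused example_read_dpcd_rev(struct dc_context *ctx,
						unsigned int link_index)
{
	uint8_t data = 0;
	uint8_t reply = 0;
	enum aux_return_code_type op_result;
	struct aux_payload payload = {
		.i2c_over_aux = false,	/* native AUX, not I2C-over-AUX */
		.write = false,		/* read transaction */
		.address = 0x0,		/* DP_DPCD_REV */
		.length = 1,
		.data = &data,
		.reply = &reply,
	};
	int len;

	len = amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link_index,
						       &payload, &op_result);
	if (len < 0 || op_result != AUX_RET_SUCCESS)
		return -EIO;

	return data;	/* DPCD revision byte */
}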