/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
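
/*
 * Illustrative sketch (inferred from the offset arithmetic in
 * dm_dmub_hw_init() below; not an authoritative description of the PSP
 * container format) of how a DMCUB firmware image is sliced up:
 *
 *   ucode_array_offset_bytes
 *        |
 *        v
 *        +------------------+-------------------+------------------+----------+
 *        | PSP_HEADER_BYTES | inst_const (code) | PSP_FOOTER_BYTES | bss/data |
 *        +------------------+-------------------+------------------+----------+
 *        |<--------------- inst_const_bytes ----------------------›|
 *
 *   fw_inst_const      = data + ucode_array_offset_bytes + PSP_HEADER_BYTES
 *   fw_inst_const_size = inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES
 *   fw_bss_data        = data + ucode_array_offset_bytes + inst_const_bytes
 */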

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
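
/*
 * A rough sketch of the layering described above (illustrative only; the
 * exact call chains differ per entry point):
 *
 *   userspace (KMS ioctls)
 *         |
 *   DRM core (drm_atomic_helper_*, drm_crtc_*)
 *         |
 *   amdgpu_dm (this file: amdgpu_dm_atomic_check/commit, IRQ handlers, ...)
 *         |
 *   DC (dc_create, dc_link_*, dc_stream_*, ...)
 *         |
 *   display hardware (DCE/DCN)
 */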

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
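
/*
 * Worked example of the reg-format packing above (illustrative numbers):
 * with h_position = 100 (0x64) and v_position = 200 (0xC8), *position
 * becomes (100 << 16) | 200 = 0x006400C8; *vbl packs v_blank_start and
 * v_blank_end the same way.
 */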

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, carrying the amdgpu device and the
 * IRQ source of the completed flip
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as it was incremented at the start of the
	 * vblank in which this flip completed; last_flip_vblank is therefore
	 * the forbidden count for queueing new pageflips when vsync + VRR is
	 * enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* In VRR mode, core vblank handling is done here, after the
		 * end of the front-porch, as vblank timestamping only gives
		 * valid results once the front-porch is over. This also
		 * delivers any page-flip completion events that were queued
		 * to us because a pageflip happened inside the front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/*
	 * Core vblank handling at the start of the front-porch is only
	 * possible in non-VRR mode, as only there does vblank timestamping
	 * give valid results while inside the front-porch. Otherwise defer
	 * it to dm_vupdate_high_irq after the end of the front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
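
/*
 * Sizing sketch for the allocation above (assumed, illustrative timings):
 * an eDP panel whose largest mode has htotal = 2200 and vtotal = 1125
 * (a common 1920x1080 timing) yields max_size = 2,475,000 pixels, so the
 * FBC buffer is max_size * 4 = 9,900,000 bytes (~9.4 MiB) of GTT.
 */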

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

#ifdef notyet
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
#endif

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to CW0; otherwise, the firmware backdoor load is done
	 * here by dm_dmub_hw_init.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
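
/*
 * Summary of the DMUB framebuffer windows touched above (as used in this
 * file; the numbering follows the DMUB_WINDOW_* identifiers):
 *
 *   0 INST_CONST  - code, copied here only for the backdoor (non-PSP) load
 *   2 BSS_DATA    - data, always copied
 *   3 VBIOS       - copy of the adapter BIOS
 *   4 MAILBOX     - zero-initialized
 *   5 TRACEBUFF   - zero-initialized
 *   6 FW_STATE    - zero-initialized
 */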

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	rw_init(&adev->dm.dc_lock, "dmdc");
	rw_init(&adev->dm.audio_lock, "dmaud");

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs used */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* adev->dm.dc may still be NULL if we get here via an early
	 * amdgpu_dm_init() error path.
	 */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
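
/*
 * DMUB bring-up thus happens in two phases: dm_dmub_sw_init() above creates
 * the service, sizes its regions, allocates one VRAM buffer and computes the
 * per-window addresses; dm_dmub_hw_init() later copies the firmware into
 * those windows and starts the microcontroller. A minimal sketch of the
 * sw-init pipeline (error handling omitted):
 *
 *   dmub_srv_create(dmub_srv, &create_params);
 *   dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info);
 *   amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, ...);
 *   dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
 */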

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}
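
/*
 * Worked example for the linear LUT above (illustrative): each entry is
 * 0xFFFF * i / 15, so linear_lut[0] = 0x0000, linear_lut[7] = 0x7777 and
 * linear_lut[15] = 0xFFFF - an identity ramp over 16 evenly spaced points.
 * The 1% ABM floor corresponds to 0xFFFF * 0.01 ~= 655 = 0x28F.
 */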

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * Enable HPD Rx IRQ early; this must be done before setting the mode,
	 * as short-pulse interrupts are used for MST.
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
1797 
1798 
1799 /**
1800  * DOC: atomic
1801  *
1802  * *WIP*
1803  */
1804 
1805 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1806 	.fb_create = amdgpu_display_user_framebuffer_create,
1807 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1808 	.atomic_check = amdgpu_dm_atomic_check,
1809 	.atomic_commit = amdgpu_dm_atomic_commit,
1810 };
1811 
1812 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1813 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1814 };
1815 
1816 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1817 {
1818 	u32 max_cll, min_cll, max, min, q, r;
1819 	struct amdgpu_dm_backlight_caps *caps;
1820 	struct amdgpu_display_manager *dm;
1821 	struct drm_connector *conn_base;
1822 	struct amdgpu_device *adev;
1823 	struct dc_link *link = NULL;
1824 	static const u8 pre_computed_values[] = {
1825 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1826 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1827 
1828 	if (!aconnector || !aconnector->dc_link)
1829 		return;
1830 
1831 	link = aconnector->dc_link;
1832 	if (link->connector_signal != SIGNAL_TYPE_EDP)
1833 		return;
1834 
1835 	conn_base = &aconnector->base;
1836 	adev = conn_base->dev->dev_private;
1837 	dm = &adev->dm;
1838 	caps = &dm->backlight_caps;
1839 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1840 	caps->aux_support = false;
1841 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1842 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1843 
1844 	if (caps->ext_caps->bits.oled == 1 ||
1845 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1846 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1847 		caps->aux_support = true;
1848 
1849 	/* From the specification (CTA-861-G), for calculating the maximum
1850 	 * luminance we need to use:
1851 	 *	Luminance = 50*2**(CV/32)
1852 	 * Where CV is a one-byte value.
1853 	 * Evaluating this expression would normally need floating-point
1854 	 * precision; to avoid that complexity, we take advantage of the fact
1855 	 * that CV is divided by a constant. By Euclid's division algorithm,
1856 	 * CV can be written as CV = 32*q + r. Substituting this into the
1857 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
1858 	 * to pre-compute the values of 50*(2**(r/32)). The pre-computation
1859 	 * used the following Ruby line:
1860 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1861 	 * The results of that expression can be verified against
1862 	 * pre_computed_values.
1863 	 */
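	/*
	 * Worked example: max_cll = 70 gives q = 70 >> 5 = 2 and
	 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
	 * = 4 * 57 = 228, matching 50*2**(70/32) ~= 227.9 nits.
	 */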
1864 	q = max_cll >> 5;
1865 	r = max_cll % 32;
1866 	max = (1 << q) * pre_computed_values[r];
1867 
1868 	// min luminance: maxLum * (CV/255)^2 / 100
1869 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1870 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
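	/*
	 * Worked example: min_cll = 128 gives q = DIV_ROUND_CLOSEST(128, 255)
	 * = 1, and DIV_ROUND_CLOSEST(1 * 1, 100) = 0, so min collapses to 0
	 * here; the integer rounding is coarse for small code values.
	 */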
1871 
1872 	caps->aux_max_input_signal = max;
1873 	caps->aux_min_input_signal = min;
1874 }
1875 
1876 void amdgpu_dm_update_connector_after_detect(
1877 		struct amdgpu_dm_connector *aconnector)
1878 {
1879 	struct drm_connector *connector = &aconnector->base;
1880 	struct drm_device *dev = connector->dev;
1881 	struct dc_sink *sink;
1882 
1883 	/* MST handled by drm_mst framework */
1884 	if (aconnector->mst_mgr.mst_state)
1885 		return;
1886 
1887 
1888 	sink = aconnector->dc_link->local_sink;
1889 	if (sink)
1890 		dc_sink_retain(sink);
1891 
1892 	/*
1893 	 * An EDID-managed connector gets its first update only in the mode_valid
1894 	 * hook; the connector sink is then set to either a fake or a physical sink,
1895 	 * depending on link status. Skip if this was already done during boot.
1896 	 */
1897 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1898 			&& aconnector->dc_em_sink) {
1899 
1900 		/*
1901 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
1902 		 * a stream, because the connector's dc_sink is set to NULL on resume.
1903 		 */
1904 		mutex_lock(&dev->mode_config.mutex);
1905 
1906 		if (sink) {
1907 			if (aconnector->dc_sink) {
1908 				amdgpu_dm_update_freesync_caps(connector, NULL);
1909 				/*
1910 				 * The retain and release below bump the sink's refcount
1911 				 * because the link no longer points to it after disconnect;
1912 				 * otherwise the next CRTC-to-connector reshuffle by the UMD
1913 				 * would trigger an unwanted dc_sink release.
1914 				 */
1915 				dc_sink_release(aconnector->dc_sink);
1916 			}
1917 			aconnector->dc_sink = sink;
1918 			dc_sink_retain(aconnector->dc_sink);
1919 			amdgpu_dm_update_freesync_caps(connector,
1920 					aconnector->edid);
1921 		} else {
1922 			amdgpu_dm_update_freesync_caps(connector, NULL);
1923 			if (!aconnector->dc_sink) {
1924 				aconnector->dc_sink = aconnector->dc_em_sink;
1925 				dc_sink_retain(aconnector->dc_sink);
1926 			}
1927 		}
1928 
1929 		mutex_unlock(&dev->mode_config.mutex);
1930 
1931 		if (sink)
1932 			dc_sink_release(sink);
1933 		return;
1934 	}
1935 
1936 	/*
1937 	 * TODO: temporary guard until a proper fix is found.
1938 	 * If this sink is an MST sink, we should not do anything.
1939 	 */
1940 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1941 		dc_sink_release(sink);
1942 		return;
1943 	}
1944 
1945 	if (aconnector->dc_sink == sink) {
1946 		/*
1947 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1948 		 * Do nothing!!
1949 		 */
1950 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1951 				aconnector->connector_id);
1952 		if (sink)
1953 			dc_sink_release(sink);
1954 		return;
1955 	}
1956 
1957 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1958 		aconnector->connector_id, aconnector->dc_sink, sink);
1959 
1960 	mutex_lock(&dev->mode_config.mutex);
1961 
1962 	/*
1963 	 * 1. Update status of the drm connector
1964 	 * 2. Send an event and let userspace tell us what to do
1965 	 */
1966 	if (sink) {
1967 		/*
1968 		 * TODO: check if we still need the S3 mode update workaround.
1969 		 * If yes, put it here.
1970 		 */
1971 		if (aconnector->dc_sink)
1972 			amdgpu_dm_update_freesync_caps(connector, NULL);
1973 
1974 		aconnector->dc_sink = sink;
1975 		dc_sink_retain(aconnector->dc_sink);
1976 		if (sink->dc_edid.length == 0) {
1977 			aconnector->edid = NULL;
1978 			if (aconnector->dc_link->aux_mode) {
1979 				drm_dp_cec_unset_edid(
1980 					&aconnector->dm_dp_aux.aux);
1981 			}
1982 		} else {
1983 			aconnector->edid =
1984 				(struct edid *)sink->dc_edid.raw_edid;
1985 
1986 			drm_connector_update_edid_property(connector,
1987 							   aconnector->edid);
1988 
1989 			if (aconnector->dc_link->aux_mode)
1990 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1991 						    aconnector->edid);
1992 		}
1993 
1994 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1995 		update_connector_ext_caps(aconnector);
1996 	} else {
1997 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1998 		amdgpu_dm_update_freesync_caps(connector, NULL);
1999 		drm_connector_update_edid_property(connector, NULL);
2000 		aconnector->num_modes = 0;
2001 		dc_sink_release(aconnector->dc_sink);
2002 		aconnector->dc_sink = NULL;
2003 		aconnector->edid = NULL;
2004 #ifdef CONFIG_DRM_AMD_DC_HDCP
2005 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2006 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2007 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2008 #endif
2009 	}
2010 
2011 	mutex_unlock(&dev->mode_config.mutex);
2012 
2013 	if (sink)
2014 		dc_sink_release(sink);
2015 }
2016 
2017 static void handle_hpd_irq(void *param)
2018 {
2019 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2020 	struct drm_connector *connector = &aconnector->base;
2021 	struct drm_device *dev = connector->dev;
2022 	enum dc_connection_type new_connection_type = dc_connection_none;
2023 #ifdef CONFIG_DRM_AMD_DC_HDCP
2024 	struct amdgpu_device *adev = dev->dev_private;
2025 #endif
2026 
2027 	/*
2028 	 * In case of failure or MST there is no need to update the connector
2029 	 * status or notify the OS, since MST handles this in its own context.
2030 	 */
2031 	mutex_lock(&aconnector->hpd_lock);
2032 
2033 #ifdef CONFIG_DRM_AMD_DC_HDCP
2034 	if (adev->dm.hdcp_workqueue)
2035 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2036 #endif
2037 	if (aconnector->fake_enable)
2038 		aconnector->fake_enable = false;
2039 
2040 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2041 		DRM_ERROR("KMS: Failed to detect connector\n");
2042 
2043 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2044 		emulated_link_detect(aconnector->dc_link);
2045 
2046 
2047 		drm_modeset_lock_all(dev);
2048 		dm_restore_drm_connector_state(dev, connector);
2049 		drm_modeset_unlock_all(dev);
2050 
2051 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2052 			drm_kms_helper_hotplug_event(dev);
2053 
2054 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2055 		amdgpu_dm_update_connector_after_detect(aconnector);
2056 
2057 
2058 		drm_modeset_lock_all(dev);
2059 		dm_restore_drm_connector_state(dev, connector);
2060 		drm_modeset_unlock_all(dev);
2061 
2062 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2063 			drm_kms_helper_hotplug_event(dev);
2064 	}
2065 	mutex_unlock(&aconnector->hpd_lock);
2066 
2067 }
2068 
2069 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2070 {
2071 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2072 	uint8_t dret;
2073 	bool new_irq_handled = false;
2074 	int dpcd_addr;
2075 	int dpcd_bytes_to_read;
2076 
2077 	const int max_process_count = 30;
2078 	int process_count = 0;
2079 
2080 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2081 
2082 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2083 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2084 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2085 		dpcd_addr = DP_SINK_COUNT;
2086 	} else {
2087 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2088 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2089 		dpcd_addr = DP_SINK_COUNT_ESI;
2090 	}
2091 
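	/*
	 * Worked example: for DPCD rev >= 1.2 the read below fetches the four
	 * bytes at DP_SINK_COUNT_ESI (0x2002..0x2005); the ACK further down
	 * writes the last three of those bytes back at 0x2003 (dpcd_addr + 1).
	 */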
2092 	dret = drm_dp_dpcd_read(
2093 		&aconnector->dm_dp_aux.aux,
2094 		dpcd_addr,
2095 		esi,
2096 		dpcd_bytes_to_read);
2097 
2098 	while (dret == dpcd_bytes_to_read &&
2099 		process_count < max_process_count) {
2100 		uint8_t retry;
2101 		dret = 0;
2102 
2103 		process_count++;
2104 
2105 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2106 		/* handle HPD short pulse irq */
2107 		if (aconnector->mst_mgr.mst_state)
2108 			drm_dp_mst_hpd_irq(
2109 				&aconnector->mst_mgr,
2110 				esi,
2111 				&new_irq_handled);
2112 
2113 		if (new_irq_handled) {
2114 			/* ACK at DPCD to notify downstream */
2115 			const int ack_dpcd_bytes_to_write =
2116 				dpcd_bytes_to_read - 1;
2117 
2118 			for (retry = 0; retry < 3; retry++) {
2119 				uint8_t wret;
2120 
2121 				wret = drm_dp_dpcd_write(
2122 					&aconnector->dm_dp_aux.aux,
2123 					dpcd_addr + 1,
2124 					&esi[1],
2125 					ack_dpcd_bytes_to_write);
2126 				if (wret == ack_dpcd_bytes_to_write)
2127 					break;
2128 			}
2129 
2130 			/* check if there is a new irq to be handled */
2131 			dret = drm_dp_dpcd_read(
2132 				&aconnector->dm_dp_aux.aux,
2133 				dpcd_addr,
2134 				esi,
2135 				dpcd_bytes_to_read);
2136 
2137 			new_irq_handled = false;
2138 		} else {
2139 			break;
2140 		}
2141 	}
2142 
2143 	if (process_count == max_process_count)
2144 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2145 }
2146 
2147 static void handle_hpd_rx_irq(void *param)
2148 {
2149 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2150 	struct drm_connector *connector = &aconnector->base;
2151 	struct drm_device *dev = connector->dev;
2152 	struct dc_link *dc_link = aconnector->dc_link;
2153 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2154 	enum dc_connection_type new_connection_type = dc_connection_none;
2155 #ifdef CONFIG_DRM_AMD_DC_HDCP
2156 	union hpd_irq_data hpd_irq_data;
2157 	struct amdgpu_device *adev = dev->dev_private;
2158 
2159 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2160 #endif
2161 
2162 	/*
2163 	 * TODO: Temporarily take the mutex so the HPD interrupt does not
2164 	 * conflict over the GPIO. Once an i2c helper is implemented, this
2165 	 * mutex should be retired.
2166 	 */
2167 	if (dc_link->type != dc_connection_mst_branch)
2168 		mutex_lock(&aconnector->hpd_lock);
2169 
2170 
2171 #ifdef CONFIG_DRM_AMD_DC_HDCP
2172 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2173 #else
2174 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2175 #endif
2176 			!is_mst_root_connector) {
2177 		/* Downstream Port status changed. */
2178 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2179 			DRM_ERROR("KMS: Failed to detect connector\n");
2180 
2181 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2182 			emulated_link_detect(dc_link);
2183 
2184 			if (aconnector->fake_enable)
2185 				aconnector->fake_enable = false;
2186 
2187 			amdgpu_dm_update_connector_after_detect(aconnector);
2188 
2189 
2190 			drm_modeset_lock_all(dev);
2191 			dm_restore_drm_connector_state(dev, connector);
2192 			drm_modeset_unlock_all(dev);
2193 
2194 			drm_kms_helper_hotplug_event(dev);
2195 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2196 
2197 			if (aconnector->fake_enable)
2198 				aconnector->fake_enable = false;
2199 
2200 			amdgpu_dm_update_connector_after_detect(aconnector);
2201 
2202 
2203 			drm_modeset_lock_all(dev);
2204 			dm_restore_drm_connector_state(dev, connector);
2205 			drm_modeset_unlock_all(dev);
2206 
2207 			drm_kms_helper_hotplug_event(dev);
2208 		}
2209 	}
2210 #ifdef CONFIG_DRM_AMD_DC_HDCP
2211 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2212 		if (adev->dm.hdcp_workqueue)
2213 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2214 	}
2215 #endif
2216 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2217 	    (dc_link->type == dc_connection_mst_branch))
2218 		dm_handle_hpd_rx_irq(aconnector);
2219 
2220 	if (dc_link->type != dc_connection_mst_branch) {
2221 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2222 		mutex_unlock(&aconnector->hpd_lock);
2223 	}
2224 }
2225 
2226 static void register_hpd_handlers(struct amdgpu_device *adev)
2227 {
2228 	struct drm_device *dev = adev->ddev;
2229 	struct drm_connector *connector;
2230 	struct amdgpu_dm_connector *aconnector;
2231 	const struct dc_link *dc_link;
2232 	struct dc_interrupt_params int_params = {0};
2233 
2234 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2235 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2236 
2237 	list_for_each_entry(connector,
2238 			&dev->mode_config.connector_list, head)	{
2239 
2240 		aconnector = to_amdgpu_dm_connector(connector);
2241 		dc_link = aconnector->dc_link;
2242 
2243 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2244 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2245 			int_params.irq_source = dc_link->irq_source_hpd;
2246 
2247 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2248 					handle_hpd_irq,
2249 					(void *) aconnector);
2250 		}
2251 
2252 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2253 
2254 			/* Also register for DP short pulse (hpd_rx). */
2255 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2256 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2257 
2258 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2259 					handle_hpd_rx_irq,
2260 					(void *) aconnector);
2261 		}
2262 	}
2263 }
2264 
2265 /* Register IRQ sources and initialize IRQ callbacks */
2266 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2267 {
2268 	struct dc *dc = adev->dm.dc;
2269 	struct common_irq_params *c_irq_params;
2270 	struct dc_interrupt_params int_params = {0};
2271 	int r;
2272 	int i;
2273 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2274 
2275 	if (adev->asic_type >= CHIP_VEGA10)
2276 		client_id = SOC15_IH_CLIENTID_DCE;
2277 
2278 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2279 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2280 
2281 	/*
2282 	 * Actions of amdgpu_irq_add_id():
2283 	 * 1. Register a set() function with base driver.
2284 	 *    Base driver will call set() function to enable/disable an
2285 	 *    interrupt in DC hardware.
2286 	 * 2. Register amdgpu_dm_irq_handler().
2287 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2288 	 *    coming from DC hardware.
2289 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2290 	 *    for acknowledging and handling. */
2291 
2292 	/* Use VBLANK interrupt */
2293 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2294 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2295 		if (r) {
2296 			DRM_ERROR("Failed to add crtc irq id!\n");
2297 			return r;
2298 		}
2299 
2300 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2301 		int_params.irq_source =
2302 			dc_interrupt_to_irq_source(dc, i, 0);
2303 
2304 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2305 
2306 		c_irq_params->adev = adev;
2307 		c_irq_params->irq_src = int_params.irq_source;
2308 
2309 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2310 				dm_crtc_high_irq, c_irq_params);
2311 	}
2312 
2313 	/* Use VUPDATE interrupt */
2314 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2315 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2316 		if (r) {
2317 			DRM_ERROR("Failed to add vupdate irq id!\n");
2318 			return r;
2319 		}
2320 
2321 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2322 		int_params.irq_source =
2323 			dc_interrupt_to_irq_source(dc, i, 0);
2324 
2325 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2326 
2327 		c_irq_params->adev = adev;
2328 		c_irq_params->irq_src = int_params.irq_source;
2329 
2330 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2331 				dm_vupdate_high_irq, c_irq_params);
2332 	}
2333 
2334 	/* Use GRPH_PFLIP interrupt */
2335 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2336 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2337 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2338 		if (r) {
2339 			DRM_ERROR("Failed to add page flip irq id!\n");
2340 			return r;
2341 		}
2342 
2343 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2344 		int_params.irq_source =
2345 			dc_interrupt_to_irq_source(dc, i, 0);
2346 
2347 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2348 
2349 		c_irq_params->adev = adev;
2350 		c_irq_params->irq_src = int_params.irq_source;
2351 
2352 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2353 				dm_pflip_high_irq, c_irq_params);
2354 
2355 	}
2356 
2357 	/* HPD */
2358 	r = amdgpu_irq_add_id(adev, client_id,
2359 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2360 	if (r) {
2361 		DRM_ERROR("Failed to add hpd irq id!\n");
2362 		return r;
2363 	}
2364 
2365 	register_hpd_handlers(adev);
2366 
2367 	return 0;
2368 }
2369 
2370 #if defined(CONFIG_DRM_AMD_DC_DCN)
2371 /* Register IRQ sources and initialize IRQ callbacks */
2372 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2373 {
2374 	struct dc *dc = adev->dm.dc;
2375 	struct common_irq_params *c_irq_params;
2376 	struct dc_interrupt_params int_params = {0};
2377 	int r;
2378 	int i;
2379 
2380 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2381 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2382 
2383 	/*
2384 	 * Actions of amdgpu_irq_add_id():
2385 	 * 1. Register a set() function with base driver.
2386 	 *    Base driver will call set() function to enable/disable an
2387 	 *    interrupt in DC hardware.
2388 	 * 2. Register amdgpu_dm_irq_handler().
2389 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2390 	 *    coming from DC hardware.
2391 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2392 	 *    for acknowledging and handling.
2393 	 */
2394 
2395 	/* Use VSTARTUP interrupt */
2396 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2397 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2398 			i++) {
2399 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2400 
2401 		if (r) {
2402 			DRM_ERROR("Failed to add crtc irq id!\n");
2403 			return r;
2404 		}
2405 
2406 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2407 		int_params.irq_source =
2408 			dc_interrupt_to_irq_source(dc, i, 0);
2409 
2410 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2411 
2412 		c_irq_params->adev = adev;
2413 		c_irq_params->irq_src = int_params.irq_source;
2414 
2415 		amdgpu_dm_irq_register_interrupt(
2416 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2417 	}
2418 
2419 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2420 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2421 	 * to trigger at end of each vblank, regardless of state of the lock,
2422 	 * matching DCE behaviour.
2423 	 */
2424 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2425 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2426 	     i++) {
2427 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2428 
2429 		if (r) {
2430 			DRM_ERROR("Failed to add vupdate irq id!\n");
2431 			return r;
2432 		}
2433 
2434 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435 		int_params.irq_source =
2436 			dc_interrupt_to_irq_source(dc, i, 0);
2437 
2438 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2439 
2440 		c_irq_params->adev = adev;
2441 		c_irq_params->irq_src = int_params.irq_source;
2442 
2443 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444 				dm_vupdate_high_irq, c_irq_params);
2445 	}
2446 
2447 	/* Use GRPH_PFLIP interrupt */
2448 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2450 			i++) {
2451 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2452 		if (r) {
2453 			DRM_ERROR("Failed to add page flip irq id!\n");
2454 			return r;
2455 		}
2456 
2457 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458 		int_params.irq_source =
2459 			dc_interrupt_to_irq_source(dc, i, 0);
2460 
2461 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2462 
2463 		c_irq_params->adev = adev;
2464 		c_irq_params->irq_src = int_params.irq_source;
2465 
2466 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 				dm_pflip_high_irq, c_irq_params);
2468 
2469 	}
2470 
2471 	/* HPD */
2472 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2473 			&adev->hpd_irq);
2474 	if (r) {
2475 		DRM_ERROR("Failed to add hpd irq id!\n");
2476 		return r;
2477 	}
2478 
2479 	register_hpd_handlers(adev);
2480 
2481 	return 0;
2482 }
2483 #endif
2484 
2485 /*
2486  * Acquires the lock for the atomic state object and returns
2487  * the new atomic state.
2488  *
2489  * This should only be called during atomic check.
2490  */
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492 			       struct dm_atomic_state **dm_state)
2493 {
2494 	struct drm_device *dev = state->dev;
2495 	struct amdgpu_device *adev = dev->dev_private;
2496 	struct amdgpu_display_manager *dm = &adev->dm;
2497 	struct drm_private_state *priv_state;
2498 
2499 	if (*dm_state)
2500 		return 0;
2501 
2502 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503 	if (IS_ERR(priv_state))
2504 		return PTR_ERR(priv_state);
2505 
2506 	*dm_state = to_dm_atomic_state(priv_state);
2507 
2508 	return 0;
2509 }
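
/*
 * Editorial sketch (not part of the driver): a hypothetical atomic-check
 * helper showing how dm_atomic_get_state() is meant to be used. *dm_state
 * starts out NULL and is looked up (taking the private object's modeset
 * lock) only on first use; subsequent calls are cheap no-ops.
 */
#if 0	/* illustration only */
static int example_atomic_check_step(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	int ret;

	ret = dm_atomic_get_state(state, &dm_state);
	if (ret)
		return ret;

	/* dm_state->context can now be inspected or modified. */
	return 0;
}
#endif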
2510 
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2513 {
2514 	struct drm_device *dev = state->dev;
2515 	struct amdgpu_device *adev = dev->dev_private;
2516 	struct amdgpu_display_manager *dm = &adev->dm;
2517 	struct drm_private_obj *obj;
2518 	struct drm_private_state *new_obj_state;
2519 	int i;
2520 
2521 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522 		if (obj->funcs == dm->atomic_obj.funcs)
2523 			return to_dm_atomic_state(new_obj_state);
2524 	}
2525 
2526 	return NULL;
2527 }
2528 
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2531 {
2532 	struct drm_device *dev = state->dev;
2533 	struct amdgpu_device *adev = dev->dev_private;
2534 	struct amdgpu_display_manager *dm = &adev->dm;
2535 	struct drm_private_obj *obj;
2536 	struct drm_private_state *old_obj_state;
2537 	int i;
2538 
2539 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540 		if (obj->funcs == dm->atomic_obj.funcs)
2541 			return to_dm_atomic_state(old_obj_state);
2542 	}
2543 
2544 	return NULL;
2545 }
2546 
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2549 {
2550 	struct dm_atomic_state *old_state, *new_state;
2551 
2552 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553 	if (!new_state)
2554 		return NULL;
2555 
2556 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2557 
2558 	old_state = to_dm_atomic_state(obj->state);
2559 
2560 	if (old_state && old_state->context)
2561 		new_state->context = dc_copy_state(old_state->context);
2562 
2563 	if (!new_state->context) {
2564 		kfree(new_state);
2565 		return NULL;
2566 	}
2567 
2568 	return &new_state->base;
2569 }
2570 
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572 				    struct drm_private_state *state)
2573 {
2574 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2575 
2576 	if (dm_state && dm_state->context)
2577 		dc_release_state(dm_state->context);
2578 
2579 	kfree(dm_state);
2580 }
2581 
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2584 	.atomic_destroy_state = dm_atomic_destroy_state,
2585 };
2586 
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2588 {
2589 	struct dm_atomic_state *state;
2590 	int r;
2591 
2592 	adev->mode_info.mode_config_initialized = true;
2593 
2594 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2596 
2597 	adev->ddev->mode_config.max_width = 16384;
2598 	adev->ddev->mode_config.max_height = 16384;
2599 
2600 	adev->ddev->mode_config.preferred_depth = 24;
2601 	adev->ddev->mode_config.prefer_shadow = 1;
2602 	/* indicates support for immediate flip */
2603 	adev->ddev->mode_config.async_page_flip = true;
2604 
2605 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2606 
2607 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2608 	if (!state)
2609 		return -ENOMEM;
2610 
2611 	state->context = dc_create_state(adev->dm.dc);
2612 	if (!state->context) {
2613 		kfree(state);
2614 		return -ENOMEM;
2615 	}
2616 
2617 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2618 
2619 	drm_atomic_private_obj_init(adev->ddev,
2620 				    &adev->dm.atomic_obj,
2621 				    &state->base,
2622 				    &dm_atomic_state_funcs);
2623 
2624 	r = amdgpu_display_modeset_create_props(adev);
2625 	if (r)
2626 		return r;
2627 
2628 	r = amdgpu_dm_audio_init(adev);
2629 	if (r)
2630 		return r;
2631 
2632 	return 0;
2633 }
2634 
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2638 
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2641 
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2643 {
2644 #if defined(CONFIG_ACPI)
2645 	struct amdgpu_dm_backlight_caps caps;
2646 
2647 	if (dm->backlight_caps.caps_valid)
2648 		return;
2649 
2650 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651 	if (caps.caps_valid) {
2652 		dm->backlight_caps.caps_valid = true;
2653 		if (caps.aux_support)
2654 			return;
2655 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2657 	} else {
2658 		dm->backlight_caps.min_input_signal =
2659 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660 		dm->backlight_caps.max_input_signal =
2661 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2662 	}
2663 #else
2664 	if (dm->backlight_caps.aux_support)
2665 		return;
2666 
2667 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2669 #endif
2670 }
2671 
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2673 {
2674 	bool rc;
2675 
2676 	if (!link)
2677 		return 1;
2678 
2679 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2681 
2682 	return rc ? 0 : 1;
2683 }
2684 
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686 			      const uint32_t user_brightness)
2687 {
2688 	u32 min, max, conversion_pace;
2689 	u32 brightness = user_brightness;
2690 
2691 	if (!caps)
2692 		goto out;
2693 
2694 	if (!caps->aux_support) {
2695 		max = caps->max_input_signal;
2696 		min = caps->min_input_signal;
2697 		/*
2698 		 * The brightness input is in the range 0-255.
2699 		 * It needs to be rescaled to lie between the
2700 		 * requested min and max input signal levels.
2701 		 * It also needs to be scaled up by 0x101 to
2702 		 * match the DC interface, which has a range of
2703 		 * 0 to 0xffff.
2704 		 */
2705 		conversion_pace = 0x101;
2706 		brightness =
2707 			user_brightness
2708 			* conversion_pace
2709 			* (max - min)
2710 			/ AMDGPU_MAX_BL_LEVEL
2711 			+ min * conversion_pace;
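		/*
		 * Worked example with the defaults defined above (min = 12,
		 * max = 255, AMDGPU_MAX_BL_LEVEL = 255): user_brightness = 255
		 * yields 255*0x101*243/255 + 12*0x101 = 62451 + 3084 = 65535
		 * (0xffff), and user_brightness = 0 yields 12*0x101 = 3084,
		 * so the DC range used is [3084, 0xffff].
		 */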
2712 	} else {
2713 		/* TODO
2714 		 * We are doing a linear interpolation here, which is OK but
2715 		 * does not provide the optimal result. We probably want
2716 		 * something close to the Perceptual Quantizer (PQ) curve.
2717 		 */
2718 		max = caps->aux_max_input_signal;
2719 		min = caps->aux_min_input_signal;
2720 
2721 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722 			       + user_brightness * max;
2723 		// Multiply the value by 1000 since we use millinits
2724 		brightness *= 1000;
2725 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
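		/*
		 * Worked example (hypothetical panel with min = 5 nits and
		 * max = 500 nits): user_brightness = 128 gives
		 * (127*5 + 128*500) * 1000 / 255 = 253471 millinits, i.e.
		 * ~253.5 nits; the endpoints map exactly to min and max.
		 */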
2726 	}
2727 
2728 out:
2729 	return brightness;
2730 }
2731 
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2733 {
2734 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2735 	struct amdgpu_dm_backlight_caps caps;
2736 	struct dc_link *link = NULL;
2737 	u32 brightness;
2738 	bool rc;
2739 
2740 	amdgpu_dm_update_backlight_caps(dm);
2741 	caps = dm->backlight_caps;
2742 
2743 	link = (struct dc_link *)dm->backlight_link;
2744 
2745 	brightness = convert_brightness(&caps, bd->props.brightness);
2746 	// Change brightness based on AUX property
2747 	if (caps.aux_support)
2748 		return set_backlight_via_aux(link, brightness);
2749 
2750 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2751 
2752 	return rc ? 0 : 1;
2753 }
2754 
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2756 {
2757 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2758 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2759 
2760 	if (ret == DC_ERROR_UNEXPECTED)
2761 		return bd->props.brightness;
2762 	return ret;
2763 }
2764 
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766 	.options = BL_CORE_SUSPENDRESUME,
2767 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2768 	.update_status	= amdgpu_dm_backlight_update_status,
2769 };
2770 
2771 static void
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2773 {
2774 	char bl_name[16];
2775 	struct backlight_properties props = { 0 };
2776 
2777 	amdgpu_dm_update_backlight_caps(dm);
2778 
2779 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2781 	props.type = BACKLIGHT_RAW;
2782 
2783 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784 			dm->adev->ddev->primary->index);
2785 
2786 	dm->backlight_dev = backlight_device_register(bl_name,
2787 			dm->adev->ddev->dev,
2788 			dm,
2789 			&amdgpu_dm_backlight_ops,
2790 			&props);
2791 
2792 	if (IS_ERR(dm->backlight_dev))
2793 		DRM_ERROR("DM: Backlight registration failed!\n");
2794 	else
2795 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2796 }
2797 
2798 #endif
2799 
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801 			    struct amdgpu_mode_info *mode_info, int plane_id,
2802 			    enum drm_plane_type plane_type,
2803 			    const struct dc_plane_cap *plane_cap)
2804 {
2805 	struct drm_plane *plane;
2806 	unsigned long possible_crtcs;
2807 	int ret = 0;
2808 
2809 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2810 	if (!plane) {
2811 		DRM_ERROR("KMS: Failed to allocate plane\n");
2812 		return -ENOMEM;
2813 	}
2814 	plane->type = plane_type;
2815 
2816 	/*
2817 	 * HACK: IGT tests expect that the primary plane for a CRTC
2818 	 * can only have one possible CRTC. Only expose support for
2819 	 * any CRTC on planes that will not be used as a primary plane
2820 	 * for a CRTC, such as overlay or underlay planes.
2821 	 */
2822 	possible_crtcs = 1 << plane_id;
2823 	if (plane_id >= dm->dc->caps.max_streams)
2824 		possible_crtcs = 0xff;
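	/*
	 * E.g. plane_id 0 -> possible_crtcs = 0x1 (CRTC 0 only), while an
	 * overlay plane with plane_id >= max_streams gets 0xff (any CRTC).
	 */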
2825 
2826 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2827 
2828 	if (ret) {
2829 		DRM_ERROR("KMS: Failed to initialize plane\n");
2830 		kfree(plane);
2831 		return ret;
2832 	}
2833 
2834 	if (mode_info)
2835 		mode_info->planes[plane_id] = plane;
2836 
2837 	return ret;
2838 }
2839 
2840 
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842 				      struct dc_link *link)
2843 {
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2846 
2847 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848 	    link->type != dc_connection_none) {
2849 		/*
2850 		 * Even if registration fails, we should continue with
2851 		 * DM initialization, because not having backlight control
2852 		 * is better than a black screen.
2853 		 */
2854 		amdgpu_dm_register_backlight_device(dm);
2855 
2856 		if (dm->backlight_dev)
2857 			dm->backlight_link = link;
2858 	}
2859 #endif
2860 }
2861 
2862 
2863 /*
2864  * In this architecture, the association
2865  * connector -> encoder -> crtc
2866  * is not really required. The crtc and connector will hold the
2867  * display_index as an abstraction to use with the DAL component.
2868  *
2869  * Returns 0 on success
2870  */
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2872 {
2873 	struct amdgpu_display_manager *dm = &adev->dm;
2874 	int32_t i;
2875 	struct amdgpu_dm_connector *aconnector = NULL;
2876 	struct amdgpu_encoder *aencoder = NULL;
2877 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2878 	uint32_t link_cnt;
2879 	int32_t primary_planes;
2880 	enum dc_connection_type new_connection_type = dc_connection_none;
2881 	const struct dc_plane_cap *plane;
2882 
2883 	link_cnt = dm->dc->caps.max_links;
2884 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2885 		DRM_ERROR("DM: Failed to initialize mode config\n");
2886 		return -EINVAL;
2887 	}
2888 
2889 	/* There is one primary plane per CRTC */
2890 	primary_planes = dm->dc->caps.max_streams;
2891 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2892 
2893 	/*
2894 	 * Initialize primary planes, the implicit planes for legacy IOCTLs.
2895 	 * Order is reversed to match iteration order in atomic check.
2896 	 */
2897 	for (i = (primary_planes - 1); i >= 0; i--) {
2898 		plane = &dm->dc->caps.planes[i];
2899 
2900 		if (initialize_plane(dm, mode_info, i,
2901 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2902 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2903 			goto fail;
2904 		}
2905 	}
2906 
2907 	/*
2908 	 * Initialize overlay planes, index starting after primary planes.
2909 	 * These planes have a higher DRM index than the primary planes since
2910 	 * they should be considered as having a higher z-order.
2911 	 * Order is reversed to match iteration order in atomic check.
2912 	 *
2913 	 * Only support DCN for now, and only expose one so we don't encourage
2914 	 * userspace to use up all the pipes.
2915 	 */
2916 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2918 
2919 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2920 			continue;
2921 
2922 		if (!plane->blends_with_above || !plane->blends_with_below)
2923 			continue;
2924 
2925 		if (!plane->pixel_format_support.argb8888)
2926 			continue;
2927 
2928 		if (initialize_plane(dm, NULL, primary_planes + i,
2929 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2930 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2931 			goto fail;
2932 		}
2933 
2934 		/* Only create one overlay plane. */
2935 		break;
2936 	}
2937 
2938 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2939 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2941 			goto fail;
2942 		}
2943 
2944 	dm->display_indexes_num = dm->dc->caps.max_streams;
2945 
2946 	/* Loop over all connectors on the board. */
2947 	for (i = 0; i < link_cnt; i++) {
2948 		struct dc_link *link = NULL;
2949 
2950 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2951 			DRM_ERROR(
2952 				"KMS: Cannot support more than %d display indexes\n",
2953 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2954 			continue;
2955 		}
2956 
2957 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2958 		if (!aconnector)
2959 			goto fail;
2960 
2961 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2962 		if (!aencoder)
2963 			goto fail;
2964 
2965 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2967 			goto fail;
2968 		}
2969 
2970 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971 			DRM_ERROR("KMS: Failed to initialize connector\n");
2972 			goto fail;
2973 		}
2974 
2975 		link = dc_get_link_at_index(dm->dc, i);
2976 
2977 		if (!dc_link_detect_sink(link, &new_connection_type))
2978 			DRM_ERROR("KMS: Failed to detect connector\n");
2979 
2980 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981 			emulated_link_detect(link);
2982 			amdgpu_dm_update_connector_after_detect(aconnector);
2983 
2984 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985 			amdgpu_dm_update_connector_after_detect(aconnector);
2986 			register_backlight_device(dm, link);
2987 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988 				amdgpu_dm_set_psr_caps(link);
2989 		}
2990 
2991 
2992 	}
2993 
2994 	/* Software is initialized. Now we can register interrupt handlers. */
2995 	switch (adev->asic_type) {
2996 	case CHIP_BONAIRE:
2997 	case CHIP_HAWAII:
2998 	case CHIP_KAVERI:
2999 	case CHIP_KABINI:
3000 	case CHIP_MULLINS:
3001 	case CHIP_TONGA:
3002 	case CHIP_FIJI:
3003 	case CHIP_CARRIZO:
3004 	case CHIP_STONEY:
3005 	case CHIP_POLARIS11:
3006 	case CHIP_POLARIS10:
3007 	case CHIP_POLARIS12:
3008 	case CHIP_VEGAM:
3009 	case CHIP_VEGA10:
3010 	case CHIP_VEGA12:
3011 	case CHIP_VEGA20:
3012 		if (dce110_register_irq_handlers(dm->adev)) {
3013 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3014 			goto fail;
3015 		}
3016 		break;
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3018 	case CHIP_RAVEN:
3019 	case CHIP_NAVI12:
3020 	case CHIP_NAVI10:
3021 	case CHIP_NAVI14:
3022 	case CHIP_RENOIR:
3023 		if (dcn10_register_irq_handlers(dm->adev)) {
3024 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3025 			goto fail;
3026 		}
3027 		break;
3028 #endif
3029 	default:
3030 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3031 		goto fail;
3032 	}
3033 
3034 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035 		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3036 
3037 	/* No userspace support. */
3038 	dm->dc->debug.disable_tri_buf = true;
3039 
3040 	return 0;
3041 fail:
3042 	kfree(aencoder);
3043 	kfree(aconnector);
3044 
3045 	return -EINVAL;
3046 }
3047 
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3049 {
3050 	drm_mode_config_cleanup(dm->ddev);
3051 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3052 	return;
3053 }
3054 
3055 /******************************************************************************
3056  * amdgpu_display_funcs functions
3057  *****************************************************************************/
3058 
3059 /*
3060  * dm_bandwidth_update - program display watermarks
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Calculate and program the display watermarks and line buffer allocation.
3065  */
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3067 {
3068 	/* TODO: implement later */
3069 }
3070 
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074 	.backlight_set_level = NULL, /* never called for DC */
3075 	.backlight_get_level = NULL, /* never called for DC */
3076 	.hpd_sense = NULL,/* called unconditionally */
3077 	.hpd_set_polarity = NULL, /* called unconditionally */
3078 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079 	.page_flip_get_scanoutpos =
3080 		dm_crtc_get_scanoutpos,/* called unconditionally */
3081 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3083 };
3084 
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3086 
3087 static ssize_t s3_debug_store(struct device *device,
3088 			      struct device_attribute *attr,
3089 			      const char *buf,
3090 			      size_t count)
3091 {
3092 	int ret;
3093 	int s3_state;
3094 	struct drm_device *drm_dev = dev_get_drvdata(device);
3095 	struct amdgpu_device *adev = drm_dev->dev_private;
3096 
3097 	ret = kstrtoint(buf, 0, &s3_state);
3098 
3099 	if (ret == 0) {
3100 		if (s3_state) {
3101 			dm_resume(adev);
3102 			drm_kms_helper_hotplug_event(adev->ddev);
3103 		} else
3104 			dm_suspend(adev);
3105 	}
3106 
3107 	return ret == 0 ? count : 0;
3108 }
3109 
3110 DEVICE_ATTR_WO(s3_debug);
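
/*
 * Editorial usage note: assuming the standard sysfs layout, writing to this
 * attribute (e.g. "echo 0 > /sys/class/drm/card0/device/s3_debug" to suspend,
 * then "echo 1" to resume) exercises the dm_suspend()/dm_resume() paths
 * without a full system S3 cycle.
 */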
3111 
3112 #endif
3113 
3114 static int dm_early_init(void *handle)
3115 {
3116 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3117 
3118 	switch (adev->asic_type) {
3119 	case CHIP_BONAIRE:
3120 	case CHIP_HAWAII:
3121 		adev->mode_info.num_crtc = 6;
3122 		adev->mode_info.num_hpd = 6;
3123 		adev->mode_info.num_dig = 6;
3124 		break;
3125 	case CHIP_KAVERI:
3126 		adev->mode_info.num_crtc = 4;
3127 		adev->mode_info.num_hpd = 6;
3128 		adev->mode_info.num_dig = 7;
3129 		break;
3130 	case CHIP_KABINI:
3131 	case CHIP_MULLINS:
3132 		adev->mode_info.num_crtc = 2;
3133 		adev->mode_info.num_hpd = 6;
3134 		adev->mode_info.num_dig = 6;
3135 		break;
3136 	case CHIP_FIJI:
3137 	case CHIP_TONGA:
3138 		adev->mode_info.num_crtc = 6;
3139 		adev->mode_info.num_hpd = 6;
3140 		adev->mode_info.num_dig = 7;
3141 		break;
3142 	case CHIP_CARRIZO:
3143 		adev->mode_info.num_crtc = 3;
3144 		adev->mode_info.num_hpd = 6;
3145 		adev->mode_info.num_dig = 9;
3146 		break;
3147 	case CHIP_STONEY:
3148 		adev->mode_info.num_crtc = 2;
3149 		adev->mode_info.num_hpd = 6;
3150 		adev->mode_info.num_dig = 9;
3151 		break;
3152 	case CHIP_POLARIS11:
3153 	case CHIP_POLARIS12:
3154 		adev->mode_info.num_crtc = 5;
3155 		adev->mode_info.num_hpd = 5;
3156 		adev->mode_info.num_dig = 5;
3157 		break;
3158 	case CHIP_POLARIS10:
3159 	case CHIP_VEGAM:
3160 		adev->mode_info.num_crtc = 6;
3161 		adev->mode_info.num_hpd = 6;
3162 		adev->mode_info.num_dig = 6;
3163 		break;
3164 	case CHIP_VEGA10:
3165 	case CHIP_VEGA12:
3166 	case CHIP_VEGA20:
3167 		adev->mode_info.num_crtc = 6;
3168 		adev->mode_info.num_hpd = 6;
3169 		adev->mode_info.num_dig = 6;
3170 		break;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3172 	case CHIP_RAVEN:
3173 		adev->mode_info.num_crtc = 4;
3174 		adev->mode_info.num_hpd = 4;
3175 		adev->mode_info.num_dig = 4;
3176 		break;
3177 #endif
3178 	case CHIP_NAVI10:
3179 	case CHIP_NAVI12:
3180 		adev->mode_info.num_crtc = 6;
3181 		adev->mode_info.num_hpd = 6;
3182 		adev->mode_info.num_dig = 6;
3183 		break;
3184 	case CHIP_NAVI14:
3185 		adev->mode_info.num_crtc = 5;
3186 		adev->mode_info.num_hpd = 5;
3187 		adev->mode_info.num_dig = 5;
3188 		break;
3189 	case CHIP_RENOIR:
3190 		adev->mode_info.num_crtc = 4;
3191 		adev->mode_info.num_hpd = 4;
3192 		adev->mode_info.num_dig = 4;
3193 		break;
3194 	default:
3195 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3196 		return -EINVAL;
3197 	}
3198 
3199 	amdgpu_dm_set_irq_funcs(adev);
3200 
3201 	if (adev->mode_info.funcs == NULL)
3202 		adev->mode_info.funcs = &dm_display_funcs;
3203 
3204 	/*
3205 	 * Note: Do NOT change adev->audio_endpt_rreg and
3206 	 * adev->audio_endpt_wreg because they are initialised in
3207 	 * amdgpu_device_init()
3208 	 */
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3210 	device_create_file(
3211 		adev->ddev->dev,
3212 		&dev_attr_s3_debug);
3213 #endif
3214 
3215 	return 0;
3216 }
3217 
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219 			     struct dc_stream_state *new_stream,
3220 			     struct dc_stream_state *old_stream)
3221 {
3222 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3223 		return false;
3224 
3225 	if (!crtc_state->enable)
3226 		return false;
3227 
3228 	return crtc_state->active;
3229 }
3230 
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3232 {
3233 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3234 		return false;
3235 
3236 	return !crtc_state->enable || !crtc_state->active;
3237 }
3238 
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3240 {
3241 	drm_encoder_cleanup(encoder);
3242 	kfree(encoder);
3243 }
3244 
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246 	.destroy = amdgpu_dm_encoder_destroy,
3247 };
3248 
3249 
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251 				struct dc_scaling_info *scaling_info)
3252 {
3253 	int scale_w, scale_h;
3254 
3255 	memset(scaling_info, 0, sizeof(*scaling_info));
3256 
3257 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
3258 	scaling_info->src_rect.x = state->src_x >> 16;
3259 	scaling_info->src_rect.y = state->src_y >> 16;
3260 
3261 	scaling_info->src_rect.width = state->src_w >> 16;
3262 	if (scaling_info->src_rect.width == 0)
3263 		return -EINVAL;
3264 
3265 	scaling_info->src_rect.height = state->src_h >> 16;
3266 	if (scaling_info->src_rect.height == 0)
3267 		return -EINVAL;
3268 
3269 	scaling_info->dst_rect.x = state->crtc_x;
3270 	scaling_info->dst_rect.y = state->crtc_y;
3271 
3272 	if (state->crtc_w == 0)
3273 		return -EINVAL;
3274 
3275 	scaling_info->dst_rect.width = state->crtc_w;
3276 
3277 	if (state->crtc_h == 0)
3278 		return -EINVAL;
3279 
3280 	scaling_info->dst_rect.height = state->crtc_h;
3281 
3282 	/* DRM doesn't specify clipping on destination output. */
3283 	scaling_info->clip_rect = scaling_info->dst_rect;
3284 
3285 	/* TODO: Validate scaling per-format with DC plane caps */
3286 	scale_w = scaling_info->dst_rect.width * 1000 /
3287 		  scaling_info->src_rect.width;
3288 
3289 	if (scale_w < 250 || scale_w > 16000)
3290 		return -EINVAL;
3291 
3292 	scale_h = scaling_info->dst_rect.height * 1000 /
3293 		  scaling_info->src_rect.height;
3294 
3295 	if (scale_h < 250 || scale_h > 16000)
3296 		return -EINVAL;
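
	/*
	 * The 250..16000 window is scale * 1000, i.e. 0.25x to 16x. E.g.
	 * scaling a 960-wide source to a 1920-wide destination gives
	 * scale_w = 1920 * 1000 / 960 = 2000 (2.0x), which passes.
	 */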
3297 
3298 	/*
3299 	 * The "scaling_quality" can be ignored for now; with quality = 0, DC
3300 	 * assumes reasonable defaults based on the format.
3301 	 */
3302 
3303 	return 0;
3304 }
3305 
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307 		       uint64_t *tiling_flags)
3308 {
3309 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310 	int r = amdgpu_bo_reserve(rbo, false);
3311 
3312 	if (unlikely(r)) {
3313 		/* Don't show error message when returning -ERESTARTSYS */
3314 		if (r != -ERESTARTSYS)
3315 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3316 		return r;
3317 	}
3318 
3319 	if (tiling_flags)
3320 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3321 
3322 	amdgpu_bo_unreserve(rbo);
3323 
3324 	return r;
3325 }
3326 
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3328 {
3329 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3330 
3331 	return offset ? (address + offset * 256) : 0;
3332 }
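
/*
 * E.g. a DCC_OFFSET_256B value of 4 places the DCC metadata 1024 bytes past
 * the surface address; an offset of 0 means the surface has no DCC.
 */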
3333 
3334 static int
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336 			  const struct amdgpu_framebuffer *afb,
3337 			  const enum surface_pixel_format format,
3338 			  const enum dc_rotation_angle rotation,
3339 			  const struct plane_size *plane_size,
3340 			  const union dc_tiling_info *tiling_info,
3341 			  const uint64_t info,
3342 			  struct dc_plane_dcc_param *dcc,
3343 			  struct dc_plane_address *address,
3344 			  bool force_disable_dcc)
3345 {
3346 	struct dc *dc = adev->dm.dc;
3347 	struct dc_dcc_surface_param input;
3348 	struct dc_surface_dcc_cap output;
3349 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3350 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3351 	uint64_t dcc_address;
3352 
3353 	memset(&input, 0, sizeof(input));
3354 	memset(&output, 0, sizeof(output));
3355 
3356 	if (force_disable_dcc)
3357 		return 0;
3358 
3359 	if (!offset)
3360 		return 0;
3361 
3362 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3363 		return 0;
3364 
3365 	if (!dc->cap_funcs.get_dcc_compression_cap)
3366 		return -EINVAL;
3367 
3368 	input.format = format;
3369 	input.surface_size.width = plane_size->surface_size.width;
3370 	input.surface_size.height = plane_size->surface_size.height;
3371 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3372 
3373 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3374 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3375 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3376 		input.scan = SCAN_DIRECTION_VERTICAL;
3377 
3378 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3379 		return -EINVAL;
3380 
3381 	if (!output.capable)
3382 		return -EINVAL;
3383 
3384 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3385 		return -EINVAL;
3386 
3387 	dcc->enable = 1;
3388 	dcc->meta_pitch =
3389 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3390 	dcc->independent_64b_blks = i64b;
3391 
3392 	dcc_address = get_dcc_address(afb->address, info);
3393 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3394 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3395 
3396 	return 0;
3397 }
3398 
3399 static int
3400 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3401 			     const struct amdgpu_framebuffer *afb,
3402 			     const enum surface_pixel_format format,
3403 			     const enum dc_rotation_angle rotation,
3404 			     const uint64_t tiling_flags,
3405 			     union dc_tiling_info *tiling_info,
3406 			     struct plane_size *plane_size,
3407 			     struct dc_plane_dcc_param *dcc,
3408 			     struct dc_plane_address *address,
3409 			     bool force_disable_dcc)
3410 {
3411 	const struct drm_framebuffer *fb = &afb->base;
3412 	int ret;
3413 
3414 	memset(tiling_info, 0, sizeof(*tiling_info));
3415 	memset(plane_size, 0, sizeof(*plane_size));
3416 	memset(dcc, 0, sizeof(*dcc));
3417 	memset(address, 0, sizeof(*address));
3418 
3419 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3420 		plane_size->surface_size.x = 0;
3421 		plane_size->surface_size.y = 0;
3422 		plane_size->surface_size.width = fb->width;
3423 		plane_size->surface_size.height = fb->height;
3424 		plane_size->surface_pitch =
3425 			fb->pitches[0] / fb->format->cpp[0];
3426 
3427 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3428 		address->grph.addr.low_part = lower_32_bits(afb->address);
3429 		address->grph.addr.high_part = upper_32_bits(afb->address);
3430 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3431 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3432 
3433 		plane_size->surface_size.x = 0;
3434 		plane_size->surface_size.y = 0;
3435 		plane_size->surface_size.width = fb->width;
3436 		plane_size->surface_size.height = fb->height;
3437 		plane_size->surface_pitch =
3438 			fb->pitches[0] / fb->format->cpp[0];
3439 
3440 		plane_size->chroma_size.x = 0;
3441 		plane_size->chroma_size.y = 0;
3442 		/* TODO: set these based on surface format */
3443 		plane_size->chroma_size.width = fb->width / 2;
3444 		plane_size->chroma_size.height = fb->height / 2;
3445 
3446 		plane_size->chroma_pitch =
3447 			fb->pitches[1] / fb->format->cpp[1];
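
		/*
		 * E.g. for a 4:2:0 semi-planar format such as NV12 this
		 * yields a half-width, half-height chroma plane located
		 * fb->offsets[1] bytes past the base address.
		 */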
3448 
3449 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3450 		address->video_progressive.luma_addr.low_part =
3451 			lower_32_bits(afb->address);
3452 		address->video_progressive.luma_addr.high_part =
3453 			upper_32_bits(afb->address);
3454 		address->video_progressive.chroma_addr.low_part =
3455 			lower_32_bits(chroma_addr);
3456 		address->video_progressive.chroma_addr.high_part =
3457 			upper_32_bits(chroma_addr);
3458 	}
3459 
3460 	/* Fill GFX8 params */
3461 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3462 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3463 
3464 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3465 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3466 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3467 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3468 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3469 
3470 		/* XXX fix me for VI */
3471 		tiling_info->gfx8.num_banks = num_banks;
3472 		tiling_info->gfx8.array_mode =
3473 				DC_ARRAY_2D_TILED_THIN1;
3474 		tiling_info->gfx8.tile_split = tile_split;
3475 		tiling_info->gfx8.bank_width = bankw;
3476 		tiling_info->gfx8.bank_height = bankh;
3477 		tiling_info->gfx8.tile_aspect = mtaspect;
3478 		tiling_info->gfx8.tile_mode =
3479 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3480 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3481 			== DC_ARRAY_1D_TILED_THIN1) {
3482 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3483 	}
3484 
3485 	tiling_info->gfx8.pipe_config =
3486 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3487 
3488 	if (adev->asic_type == CHIP_VEGA10 ||
3489 	    adev->asic_type == CHIP_VEGA12 ||
3490 	    adev->asic_type == CHIP_VEGA20 ||
3491 	    adev->asic_type == CHIP_NAVI10 ||
3492 	    adev->asic_type == CHIP_NAVI14 ||
3493 	    adev->asic_type == CHIP_NAVI12 ||
3494 	    adev->asic_type == CHIP_RENOIR ||
3495 	    adev->asic_type == CHIP_RAVEN) {
3496 		/* Fill GFX9 params */
3497 		tiling_info->gfx9.num_pipes =
3498 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3499 		tiling_info->gfx9.num_banks =
3500 			adev->gfx.config.gb_addr_config_fields.num_banks;
3501 		tiling_info->gfx9.pipe_interleave =
3502 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3503 		tiling_info->gfx9.num_shader_engines =
3504 			adev->gfx.config.gb_addr_config_fields.num_se;
3505 		tiling_info->gfx9.max_compressed_frags =
3506 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3507 		tiling_info->gfx9.num_rb_per_se =
3508 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3509 		tiling_info->gfx9.swizzle =
3510 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3511 		tiling_info->gfx9.shaderEnable = 1;
3512 
3513 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3514 						plane_size, tiling_info,
3515 						tiling_flags, dcc, address,
3516 						force_disable_dcc);
3517 		if (ret)
3518 			return ret;
3519 	}
3520 
3521 	return 0;
3522 }
3523 
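/*
 * Derive DC blending attributes from the DRM plane state. Both values
 * only apply to overlay planes: per-pixel alpha requires premultiplied
 * blending with an alpha-capable format, and a plane-wide (global)
 * alpha is used when the DRM plane alpha is below its 16-bit maximum.
 */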
3524 static void
3525 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3526 			       bool *per_pixel_alpha, bool *global_alpha,
3527 			       int *global_alpha_value)
3528 {
3529 	*per_pixel_alpha = false;
3530 	*global_alpha = false;
3531 	*global_alpha_value = 0xff;
3532 
3533 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3534 		return;
3535 
3536 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3537 		static const uint32_t alpha_formats[] = {
3538 			DRM_FORMAT_ARGB8888,
3539 			DRM_FORMAT_RGBA8888,
3540 			DRM_FORMAT_ABGR8888,
3541 		};
3542 		uint32_t format = plane_state->fb->format->format;
3543 		unsigned int i;
3544 
3545 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3546 			if (format == alpha_formats[i]) {
3547 				*per_pixel_alpha = true;
3548 				break;
3549 			}
3550 		}
3551 	}
3552 
3553 	if (plane_state->alpha < 0xffff) {
3554 		*global_alpha = true;
3555 		*global_alpha_value = plane_state->alpha >> 8;
3556 	}
3557 }
3558 
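/*
 * Map the DRM color encoding and range properties onto a DC color
 * space. RGB surfaces always use sRGB; YCbCr surfaces select
 * BT.601/BT.709 in full or limited range, while BT.2020 is only
 * accepted in full range here.
 */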
3559 static int
3560 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3561 			    const enum surface_pixel_format format,
3562 			    enum dc_color_space *color_space)
3563 {
3564 	bool full_range;
3565 
3566 	*color_space = COLOR_SPACE_SRGB;
3567 
3568 	/* DRM color properties only affect non-RGB formats. */
3569 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3570 		return 0;
3571 
3572 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3573 
3574 	switch (plane_state->color_encoding) {
3575 	case DRM_COLOR_YCBCR_BT601:
3576 		if (full_range)
3577 			*color_space = COLOR_SPACE_YCBCR601;
3578 		else
3579 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3580 		break;
3581 
3582 	case DRM_COLOR_YCBCR_BT709:
3583 		if (full_range)
3584 			*color_space = COLOR_SPACE_YCBCR709;
3585 		else
3586 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3587 		break;
3588 
3589 	case DRM_COLOR_YCBCR_BT2020:
3590 		if (full_range)
3591 			*color_space = COLOR_SPACE_2020_YCBCR;
3592 		else
3593 			return -EINVAL;
3594 		break;
3595 
3596 	default:
3597 		return -EINVAL;
3598 	}
3599 
3600 	return 0;
3601 }
3602 
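/*
 * Translate a DRM plane state (pixel format, rotation, tiling and
 * blending) into a DC plane_info plus a scanout address. Returns
 * -EINVAL for pixel formats or color properties DC cannot handle.
 */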
3603 static int
3604 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3605 			    const struct drm_plane_state *plane_state,
3606 			    const uint64_t tiling_flags,
3607 			    struct dc_plane_info *plane_info,
3608 			    struct dc_plane_address *address,
3609 			    bool force_disable_dcc)
3610 {
3611 	const struct drm_framebuffer *fb = plane_state->fb;
3612 	const struct amdgpu_framebuffer *afb =
3613 		to_amdgpu_framebuffer(plane_state->fb);
3614 	struct drm_format_name_buf format_name;
3615 	int ret;
3616 
3617 	memset(plane_info, 0, sizeof(*plane_info));
3618 
3619 	switch (fb->format->format) {
3620 	case DRM_FORMAT_C8:
3621 		plane_info->format =
3622 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3623 		break;
3624 	case DRM_FORMAT_RGB565:
3625 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3626 		break;
3627 	case DRM_FORMAT_XRGB8888:
3628 	case DRM_FORMAT_ARGB8888:
3629 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3630 		break;
3631 	case DRM_FORMAT_XRGB2101010:
3632 	case DRM_FORMAT_ARGB2101010:
3633 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3634 		break;
3635 	case DRM_FORMAT_XBGR2101010:
3636 	case DRM_FORMAT_ABGR2101010:
3637 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3638 		break;
3639 	case DRM_FORMAT_XBGR8888:
3640 	case DRM_FORMAT_ABGR8888:
3641 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3642 		break;
3643 	case DRM_FORMAT_NV21:
3644 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3645 		break;
3646 	case DRM_FORMAT_NV12:
3647 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3648 		break;
3649 	case DRM_FORMAT_P010:
3650 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3651 		break;
3652 	default:
3653 		DRM_ERROR(
3654 			"Unsupported screen format %s\n",
3655 			drm_get_format_name(fb->format->format, &format_name));
3656 		return -EINVAL;
3657 	}
3658 
3659 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3660 	case DRM_MODE_ROTATE_0:
3661 		plane_info->rotation = ROTATION_ANGLE_0;
3662 		break;
3663 	case DRM_MODE_ROTATE_90:
3664 		plane_info->rotation = ROTATION_ANGLE_90;
3665 		break;
3666 	case DRM_MODE_ROTATE_180:
3667 		plane_info->rotation = ROTATION_ANGLE_180;
3668 		break;
3669 	case DRM_MODE_ROTATE_270:
3670 		plane_info->rotation = ROTATION_ANGLE_270;
3671 		break;
3672 	default:
3673 		plane_info->rotation = ROTATION_ANGLE_0;
3674 		break;
3675 	}
3676 
3677 	plane_info->visible = true;
3678 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3679 
3680 	plane_info->layer_index = 0;
3681 
3682 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3683 					  &plane_info->color_space);
3684 	if (ret)
3685 		return ret;
3686 
3687 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3688 					   plane_info->rotation, tiling_flags,
3689 					   &plane_info->tiling_info,
3690 					   &plane_info->plane_size,
3691 					   &plane_info->dcc, address,
3692 					   force_disable_dcc);
3693 	if (ret)
3694 		return ret;
3695 
3696 	fill_blending_from_plane_state(
3697 		plane_state, &plane_info->per_pixel_alpha,
3698 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3699 
3700 	return 0;
3701 }
3702 
3703 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3704 				    struct dc_plane_state *dc_plane_state,
3705 				    struct drm_plane_state *plane_state,
3706 				    struct drm_crtc_state *crtc_state)
3707 {
3708 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3709 	const struct amdgpu_framebuffer *amdgpu_fb =
3710 		to_amdgpu_framebuffer(plane_state->fb);
3711 	struct dc_scaling_info scaling_info;
3712 	struct dc_plane_info plane_info;
3713 	uint64_t tiling_flags;
3714 	int ret;
3715 	bool force_disable_dcc = false;
3716 
3717 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3718 	if (ret)
3719 		return ret;
3720 
3721 	dc_plane_state->src_rect = scaling_info.src_rect;
3722 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3723 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3724 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3725 
3726 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3727 	if (ret)
3728 		return ret;
3729 
3730 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3731 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3732 					  &plane_info,
3733 					  &dc_plane_state->address,
3734 					  force_disable_dcc);
3735 	if (ret)
3736 		return ret;
3737 
3738 	dc_plane_state->format = plane_info.format;
3739 	dc_plane_state->color_space = plane_info.color_space;

3741 	dc_plane_state->plane_size = plane_info.plane_size;
3742 	dc_plane_state->rotation = plane_info.rotation;
3743 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3744 	dc_plane_state->stereo_format = plane_info.stereo_format;
3745 	dc_plane_state->tiling_info = plane_info.tiling_info;
3746 	dc_plane_state->visible = plane_info.visible;
3747 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3748 	dc_plane_state->global_alpha = plane_info.global_alpha;
3749 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3750 	dc_plane_state->dcc = plane_info.dcc;
3751 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3752 
3753 	/*
3754 	 * Always set input transfer function, since plane state is refreshed
3755 	 * every time.
3756 	 */
3757 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3758 	if (ret)
3759 		return ret;
3760 
3761 	return 0;
3762 }
3763 
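/*
 * Compute the stream's source and destination rectangles from the
 * requested scaling mode (full screen, aspect-preserving or centered)
 * and shrink the destination by any underscan borders.
 */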
3764 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3765 					   const struct dm_connector_state *dm_state,
3766 					   struct dc_stream_state *stream)
3767 {
3768 	enum amdgpu_rmx_type rmx_type;
3769 
3770 	struct rect src = { 0 }; /* viewport in composition space */
3771 	struct rect dst = { 0 }; /* stream addressable area */
3772 
3773 	/* no mode. nothing to be done */
3774 	if (!mode)
3775 		return;
3776 
3777 	/* Full screen scaling by default */
3778 	src.width = mode->hdisplay;
3779 	src.height = mode->vdisplay;
3780 	dst.width = stream->timing.h_addressable;
3781 	dst.height = stream->timing.v_addressable;
3782 
3783 	if (dm_state) {
3784 		rmx_type = dm_state->scaling;
3785 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3786 			if (src.width * dst.height <
3787 					src.height * dst.width) {
3788 				/* height needs less upscaling/more downscaling */
3789 				dst.width = src.width *
3790 						dst.height / src.height;
3791 			} else {
3792 				/* width needs less upscaling/more downscaling */
3793 				dst.height = src.height *
3794 						dst.width / src.width;
3795 			}
3796 		} else if (rmx_type == RMX_CENTER) {
3797 			dst = src;
3798 		}
3799 
3800 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3801 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3802 
3803 		if (dm_state->underscan_enable) {
3804 			dst.x += dm_state->underscan_hborder / 2;
3805 			dst.y += dm_state->underscan_vborder / 2;
3806 			dst.width -= dm_state->underscan_hborder;
3807 			dst.height -= dm_state->underscan_vborder;
3808 		}
3809 	}
3810 
3811 	stream->src = src;
3812 	stream->dst = dst;
3813 
3814 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3815 			dst.x, dst.y, dst.width, dst.height);
3816 
3817 }
3818 
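/*
 * Derive the display color depth in bpc from the sink's EDID-reported
 * capabilities, capped by the user-requested max bpc and rounded down
 * to an even value; YCbCr 4:2:0 uses the HDMI HF-VSDB deep color bits.
 */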
3819 static enum dc_color_depth
3820 convert_color_depth_from_display_info(const struct drm_connector *connector,
3821 				      bool is_y420, int requested_bpc)
3822 {
3823 	uint8_t bpc;
3824 
3825 	if (is_y420) {
3826 		bpc = 8;
3827 
3828 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3829 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3830 			bpc = 16;
3831 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3832 			bpc = 12;
3833 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3834 			bpc = 10;
3835 	} else {
3836 		bpc = (uint8_t)connector->display_info.bpc;
3837 		/* Assume 8 bpc by default if no bpc is specified. */
3838 		bpc = bpc ? bpc : 8;
3839 	}
3840 
3841 	if (requested_bpc > 0) {
3842 		/*
3843 		 * Cap display bpc based on the user requested value.
3844 		 *
3845 		 * The value for state->max_bpc may not be correctly updated
3846 		 * depending on when the connector gets added to the state
3847 		 * or if this was called outside of atomic check, so it
3848 		 * can't be used directly.
3849 		 */
3850 		bpc = min_t(u8, bpc, requested_bpc);
3851 
3852 		/* Round down to the nearest even number. */
3853 		bpc = bpc - (bpc & 1);
3854 	}
3855 
3856 	switch (bpc) {
3857 	case 0:
3858 		/*
3859 		 * Temporary workaround: DRM doesn't parse color depth for
3860 		 * EDID revisions before 1.4.
3861 		 * TODO: Fix EDID parsing.
3862 		 */
3863 		return COLOR_DEPTH_888;
3864 	case 6:
3865 		return COLOR_DEPTH_666;
3866 	case 8:
3867 		return COLOR_DEPTH_888;
3868 	case 10:
3869 		return COLOR_DEPTH_101010;
3870 	case 12:
3871 		return COLOR_DEPTH_121212;
3872 	case 14:
3873 		return COLOR_DEPTH_141414;
3874 	case 16:
3875 		return COLOR_DEPTH_161616;
3876 	default:
3877 		return COLOR_DEPTH_UNDEFINED;
3878 	}
3879 }
3880 
3881 static enum dc_aspect_ratio
3882 get_aspect_ratio(const struct drm_display_mode *mode_in)
3883 {
3884 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3885 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3886 }
3887 
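/*
 * Choose the output color space from the pixel encoding: YCbCr streams
 * use BT.709 above the 27.03 MHz HDTV/SDTV pixel clock boundary and
 * BT.601 below it, while RGB streams use sRGB.
 */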
3888 static enum dc_color_space
3889 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3890 {
3891 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3892 
3893 	switch (dc_crtc_timing->pixel_encoding) {
3894 	case PIXEL_ENCODING_YCBCR422:
3895 	case PIXEL_ENCODING_YCBCR444:
3896 	case PIXEL_ENCODING_YCBCR420:
3897 	{
3898 		/*
3899 		 * 27.03 MHz is the separation point between HDTV and SDTV;
3900 		 * per the HDMI spec we use YCbCr709 above it and YCbCr601
3901 		 * below it.
3902 		 */
3903 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3904 			if (dc_crtc_timing->flags.Y_ONLY)
3905 				color_space =
3906 					COLOR_SPACE_YCBCR709_LIMITED;
3907 			else
3908 				color_space = COLOR_SPACE_YCBCR709;
3909 		} else {
3910 			if (dc_crtc_timing->flags.Y_ONLY)
3911 				color_space =
3912 					COLOR_SPACE_YCBCR601_LIMITED;
3913 			else
3914 				color_space = COLOR_SPACE_YCBCR601;
3915 		}
3916 
3917 	}
3918 	break;
3919 	case PIXEL_ENCODING_RGB:
3920 		color_space = COLOR_SPACE_SRGB;
3921 		break;
3922 
3923 	default:
3924 		WARN_ON(1);
3925 		break;
3926 	}
3927 
3928 	return color_space;
3929 }
3930 
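/*
 * Step the color depth down until the depth-adjusted pixel clock fits
 * within the sink's maximum TMDS clock, returning false if no valid
 * HDMI depth fits.
 */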
3931 static bool adjust_colour_depth_from_display_info(
3932 	struct dc_crtc_timing *timing_out,
3933 	const struct drm_display_info *info)
3934 {
3935 	enum dc_color_depth depth = timing_out->display_color_depth;
3936 	int normalized_clk;
3937 	do {
3938 		normalized_clk = timing_out->pix_clk_100hz / 10;
3939 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3940 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3941 			normalized_clk /= 2;
3942 		/* Adjusting pix clock following on HDMI spec based on colour depth */
3943 		switch (depth) {
3944 		case COLOR_DEPTH_888:
3945 			break;
3946 		case COLOR_DEPTH_101010:
3947 			normalized_clk = (normalized_clk * 30) / 24;
3948 			break;
3949 		case COLOR_DEPTH_121212:
3950 			normalized_clk = (normalized_clk * 36) / 24;
3951 			break;
3952 		case COLOR_DEPTH_161616:
3953 			normalized_clk = (normalized_clk * 48) / 24;
3954 			break;
3955 		default:
3956 			/* The above depths are the only ones valid for HDMI. */
3957 			return false;
3958 		}
3959 		if (normalized_clk <= info->max_tmds_clock) {
3960 			timing_out->display_color_depth = depth;
3961 			return true;
3962 		}
3963 	} while (--depth > COLOR_DEPTH_666);
3964 	return false;
3965 }
3966 
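/*
 * Fill the DC CRTC timing for a stream from a DRM display mode:
 * pixel encoding, color depth, sync widths, porches and VIC. When
 * old_stream is given, its VIC and sync polarities are carried over.
 */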
3967 static void fill_stream_properties_from_drm_display_mode(
3968 	struct dc_stream_state *stream,
3969 	const struct drm_display_mode *mode_in,
3970 	const struct drm_connector *connector,
3971 	const struct drm_connector_state *connector_state,
3972 	const struct dc_stream_state *old_stream,
3973 	int requested_bpc)
3974 {
3975 	struct dc_crtc_timing *timing_out = &stream->timing;
3976 	const struct drm_display_info *info = &connector->display_info;
3977 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3978 	struct hdmi_vendor_infoframe hv_frame;
3979 	struct hdmi_avi_infoframe avi_frame;
3980 
3981 	memset(&hv_frame, 0, sizeof(hv_frame));
3982 	memset(&avi_frame, 0, sizeof(avi_frame));
3983 
3984 	timing_out->h_border_left = 0;
3985 	timing_out->h_border_right = 0;
3986 	timing_out->v_border_top = 0;
3987 	timing_out->v_border_bottom = 0;
3988 	/* TODO: un-hardcode */
3989 	if (drm_mode_is_420_only(info, mode_in)
3990 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3991 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3992 	else if (drm_mode_is_420_also(info, mode_in)
3993 			&& aconnector->force_yuv420_output)
3994 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3995 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3996 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3997 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3998 	else
3999 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4000 
4001 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4002 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4003 		connector,
4004 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4005 		requested_bpc);
4006 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4007 	timing_out->hdmi_vic = 0;
4008 
4009 	if (old_stream) {
4010 		timing_out->vic = old_stream->timing.vic;
4011 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4012 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4013 	} else {
4014 		timing_out->vic = drm_match_cea_mode(mode_in);
4015 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4016 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4017 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4018 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4019 	}
4020 
4021 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4022 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4023 		timing_out->vic = avi_frame.video_code;
4024 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4025 		timing_out->hdmi_vic = hv_frame.vic;
4026 	}
4027 
4028 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4029 	timing_out->h_total = mode_in->crtc_htotal;
4030 	timing_out->h_sync_width =
4031 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4032 	timing_out->h_front_porch =
4033 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4034 	timing_out->v_total = mode_in->crtc_vtotal;
4035 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4036 	timing_out->v_front_porch =
4037 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4038 	timing_out->v_sync_width =
4039 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4040 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4041 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4042 
4043 	stream->output_color_space = get_output_color_space(timing_out);
4044 
4045 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4046 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4047 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4048 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4049 		    drm_mode_is_420_also(info, mode_in) &&
4050 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4051 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4052 			adjust_colour_depth_from_display_info(timing_out, info);
4053 		}
4054 	}
4055 }
4056 
4057 static void fill_audio_info(struct audio_info *audio_info,
4058 			    const struct drm_connector *drm_connector,
4059 			    const struct dc_sink *dc_sink)
4060 {
4061 	int i = 0;
4062 	int cea_revision = 0;
4063 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4064 
4065 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4066 	audio_info->product_id = edid_caps->product_id;
4067 
4068 	cea_revision = drm_connector->display_info.cea_rev;
4069 
4070 #ifdef __linux__
4071 	strscpy(audio_info->display_name,
4072 		edid_caps->display_name,
4073 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4074 #else
4075 	strncpy(audio_info->display_name,
4076 		edid_caps->display_name,
4077 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4078 #endif
4079 
4080 	if (cea_revision >= 3) {
4081 		audio_info->mode_count = edid_caps->audio_mode_count;
4082 
4083 		for (i = 0; i < audio_info->mode_count; ++i) {
4084 			audio_info->modes[i].format_code =
4085 					(enum audio_format_code)
4086 					(edid_caps->audio_modes[i].format_code);
4087 			audio_info->modes[i].channel_count =
4088 					edid_caps->audio_modes[i].channel_count;
4089 			audio_info->modes[i].sample_rates.all =
4090 					edid_caps->audio_modes[i].sample_rate;
4091 			audio_info->modes[i].sample_size =
4092 					edid_caps->audio_modes[i].sample_size;
4093 		}
4094 	}
4095 
4096 	audio_info->flags.all = edid_caps->speaker_flags;
4097 
4098 	/* TODO: We only check for the progressive mode, check for interlaced mode too */
4099 	if (drm_connector->latency_present[0]) {
4100 		audio_info->video_latency = drm_connector->video_latency[0];
4101 		audio_info->audio_latency = drm_connector->audio_latency[0];
4102 	}
4103 
4104 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4105 
4106 }
4107 
4108 static void
4109 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4110 				      struct drm_display_mode *dst_mode)
4111 {
4112 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4113 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4114 	dst_mode->crtc_clock = src_mode->crtc_clock;
4115 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4116 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4117 	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4118 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4119 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4120 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4121 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4122 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4123 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4124 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4125 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4126 }
4127 
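/*
 * Overwrite the CRTC timing of drm_mode with the native mode's timing
 * when scaling is enabled, or when the two modes already share clock
 * and totals; otherwise the mode is left untouched.
 */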
4128 static void
4129 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4130 					const struct drm_display_mode *native_mode,
4131 					bool scale_enabled)
4132 {
4133 	if (scale_enabled) {
4134 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4135 	} else if (native_mode->clock == drm_mode->clock &&
4136 			native_mode->htotal == drm_mode->htotal &&
4137 			native_mode->vtotal == drm_mode->vtotal) {
4138 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4139 	} else {
4140 		/* no scaling and no amdgpu-inserted mode, nothing to patch */
4141 	}
4142 }
4143 
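/*
 * Create a virtual dc_sink on the connector's link so that a stream
 * can still be constructed when no physical sink is attached.
 */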
4144 static struct dc_sink *
4145 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4146 {
4147 	struct dc_sink_init_data sink_init_data = { 0 };
4148 	struct dc_sink *sink = NULL;
4149 	sink_init_data.link = aconnector->dc_link;
4150 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4151 
4152 	sink = dc_sink_create(&sink_init_data);
4153 	if (!sink) {
4154 		DRM_ERROR("Failed to create sink!\n");
4155 		return NULL;
4156 	}
4157 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4158 
4159 	return sink;
4160 }
4161 
4162 static void set_multisync_trigger_params(
4163 		struct dc_stream_state *stream)
4164 {
4165 	if (stream->triggered_crtc_reset.enabled) {
4166 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4167 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4168 	}
4169 }
4170 
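/*
 * Pick the stream with the highest refresh rate among those taking
 * part in a triggered CRTC reset as the master, and point every
 * stream's reset event source at it.
 */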
4171 static void set_master_stream(struct dc_stream_state *stream_set[],
4172 			      int stream_count)
4173 {
4174 	int j, highest_rfr = 0, master_stream = 0;
4175 
4176 	for (j = 0; j < stream_count; j++) {
4177 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4178 			int refresh_rate = 0;
4179 
4180 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4181 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4182 			if (refresh_rate > highest_rfr) {
4183 				highest_rfr = refresh_rate;
4184 				master_stream = j;
4185 			}
4186 		}
4187 	}
4188 	for (j = 0; j < stream_count; j++) {
4189 		if (stream_set[j])
4190 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4191 	}
4192 }
4193 
4194 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4195 {
4196 	int i = 0;
4197 
4198 	if (context->stream_count < 2)
4199 		return;
4200 	for (i = 0; i < context->stream_count ; i++) {
4201 		if (!context->streams[i])
4202 			continue;
4203 		/*
4204 		 * TODO: add a function to read AMD VSDB bits and set
4205 		 * crtc_sync_master.multi_sync_enabled flag.
4206 		 * For now it is set to false.
4207 		 */
4208 		set_multisync_trigger_params(context->streams[i]);
4209 	}
4210 	set_master_stream(context->streams, context->stream_count);
4211 }
4212 
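/*
 * Build a dc_stream_state for the connector's sink (or a fake sink if
 * none is attached): derive the CRTC timing from the requested and
 * preferred modes, apply scaling and audio settings, and enable DSC
 * and PSR/VSC metadata where the link supports them.
 */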
4213 static struct dc_stream_state *
4214 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4215 		       const struct drm_display_mode *drm_mode,
4216 		       const struct dm_connector_state *dm_state,
4217 		       const struct dc_stream_state *old_stream,
4218 		       int requested_bpc)
4219 {
4220 	struct drm_display_mode *preferred_mode = NULL;
4221 	struct drm_connector *drm_connector;
4222 	const struct drm_connector_state *con_state =
4223 		dm_state ? &dm_state->base : NULL;
4224 	struct dc_stream_state *stream = NULL;
4225 	struct drm_display_mode mode = *drm_mode;
4226 	bool native_mode_found = false;
4227 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4228 	int mode_refresh;
4229 	int preferred_refresh = 0;
4230 #if defined(CONFIG_DRM_AMD_DC_DCN)
4231 	struct dsc_dec_dpcd_caps dsc_caps;
4232 #endif
4233 	uint32_t link_bandwidth_kbps;
4234 	struct dc_sink *sink = NULL;
4235 
4236 	if (aconnector == NULL) {
4237 		DRM_ERROR("aconnector is NULL!\n");
4238 		return stream;
4239 	}
4240 
4241 	drm_connector = &aconnector->base;
4242 
4243 	if (!aconnector->dc_sink) {
4244 		sink = create_fake_sink(aconnector);
4245 		if (!sink)
4246 			return stream;
4247 	} else {
4248 		sink = aconnector->dc_sink;
4249 		dc_sink_retain(sink);
4250 	}
4251 
4252 	stream = dc_create_stream_for_sink(sink);
4253 
4254 	if (stream == NULL) {
4255 		DRM_ERROR("Failed to create stream for sink!\n");
4256 		goto finish;
4257 	}
4258 
4259 	stream->dm_stream_context = aconnector;
4260 
4261 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4262 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4263 
4264 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4265 		/* Search for preferred mode */
4266 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4267 			native_mode_found = true;
4268 			break;
4269 		}
4270 	}
4271 	if (!native_mode_found)
4272 		preferred_mode = list_first_entry_or_null(
4273 				&aconnector->base.modes,
4274 				struct drm_display_mode,
4275 				head);
4276 
4277 	mode_refresh = drm_mode_vrefresh(&mode);
4278 
4279 	if (preferred_mode == NULL) {
4280 		/*
4281 		 * This may not be an error: the use case is when we have no
4282 		 * usermode calls to reset and set the mode upon hotplug. In
4283 		 * this case we set the mode ourselves to restore the previous
4284 		 * mode, and the mode list may not be filled in yet.
4285 		 */
4286 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4287 	} else {
4288 		decide_crtc_timing_for_drm_display_mode(
4289 				&mode, preferred_mode,
4290 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4291 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4292 	}
4293 
4294 	if (!dm_state)
4295 		drm_mode_set_crtcinfo(&mode, 0);
4296 
4297 	/*
4298 	 * If scaling is enabled and the refresh rate didn't change,
4299 	 * we copy the VIC and polarities of the old timings.
4300 	 */
4301 	if (!scale || mode_refresh != preferred_refresh)
4302 		fill_stream_properties_from_drm_display_mode(stream,
4303 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4304 	else
4305 		fill_stream_properties_from_drm_display_mode(stream,
4306 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4307 
4308 	stream->timing.flags.DSC = 0;
4309 
4310 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4311 #if defined(CONFIG_DRM_AMD_DC_DCN)
4312 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4313 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4314 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4315 				      &dsc_caps);
4316 #endif
4317 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4318 							     dc_link_get_link_cap(aconnector->dc_link));
4319 
4320 #if defined(CONFIG_DRM_AMD_DC_DCN)
4321 		if (dsc_caps.is_dsc_supported)
4322 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4323 						  &dsc_caps,
4324 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4325 						  link_bandwidth_kbps,
4326 						  &stream->timing,
4327 						  &stream->timing.dsc_cfg))
4328 				stream->timing.flags.DSC = 1;
4329 #endif
4330 	}
4331 
4332 	update_stream_scaling_settings(&mode, dm_state, stream);
4333 
4334 	fill_audio_info(
4335 		&stream->audio_info,
4336 		drm_connector,
4337 		sink);
4338 
4339 	update_stream_signal(stream, sink);
4340 
4341 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4342 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4343 	if (stream->link->psr_feature_enabled) {
4344 		struct dc *core_dc = stream->link->ctx->dc;
4345 
4346 		if (dc_is_dmcu_initialized(core_dc)) {
4347 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4348 
4349 			stream->psr_version = dmcu->dmcu_version.psr_version;
4350 
4351 			/*
4352 			 * Decide whether the stream supports VSC SDP colorimetry
4353 			 * before building the VSC info packet.
4354 			 */
4355 			stream->use_vsc_sdp_for_colorimetry = false;
4356 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4357 				stream->use_vsc_sdp_for_colorimetry =
4358 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4359 			} else {
4360 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4361 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4362 					stream->use_vsc_sdp_for_colorimetry = true;
4363 				}
4364 			}
4365 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4366 		}
4367 	}
4368 finish:
4369 	dc_sink_release(sink);
4370 
4371 	return stream;
4372 }
4373 
4374 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4375 {
4376 	drm_crtc_cleanup(crtc);
4377 	kfree(crtc);
4378 }
4379 
4380 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4381 				  struct drm_crtc_state *state)
4382 {
4383 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4384 
4385 	/* TODO: Destroy dc_stream objects when the stream object is flattened */
4386 	if (cur->stream)
4387 		dc_stream_release(cur->stream);
4388 
4389 
4390 	__drm_atomic_helper_crtc_destroy_state(state);
4391 
4393 	kfree(state);
4394 }
4395 
4396 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4397 {
4398 	struct dm_crtc_state *state;
4399 
4400 	if (crtc->state)
4401 		dm_crtc_destroy_state(crtc, crtc->state);
4402 
4403 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4404 	if (WARN_ON(!state))
4405 		return;
4406 
4407 	crtc->state = &state->base;
4408 	crtc->state->crtc = crtc;
4409 
4410 }
4411 
4412 static struct drm_crtc_state *
4413 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4414 {
4415 	struct dm_crtc_state *state, *cur;
4416 
4417 	if (WARN_ON(!crtc->state))
4418 		return NULL;
4419 
4420 	cur = to_dm_crtc_state(crtc->state);
4421 
4422 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4423 	if (!state)
4424 		return NULL;
4425 
4426 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4427 
4428 	if (cur->stream) {
4429 		state->stream = cur->stream;
4430 		dc_stream_retain(state->stream);
4431 	}
4432 
4433 	state->active_planes = cur->active_planes;
4434 	state->interrupts_enabled = cur->interrupts_enabled;
4435 	state->vrr_params = cur->vrr_params;
4436 	state->vrr_infopacket = cur->vrr_infopacket;
4437 	state->abm_level = cur->abm_level;
4438 	state->vrr_supported = cur->vrr_supported;
4439 	state->freesync_config = cur->freesync_config;
4440 	state->crc_src = cur->crc_src;
4441 	state->cm_has_degamma = cur->cm_has_degamma;
4442 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4443 
4444 	/* TODO: Duplicate dc_stream after the stream object is flattened */
4445 
4446 	return &state->base;
4447 }
4448 
4449 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4450 {
4451 	enum dc_irq_source irq_source;
4452 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4453 	struct amdgpu_device *adev = crtc->dev->dev_private;
4454 	int rc;
4455 
4456 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4457 
4458 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4459 
4460 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4461 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4462 	return rc;
4463 }
4464 
4465 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4466 {
4467 	enum dc_irq_source irq_source;
4468 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4469 	struct amdgpu_device *adev = crtc->dev->dev_private;
4470 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4471 	int rc = 0;
4472 
4473 	if (enable) {
4474 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4475 		if (amdgpu_dm_vrr_active(acrtc_state))
4476 			rc = dm_set_vupdate_irq(crtc, true);
4477 	} else {
4478 		/* vblank irq off -> vupdate irq off */
4479 		rc = dm_set_vupdate_irq(crtc, false);
4480 	}
4481 
4482 	if (rc)
4483 		return rc;
4484 
4485 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4486 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4487 }
4488 
4489 static int dm_enable_vblank(struct drm_crtc *crtc)
4490 {
4491 	return dm_set_vblank(crtc, true);
4492 }
4493 
4494 static void dm_disable_vblank(struct drm_crtc *crtc)
4495 {
4496 	dm_set_vblank(crtc, false);
4497 }
4498 
4499 /* Only the options currently available to the driver are implemented */
4500 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4501 	.reset = dm_crtc_reset_state,
4502 	.destroy = amdgpu_dm_crtc_destroy,
4503 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4504 	.set_config = drm_atomic_helper_set_config,
4505 	.page_flip = drm_atomic_helper_page_flip,
4506 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4507 	.atomic_destroy_state = dm_crtc_destroy_state,
4508 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4509 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4510 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4511 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4512 	.enable_vblank = dm_enable_vblank,
4513 	.disable_vblank = dm_disable_vblank,
4514 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4515 };
4516 
4517 static enum drm_connector_status
4518 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4519 {
4520 	bool connected;
4521 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4522 
4523 	/*
4524 	 * Notes:
4525 	 * 1. This interface is NOT called in context of HPD irq.
4526 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4527 	 * makes it a bad place for *any* MST-related activity.
4528 	 */
4529 
4530 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4531 	    !aconnector->fake_enable)
4532 		connected = (aconnector->dc_sink != NULL);
4533 	else
4534 		connected = (aconnector->base.force == DRM_FORCE_ON);
4535 
4536 	return (connected ? connector_status_connected :
4537 			connector_status_disconnected);
4538 }
4539 
4540 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4541 					    struct drm_connector_state *connector_state,
4542 					    struct drm_property *property,
4543 					    uint64_t val)
4544 {
4545 	struct drm_device *dev = connector->dev;
4546 	struct amdgpu_device *adev = dev->dev_private;
4547 	struct dm_connector_state *dm_old_state =
4548 		to_dm_connector_state(connector->state);
4549 	struct dm_connector_state *dm_new_state =
4550 		to_dm_connector_state(connector_state);
4551 
4552 	int ret = -EINVAL;
4553 
4554 	if (property == dev->mode_config.scaling_mode_property) {
4555 		enum amdgpu_rmx_type rmx_type;
4556 
4557 		switch (val) {
4558 		case DRM_MODE_SCALE_CENTER:
4559 			rmx_type = RMX_CENTER;
4560 			break;
4561 		case DRM_MODE_SCALE_ASPECT:
4562 			rmx_type = RMX_ASPECT;
4563 			break;
4564 		case DRM_MODE_SCALE_FULLSCREEN:
4565 			rmx_type = RMX_FULL;
4566 			break;
4567 		case DRM_MODE_SCALE_NONE:
4568 		default:
4569 			rmx_type = RMX_OFF;
4570 			break;
4571 		}
4572 
4573 		if (dm_old_state->scaling == rmx_type)
4574 			return 0;
4575 
4576 		dm_new_state->scaling = rmx_type;
4577 		ret = 0;
4578 	} else if (property == adev->mode_info.underscan_hborder_property) {
4579 		dm_new_state->underscan_hborder = val;
4580 		ret = 0;
4581 	} else if (property == adev->mode_info.underscan_vborder_property) {
4582 		dm_new_state->underscan_vborder = val;
4583 		ret = 0;
4584 	} else if (property == adev->mode_info.underscan_property) {
4585 		dm_new_state->underscan_enable = val;
4586 		ret = 0;
4587 	} else if (property == adev->mode_info.abm_level_property) {
4588 		dm_new_state->abm_level = val;
4589 		ret = 0;
4590 	}
4591 
4592 	return ret;
4593 }
4594 
4595 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4596 					    const struct drm_connector_state *state,
4597 					    struct drm_property *property,
4598 					    uint64_t *val)
4599 {
4600 	struct drm_device *dev = connector->dev;
4601 	struct amdgpu_device *adev = dev->dev_private;
4602 	struct dm_connector_state *dm_state =
4603 		to_dm_connector_state(state);
4604 	int ret = -EINVAL;
4605 
4606 	if (property == dev->mode_config.scaling_mode_property) {
4607 		switch (dm_state->scaling) {
4608 		case RMX_CENTER:
4609 			*val = DRM_MODE_SCALE_CENTER;
4610 			break;
4611 		case RMX_ASPECT:
4612 			*val = DRM_MODE_SCALE_ASPECT;
4613 			break;
4614 		case RMX_FULL:
4615 			*val = DRM_MODE_SCALE_FULLSCREEN;
4616 			break;
4617 		case RMX_OFF:
4618 		default:
4619 			*val = DRM_MODE_SCALE_NONE;
4620 			break;
4621 		}
4622 		ret = 0;
4623 	} else if (property == adev->mode_info.underscan_hborder_property) {
4624 		*val = dm_state->underscan_hborder;
4625 		ret = 0;
4626 	} else if (property == adev->mode_info.underscan_vborder_property) {
4627 		*val = dm_state->underscan_vborder;
4628 		ret = 0;
4629 	} else if (property == adev->mode_info.underscan_property) {
4630 		*val = dm_state->underscan_enable;
4631 		ret = 0;
4632 	} else if (property == adev->mode_info.abm_level_property) {
4633 		*val = dm_state->abm_level;
4634 		ret = 0;
4635 	}
4636 
4637 	return ret;
4638 }
4639 
4640 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4641 {
4642 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4643 
4644 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4645 }
4646 
4647 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4648 {
4649 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4650 	const struct dc_link *link = aconnector->dc_link;
4651 	struct amdgpu_device *adev = connector->dev->dev_private;
4652 	struct amdgpu_display_manager *dm = &adev->dm;
4653 
4654 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4655 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4656 
4657 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4658 	    link->type != dc_connection_none &&
4659 	    dm->backlight_dev) {
4660 		backlight_device_unregister(dm->backlight_dev);
4661 		dm->backlight_dev = NULL;
4662 	}
4663 #endif
4664 
4665 	if (aconnector->dc_em_sink)
4666 		dc_sink_release(aconnector->dc_em_sink);
4667 	aconnector->dc_em_sink = NULL;
4668 	if (aconnector->dc_sink)
4669 		dc_sink_release(aconnector->dc_sink);
4670 	aconnector->dc_sink = NULL;
4671 
4672 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4673 	drm_connector_unregister(connector);
4674 	drm_connector_cleanup(connector);
4675 	if (aconnector->i2c) {
4676 		i2c_del_adapter(&aconnector->i2c->base);
4677 		kfree(aconnector->i2c);
4678 	}
4679 	kfree(aconnector->dm_dp_aux.aux.name);
4680 
4681 	kfree(connector);
4682 }
4683 
4684 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4685 {
4686 	struct dm_connector_state *state =
4687 		to_dm_connector_state(connector->state);
4688 
4689 	if (connector->state)
4690 		__drm_atomic_helper_connector_destroy_state(connector->state);
4691 
4692 	kfree(state);
4693 
4694 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4695 
4696 	if (state) {
4697 		state->scaling = RMX_OFF;
4698 		state->underscan_enable = false;
4699 		state->underscan_hborder = 0;
4700 		state->underscan_vborder = 0;
4701 		state->base.max_requested_bpc = 8;
4702 		state->vcpi_slots = 0;
4703 		state->pbn = 0;
4704 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4705 			state->abm_level = amdgpu_dm_abm_level;
4706 
4707 		__drm_atomic_helper_connector_reset(connector, &state->base);
4708 	}
4709 }
4710 
4711 struct drm_connector_state *
4712 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4713 {
4714 	struct dm_connector_state *state =
4715 		to_dm_connector_state(connector->state);
4716 
4717 	struct dm_connector_state *new_state =
4718 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4719 
4720 	if (!new_state)
4721 		return NULL;
4722 
4723 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4724 
4725 	new_state->freesync_capable = state->freesync_capable;
4726 	new_state->abm_level = state->abm_level;
4727 	new_state->scaling = state->scaling;
4728 	new_state->underscan_enable = state->underscan_enable;
4729 	new_state->underscan_hborder = state->underscan_hborder;
4730 	new_state->underscan_vborder = state->underscan_vborder;
4731 	new_state->vcpi_slots = state->vcpi_slots;
4732 	new_state->pbn = state->pbn;
4733 	return &new_state->base;
4734 }
4735 
4736 static int
4737 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4738 {
4739 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4740 		to_amdgpu_dm_connector(connector);
4741 	int r;
4742 
4743 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4744 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4745 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4746 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4747 		if (r)
4748 			return r;
4749 	}
4750 
4751 #if defined(CONFIG_DEBUG_FS)
4752 	connector_debugfs_init(amdgpu_dm_connector);
4753 #endif
4754 
4755 	return 0;
4756 }
4757 
4758 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4759 	.reset = amdgpu_dm_connector_funcs_reset,
4760 	.detect = amdgpu_dm_connector_detect,
4761 	.fill_modes = drm_helper_probe_single_connector_modes,
4762 	.destroy = amdgpu_dm_connector_destroy,
4763 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4764 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4765 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4766 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4767 	.late_register = amdgpu_dm_connector_late_register,
4768 	.early_unregister = amdgpu_dm_connector_unregister
4769 };
4770 
4771 static int get_modes(struct drm_connector *connector)
4772 {
4773 	return amdgpu_dm_connector_get_modes(connector);
4774 }
4775 
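/*
 * Create an emulated sink from the EDID stored in the connector's
 * property blob; used when the connector state is forced and no real
 * sink has been detected.
 */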
4776 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4777 {
4778 	struct dc_sink_init_data init_params = {
4779 			.link = aconnector->dc_link,
4780 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4781 	};
4782 	struct edid *edid;
4783 
4784 	if (!aconnector->base.edid_blob_ptr) {
4785 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4786 				aconnector->base.name);
4787 
4788 		aconnector->base.force = DRM_FORCE_OFF;
4789 		aconnector->base.override_edid = false;
4790 		return;
4791 	}
4792 
4793 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4794 
4795 	aconnector->edid = edid;
4796 
4797 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4798 		aconnector->dc_link,
4799 		(uint8_t *)edid,
4800 		(edid->extensions + 1) * EDID_LENGTH,
4801 		&init_params);
4802 
4803 	if (aconnector->base.force == DRM_FORCE_ON) {
4804 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4805 		aconnector->dc_link->local_sink :
4806 		aconnector->dc_em_sink;
4807 		dc_sink_retain(aconnector->dc_sink);
4808 	}
4809 }
4810 
4811 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4812 {
4813 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4814 
4815 	/*
4816 	 * In case of a headless boot with force on for a DP managed connector,
4817 	 * those settings have to be != 0 to get an initial modeset.
4818 	 */
4819 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4820 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4821 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4822 	}
4823 
4825 	aconnector->base.override_edid = true;
4826 	create_eml_sink(aconnector);
4827 }
4828 
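/*
 * Create a stream and validate it against DC, retrying with a lower
 * requested bpc (down to 6) when validation fails, e.g. when the link
 * cannot carry the mode at full color depth.
 */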
4829 static struct dc_stream_state *
4830 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4831 				const struct drm_display_mode *drm_mode,
4832 				const struct dm_connector_state *dm_state,
4833 				const struct dc_stream_state *old_stream)
4834 {
4835 	struct drm_connector *connector = &aconnector->base;
4836 	struct amdgpu_device *adev = connector->dev->dev_private;
4837 	struct dc_stream_state *stream;
4838 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
4839 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
4840 	enum dc_status dc_result = DC_OK;
4841 
4842 	do {
4843 		stream = create_stream_for_sink(aconnector, drm_mode,
4844 						dm_state, old_stream,
4845 						requested_bpc);
4846 		if (stream == NULL) {
4847 			DRM_ERROR("Failed to create stream for sink!\n");
4848 			break;
4849 		}
4850 
4851 		dc_result = dc_validate_stream(adev->dm.dc, stream);
4852 
4853 		if (dc_result != DC_OK) {
4854 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4855 				      drm_mode->hdisplay,
4856 				      drm_mode->vdisplay,
4857 				      drm_mode->clock,
4858 				      dc_result);
4859 
4860 			dc_stream_release(stream);
4861 			stream = NULL;
4862 			requested_bpc -= 2; /* lower bpc to retry validation */
4863 		}
4864 
4865 	} while (stream == NULL && requested_bpc >= 6);
4866 
4867 	return stream;
4868 }
4869 
4870 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4871 				   struct drm_display_mode *mode)
4872 {
4873 	int result = MODE_ERROR;
4874 	struct dc_sink *dc_sink;
4875 	/* TODO: Unhardcode stream count */
4876 	struct dc_stream_state *stream;
4877 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4878 
4879 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4880 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4881 		return result;
4882 
4883 	/*
4884 	 * Only run this the first time mode_valid is called, to initialize
4885 	 * EDID management.
4886 	 */
4887 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4888 		!aconnector->dc_em_sink)
4889 		handle_edid_mgmt(aconnector);
4890 
4891 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4892 
4893 	if (dc_sink == NULL) {
4894 		DRM_ERROR("dc_sink is NULL!\n");
4895 		goto fail;
4896 	}
4897 
4898 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
4899 	if (stream) {
4900 		dc_stream_release(stream);
4901 		result = MODE_OK;
4902 	}
4903 
4904 fail:
4905 	/* TODO: error handling */
4906 	return result;
4907 }
4908 
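/*
 * Pack the connector's HDR output metadata into a DC info packet:
 * a Dynamic Range and Mastering infoframe for HDMI, or the equivalent
 * SDP for DP/eDP. The payload is a fixed 26 bytes plus a 4-byte header.
 */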
4909 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4910 				struct dc_info_packet *out)
4911 {
4912 	struct hdmi_drm_infoframe frame;
4913 	unsigned char buf[30]; /* 26 + 4 */
4914 	ssize_t len;
4915 	int ret, i;
4916 
4917 	memset(out, 0, sizeof(*out));
4918 
4919 	if (!state->hdr_output_metadata)
4920 		return 0;
4921 
4922 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4923 	if (ret)
4924 		return ret;
4925 
4926 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4927 	if (len < 0)
4928 		return (int)len;
4929 
4930 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4931 	if (len != 30)
4932 		return -EINVAL;
4933 
4934 	/* Prepare the infopacket for DC. */
4935 	switch (state->connector->connector_type) {
4936 	case DRM_MODE_CONNECTOR_HDMIA:
4937 		out->hb0 = 0x87; /* type */
4938 		out->hb1 = 0x01; /* version */
4939 		out->hb2 = 0x1A; /* length */
4940 		out->sb[0] = buf[3]; /* checksum */
4941 		i = 1;
4942 		break;
4943 
4944 	case DRM_MODE_CONNECTOR_DisplayPort:
4945 	case DRM_MODE_CONNECTOR_eDP:
4946 		out->hb0 = 0x00; /* sdp id, zero */
4947 		out->hb1 = 0x87; /* type */
4948 		out->hb2 = 0x1D; /* payload len - 1 */
4949 		out->hb3 = (0x13 << 2); /* sdp version */
4950 		out->sb[0] = 0x01; /* version */
4951 		out->sb[1] = 0x1A; /* length */
4952 		i = 2;
4953 		break;
4954 
4955 	default:
4956 		return -EINVAL;
4957 	}
4958 
4959 	memcpy(&out->sb[i], &buf[4], 26);
4960 	out->valid = true;
4961 
4962 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4963 		       sizeof(out->sb), false);
4964 
4965 	return 0;
4966 }
4967 
4968 static bool
4969 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4970 			  const struct drm_connector_state *new_state)
4971 {
4972 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4973 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4974 
4975 	if (old_blob != new_blob) {
4976 		if (old_blob && new_blob &&
4977 		    old_blob->length == new_blob->length)
4978 			return memcmp(old_blob->data, new_blob->data,
4979 				      old_blob->length);
4980 
4981 		return true;
4982 	}
4983 
4984 	return false;
4985 }
4986 
4987 static int
4988 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4989 				 struct drm_atomic_state *state)
4990 {
4991 	struct drm_connector_state *new_con_state =
4992 		drm_atomic_get_new_connector_state(state, conn);
4993 	struct drm_connector_state *old_con_state =
4994 		drm_atomic_get_old_connector_state(state, conn);
4995 	struct drm_crtc *crtc = new_con_state->crtc;
4996 	struct drm_crtc_state *new_crtc_state;
4997 	int ret;
4998 
4999 	if (!crtc)
5000 		return 0;
5001 
5002 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5003 		struct dc_info_packet hdr_infopacket;
5004 
5005 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5006 		if (ret)
5007 			return ret;
5008 
5009 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5010 		if (IS_ERR(new_crtc_state))
5011 			return PTR_ERR(new_crtc_state);
5012 
5013 		/*
5014 		 * DC considers the stream backends changed if the
5015 		 * static metadata changes. Forcing the modeset also
5016 		 * gives a simple way for userspace to switch from
5017 		 * 8bpc to 10bpc when setting the metadata to enter
5018 		 * or exit HDR.
5019 		 *
5020 		 * Changing the static metadata after it's been
5021 		 * set is permissible, however. So only force a
5022 		 * modeset if we're entering or exiting HDR.
5023 		 */
5024 		new_crtc_state->mode_changed =
5025 			!old_con_state->hdr_output_metadata ||
5026 			!new_con_state->hdr_output_metadata;
5027 	}
5028 
5029 	return 0;
5030 }
5031 
5032 static const struct drm_connector_helper_funcs
5033 amdgpu_dm_connector_helper_funcs = {
5034 	/*
5035 	 * If a second, larger display is hotplugged in fbcon mode, the
5036 	 * larger resolution modes will be filtered out by
5037 	 * drm_mode_validate_size(), and those modes are missing after the
5038 	 * user starts lightdm. So we need to renew the mode list in the
5039 	 * get_modes callback, not just return the mode count.
5040 	.get_modes = get_modes,
5041 	.mode_valid = amdgpu_dm_connector_mode_valid,
5042 	.atomic_check = amdgpu_dm_connector_atomic_check,
5043 };
5044 
5045 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5046 {
5047 }
5048 
5049 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5050 {
5051 	struct drm_device *dev = new_crtc_state->crtc->dev;
5052 	struct drm_plane *plane;
5053 
5054 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5055 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5056 			return true;
5057 	}
5058 
5059 	return false;
5060 }
5061 
5062 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5063 {
5064 	struct drm_atomic_state *state = new_crtc_state->state;
5065 	struct drm_plane *plane;
5066 	int num_active = 0;
5067 
5068 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5069 		struct drm_plane_state *new_plane_state;
5070 
5071 		/* Cursor planes are "fake". */
5072 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5073 			continue;
5074 
5075 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5076 
5077 		if (!new_plane_state) {
5078 			/*
5079 			 * The plane is enabled on the CRTC and hasn't changed
5080 			 * state. This means that it previously passed
5081 			 * validation and is therefore enabled.
5082 			 */
5083 			num_active += 1;
5084 			continue;
5085 		}
5086 
5087 		/* We need a framebuffer to be considered enabled. */
5088 		num_active += (new_plane_state->fb != NULL);
5089 	}
5090 
5091 	return num_active;
5092 }
5093 
5094 /*
5095  * Sets whether interrupts should be enabled on a specific CRTC.
5096  * We require that the stream be enabled and that there exist active
5097  * DC planes on the stream.
5098  */
5099 static void
5100 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5101 			       struct drm_crtc_state *new_crtc_state)
5102 {
5103 	struct dm_crtc_state *dm_new_crtc_state =
5104 		to_dm_crtc_state(new_crtc_state);
5105 
5106 	dm_new_crtc_state->active_planes = 0;
5107 	dm_new_crtc_state->interrupts_enabled = false;
5108 
5109 	if (!dm_new_crtc_state->stream)
5110 		return;
5111 
5112 	dm_new_crtc_state->active_planes =
5113 		count_crtc_active_planes(new_crtc_state);
5114 
5115 	dm_new_crtc_state->interrupts_enabled =
5116 		dm_new_crtc_state->active_planes > 0;
5117 }
5118 
5119 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5120 				       struct drm_crtc_state *state)
5121 {
5122 	struct amdgpu_device *adev = crtc->dev->dev_private;
5123 	struct dc *dc = adev->dm.dc;
5124 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5125 	int ret = -EINVAL;
5126 
5127 	/*
5128 	 * Update interrupt state for the CRTC. This needs to happen whenever
5129 	 * the CRTC has changed or whenever any of its planes have changed.
5130 	 * Atomic check satisfies both of these requirements since the CRTC
5131 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5132 	 */
5133 	dm_update_crtc_interrupt_state(crtc, state);
5134 
5135 	if (unlikely(!dm_crtc_state->stream &&
5136 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5137 		WARN_ON(1);
5138 		return ret;
5139 	}
5140 
5141 	/* In some use cases, like reset, no stream is attached */
5142 	if (!dm_crtc_state->stream)
5143 		return 0;
5144 
5145 	/*
5146 	 * We want at least one hardware plane enabled to use
5147 	 * the stream with a cursor enabled.
5148 	 */
5149 	if (state->enable && state->active &&
5150 	    does_crtc_have_active_cursor(state) &&
5151 	    dm_crtc_state->active_planes == 0)
5152 		return -EINVAL;
5153 
5154 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5155 		return 0;
5156 
5157 	return ret;
5158 }
5159 
5160 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5161 				      const struct drm_display_mode *mode,
5162 				      struct drm_display_mode *adjusted_mode)
5163 {
5164 	return true;
5165 }
5166 
5167 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5168 	.disable = dm_crtc_helper_disable,
5169 	.atomic_check = dm_crtc_helper_atomic_check,
5170 	.mode_fixup = dm_crtc_helper_mode_fixup,
5171 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5172 };
5173 
5174 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5175 {
5176 
5177 }
5178 
5179 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5180 {
5181 	switch (display_color_depth) {
5182 	case COLOR_DEPTH_666:
5183 		return 6;
5184 	case COLOR_DEPTH_888:
5185 		return 8;
5186 	case COLOR_DEPTH_101010:
5187 		return 10;
5188 	case COLOR_DEPTH_121212:
5189 		return 12;
5190 	case COLOR_DEPTH_141414:
5191 		return 14;
5192 	case COLOR_DEPTH_161616:
5193 		return 16;
5194 	default:
5195 		break;
5196 	}
5197 	return 0;
5198 }
5199 
5200 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5201 					  struct drm_crtc_state *crtc_state,
5202 					  struct drm_connector_state *conn_state)
5203 {
5204 	struct drm_atomic_state *state = crtc_state->state;
5205 	struct drm_connector *connector = conn_state->connector;
5206 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5207 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5208 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5209 	struct drm_dp_mst_topology_mgr *mst_mgr;
5210 	struct drm_dp_mst_port *mst_port;
5211 	enum dc_color_depth color_depth;
5212 	int clock, bpp = 0;
5213 	bool is_y420 = false;
5214 
5215 	if (!aconnector->port || !aconnector->dc_sink)
5216 		return 0;
5217 
5218 	mst_port = aconnector->port;
5219 	mst_mgr = &aconnector->mst_port->mst_mgr;
5220 
5221 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5222 		return 0;
5223 
5224 	if (!state->duplicated) {
5225 		int max_bpc = conn_state->max_requested_bpc;
5226 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5227 				aconnector->force_yuv420_output;
5228 		color_depth = convert_color_depth_from_display_info(connector,
5229 								    is_y420,
5230 								    max_bpc);
5231 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5232 		clock = adjusted_mode->clock;
5233 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5234 	}
5235 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5236 									   mst_mgr,
5237 									   mst_port,
5238 									   dm_new_connector_state->pbn,
5239 									   0);
5240 	if (dm_new_connector_state->vcpi_slots < 0) {
5241 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5242 		return dm_new_connector_state->vcpi_slots;
5243 	}
5244 	return 0;
5245 }
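/*
 * Worked example (illustrative numbers): for a 594 MHz pixel clock
 * (clock = 594000 kHz) at 8 bpc RGB, bpp = 8 * 3 = 24 (three components at
 * 8 bpc), and drm_dp_calc_pbn_mode() computes roughly
 * 594000 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000) ~= 2125 PBN, which is
 * then passed to drm_dp_atomic_find_vcpi_slots() above.
 */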
5246 
5247 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5248 	.disable = dm_encoder_helper_disable,
5249 	.atomic_check = dm_encoder_helper_atomic_check
5250 };
5251 
5252 #if defined(CONFIG_DRM_AMD_DC_DCN)
5253 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5254 					    struct dc_state *dc_state)
5255 {
5256 	struct dc_stream_state *stream = NULL;
5257 	struct drm_connector *connector;
5258 	struct drm_connector_state *new_con_state, *old_con_state;
5259 	struct amdgpu_dm_connector *aconnector;
5260 	struct dm_connector_state *dm_conn_state;
5261 	int i, j, clock, bpp;
5262 	int vcpi, pbn_div, pbn = 0;
5263 
5264 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5265 
5266 		aconnector = to_amdgpu_dm_connector(connector);
5267 
5268 		if (!aconnector->port)
5269 			continue;
5270 
5271 		if (!new_con_state || !new_con_state->crtc)
5272 			continue;
5273 
5274 		dm_conn_state = to_dm_connector_state(new_con_state);
5275 
5276 		for (j = 0; j < dc_state->stream_count; j++) {
5277 			stream = dc_state->streams[j];
5278 			if (!stream)
5279 				continue;
5280 
5281 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5282 				break;
5283 
5284 			stream = NULL;
5285 		}
5286 
5287 		if (!stream)
5288 			continue;
5289 
5290 		if (stream->timing.flags.DSC != 1) {
5291 			drm_dp_mst_atomic_enable_dsc(state,
5292 						     aconnector->port,
5293 						     dm_conn_state->pbn,
5294 						     0,
5295 						     false);
5296 			continue;
5297 		}
5298 
5299 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5300 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5301 		clock = stream->timing.pix_clk_100hz / 10;
5302 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5303 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5304 						    aconnector->port,
5305 						    pbn, pbn_div,
5306 						    true);
5307 		if (vcpi < 0)
5308 			return vcpi;
5309 
5310 		dm_conn_state->pbn = pbn;
5311 		dm_conn_state->vcpi_slots = vcpi;
5312 	}
5313 	return 0;
5314 }
5315 #endif
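/*
 * Illustrative note on the units in the DSC path above:
 * dsc_cfg.bits_per_pixel is stored in 1/16-bpp units and pix_clk_100hz in
 * units of 100 Hz, so clock = pix_clk_100hz / 10 is in kHz and
 * drm_dp_calc_pbn_mode(..., true) divides the bpp argument by 16
 * internally before computing the PBN value.
 */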
5316 
5317 static void dm_drm_plane_reset(struct drm_plane *plane)
5318 {
5319 	struct dm_plane_state *amdgpu_state = NULL;
5320 
5321 	if (plane->state)
5322 		plane->funcs->atomic_destroy_state(plane, plane->state);
5323 
5324 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5325 	WARN_ON(amdgpu_state == NULL);
5326 
5327 	if (amdgpu_state)
5328 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5329 }
5330 
5331 static struct drm_plane_state *
5332 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5333 {
5334 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5335 
5336 	old_dm_plane_state = to_dm_plane_state(plane->state);
5337 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5338 	if (!dm_plane_state)
5339 		return NULL;
5340 
5341 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5342 
5343 	if (old_dm_plane_state->dc_state) {
5344 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5345 		dc_plane_state_retain(dm_plane_state->dc_state);
5346 	}
5347 
5348 	return &dm_plane_state->base;
5349 }
5350 
5351 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5352 				struct drm_plane_state *state)
5353 {
5354 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5355 
5356 	if (dm_plane_state->dc_state)
5357 		dc_plane_state_release(dm_plane_state->dc_state);
5358 
5359 	drm_atomic_helper_plane_destroy_state(plane, state);
5360 }
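/*
 * Note: the dc_plane_state_retain() taken in dm_drm_plane_duplicate_state()
 * is balanced by the dc_plane_state_release() above, so the DC plane state
 * lives exactly as long as some DRM plane state still references it.
 */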
5361 
5362 static const struct drm_plane_funcs dm_plane_funcs = {
5363 	.update_plane	= drm_atomic_helper_update_plane,
5364 	.disable_plane	= drm_atomic_helper_disable_plane,
5365 	.destroy	= drm_primary_helper_destroy,
5366 	.reset = dm_drm_plane_reset,
5367 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5368 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5369 };
5370 
5371 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5372 				      struct drm_plane_state *new_state)
5373 {
5374 	struct amdgpu_framebuffer *afb;
5375 	struct drm_gem_object *obj;
5376 	struct amdgpu_device *adev;
5377 	struct amdgpu_bo *rbo;
5378 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5379 	struct list_head list;
5380 	struct ttm_validate_buffer tv;
5381 	struct ww_acquire_ctx ticket;
5382 	uint64_t tiling_flags;
5383 	uint32_t domain;
5384 	int r;
5385 	bool force_disable_dcc = false;
5386 
5387 	dm_plane_state_old = to_dm_plane_state(plane->state);
5388 	dm_plane_state_new = to_dm_plane_state(new_state);
5389 
5390 	if (!new_state->fb) {
5391 		DRM_DEBUG_DRIVER("No FB bound\n");
5392 		return 0;
5393 	}
5394 
5395 	afb = to_amdgpu_framebuffer(new_state->fb);
5396 	obj = new_state->fb->obj[0];
5397 	rbo = gem_to_amdgpu_bo(obj);
5398 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5399 	INIT_LIST_HEAD(&list);
5400 
5401 	tv.bo = &rbo->tbo;
5402 	tv.num_shared = 1;
5403 	list_add(&tv.head, &list);
5404 
5405 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5406 	if (r) {
5407 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5408 		return r;
5409 	}
5410 
5411 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5412 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5413 	else
5414 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5415 
5416 	r = amdgpu_bo_pin(rbo, domain);
5417 	if (unlikely(r != 0)) {
5418 		if (r != -ERESTARTSYS)
5419 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5420 		ttm_eu_backoff_reservation(&ticket, &list);
5421 		return r;
5422 	}
5423 
5424 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5425 	if (unlikely(r != 0)) {
5426 		amdgpu_bo_unpin(rbo);
5427 		ttm_eu_backoff_reservation(&ticket, &list);
5428 		DRM_ERROR("%p bind failed\n", rbo);
5429 		return r;
5430 	}
5431 
5432 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5433 
5434 	ttm_eu_backoff_reservation(&ticket, &list);
5435 
5436 	afb->address = amdgpu_bo_gpu_offset(rbo);
5437 
5438 	amdgpu_bo_ref(rbo);
5439 
5440 	if (dm_plane_state_new->dc_state &&
5441 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5442 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5443 
5444 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5445 		fill_plane_buffer_attributes(
5446 			adev, afb, plane_state->format, plane_state->rotation,
5447 			tiling_flags, &plane_state->tiling_info,
5448 			&plane_state->plane_size, &plane_state->dcc,
5449 			&plane_state->address,
5450 			force_disable_dcc);
5451 	}
5452 
5453 	return 0;
5454 }
5455 
5456 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5457 				       struct drm_plane_state *old_state)
5458 {
5459 	struct amdgpu_bo *rbo;
5460 	int r;
5461 
5462 	if (!old_state->fb)
5463 		return;
5464 
5465 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5466 	r = amdgpu_bo_reserve(rbo, false);
5467 	if (unlikely(r)) {
5468 		DRM_ERROR("failed to reserve rbo before unpin\n");
5469 		return;
5470 	}
5471 
5472 	amdgpu_bo_unpin(rbo);
5473 	amdgpu_bo_unreserve(rbo);
5474 	amdgpu_bo_unref(&rbo);
5475 }
5476 
5477 static int dm_plane_atomic_check(struct drm_plane *plane,
5478 				 struct drm_plane_state *state)
5479 {
5480 	struct amdgpu_device *adev = plane->dev->dev_private;
5481 	struct dc *dc = adev->dm.dc;
5482 	struct dm_plane_state *dm_plane_state;
5483 	struct dc_scaling_info scaling_info;
5484 	int ret;
5485 
5486 	dm_plane_state = to_dm_plane_state(state);
5487 
5488 	if (!dm_plane_state->dc_state)
5489 		return 0;
5490 
5491 	ret = fill_dc_scaling_info(state, &scaling_info);
5492 	if (ret)
5493 		return ret;
5494 
5495 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5496 		return 0;
5497 
5498 	return -EINVAL;
5499 }
5500 
5501 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5502 				       struct drm_plane_state *new_plane_state)
5503 {
5504 	/* Only support async updates on cursor planes. */
5505 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5506 		return -EINVAL;
5507 
5508 	return 0;
5509 }
5510 
5511 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5512 					 struct drm_plane_state *new_state)
5513 {
5514 	struct drm_plane_state *old_state =
5515 		drm_atomic_get_old_plane_state(new_state->state, plane);
5516 
5517 	swap(plane->state->fb, new_state->fb);
5518 
5519 	plane->state->src_x = new_state->src_x;
5520 	plane->state->src_y = new_state->src_y;
5521 	plane->state->src_w = new_state->src_w;
5522 	plane->state->src_h = new_state->src_h;
5523 	plane->state->crtc_x = new_state->crtc_x;
5524 	plane->state->crtc_y = new_state->crtc_y;
5525 	plane->state->crtc_w = new_state->crtc_w;
5526 	plane->state->crtc_h = new_state->crtc_h;
5527 
5528 	handle_cursor_update(plane, old_state);
5529 }
5530 
5531 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5532 	.prepare_fb = dm_plane_helper_prepare_fb,
5533 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5534 	.atomic_check = dm_plane_atomic_check,
5535 	.atomic_async_check = dm_plane_atomic_async_check,
5536 	.atomic_async_update = dm_plane_atomic_async_update
5537 };
5538 
5539 /*
5540  * TODO: These are currently initialized to RGB formats only.
5541  * For future use cases we should either initialize them dynamically based
5542  * on plane capabilities, or initialize this array to all formats, so the
5543  * internal DRM check will succeed, and let DC implement the proper check.
5544  */
5545 static const uint32_t rgb_formats[] = {
5546 	DRM_FORMAT_XRGB8888,
5547 	DRM_FORMAT_ARGB8888,
5548 	DRM_FORMAT_RGBA8888,
5549 	DRM_FORMAT_XRGB2101010,
5550 	DRM_FORMAT_XBGR2101010,
5551 	DRM_FORMAT_ARGB2101010,
5552 	DRM_FORMAT_ABGR2101010,
5553 	DRM_FORMAT_XBGR8888,
5554 	DRM_FORMAT_ABGR8888,
5555 	DRM_FORMAT_RGB565,
5556 };
5557 
5558 static const uint32_t overlay_formats[] = {
5559 	DRM_FORMAT_XRGB8888,
5560 	DRM_FORMAT_ARGB8888,
5561 	DRM_FORMAT_RGBA8888,
5562 	DRM_FORMAT_XBGR8888,
5563 	DRM_FORMAT_ABGR8888,
5564 	DRM_FORMAT_RGB565
5565 };
5566 
5567 static const u32 cursor_formats[] = {
5568 	DRM_FORMAT_ARGB8888
5569 };
5570 
5571 static int get_plane_formats(const struct drm_plane *plane,
5572 			     const struct dc_plane_cap *plane_cap,
5573 			     uint32_t *formats, int max_formats)
5574 {
5575 	int i, num_formats = 0;
5576 
5577 	/*
5578 	 * TODO: Query support for each group of formats directly from
5579 	 * DC plane caps. This will require adding more formats to the
5580 	 * caps list.
5581 	 */
5582 
5583 	switch (plane->type) {
5584 	case DRM_PLANE_TYPE_PRIMARY:
5585 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5586 			if (num_formats >= max_formats)
5587 				break;
5588 
5589 			formats[num_formats++] = rgb_formats[i];
5590 		}
5591 
5592 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5593 			formats[num_formats++] = DRM_FORMAT_NV12;
5594 		if (plane_cap && plane_cap->pixel_format_support.p010)
5595 			formats[num_formats++] = DRM_FORMAT_P010;
5596 		break;
5597 
5598 	case DRM_PLANE_TYPE_OVERLAY:
5599 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5600 			if (num_formats >= max_formats)
5601 				break;
5602 
5603 			formats[num_formats++] = overlay_formats[i];
5604 		}
5605 		break;
5606 
5607 	case DRM_PLANE_TYPE_CURSOR:
5608 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5609 			if (num_formats >= max_formats)
5610 				break;
5611 
5612 			formats[num_formats++] = cursor_formats[i];
5613 		}
5614 		break;
5615 	}
5616 
5617 	return num_formats;
5618 }
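/*
 * Minimal usage sketch (illustrative only; example_count_plane_formats() is
 * not part of the driver): query the format list for a plane before handing
 * it to drm_universal_plane_init(), as amdgpu_dm_plane_init() below does.
 */
static inline int example_count_plane_formats(const struct drm_plane *plane,
					      const struct dc_plane_cap *cap)
{
	uint32_t formats[32];

	/* Returns how many entries of formats[] were populated. */
	return get_plane_formats(plane, cap, formats, ARRAY_SIZE(formats));
}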
5619 
5620 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5621 				struct drm_plane *plane,
5622 				unsigned long possible_crtcs,
5623 				const struct dc_plane_cap *plane_cap)
5624 {
5625 	uint32_t formats[32];
5626 	int num_formats;
5627 	int res = -EPERM;
5628 
5629 	num_formats = get_plane_formats(plane, plane_cap, formats,
5630 					ARRAY_SIZE(formats));
5631 
5632 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5633 				       &dm_plane_funcs, formats, num_formats,
5634 				       NULL, plane->type, NULL);
5635 	if (res)
5636 		return res;
5637 
5638 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5639 	    plane_cap && plane_cap->per_pixel_alpha) {
5640 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5641 					  BIT(DRM_MODE_BLEND_PREMULTI);
5642 
5643 		drm_plane_create_alpha_property(plane);
5644 		drm_plane_create_blend_mode_property(plane, blend_caps);
5645 	}
5646 
5647 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5648 	    plane_cap &&
5649 	    (plane_cap->pixel_format_support.nv12 ||
5650 	     plane_cap->pixel_format_support.p010)) {
5651 		/* This only affects YUV formats. */
5652 		drm_plane_create_color_properties(
5653 			plane,
5654 			BIT(DRM_COLOR_YCBCR_BT601) |
5655 			BIT(DRM_COLOR_YCBCR_BT709) |
5656 			BIT(DRM_COLOR_YCBCR_BT2020),
5657 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5658 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5659 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5660 	}
5661 
5662 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5663 
5664 	/* Create (reset) the plane state */
5665 	if (plane->funcs->reset)
5666 		plane->funcs->reset(plane);
5667 
5668 	return 0;
5669 }
5670 
5671 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5672 			       struct drm_plane *plane,
5673 			       uint32_t crtc_index)
5674 {
5675 	struct amdgpu_crtc *acrtc = NULL;
5676 	struct drm_plane *cursor_plane;
5677 
5678 	int res = -ENOMEM;
5679 
5680 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5681 	if (!cursor_plane)
5682 		goto fail;
5683 
5684 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5685 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5686 
5687 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5688 	if (!acrtc)
5689 		goto fail;
5690 
5691 	res = drm_crtc_init_with_planes(
5692 			dm->ddev,
5693 			&acrtc->base,
5694 			plane,
5695 			cursor_plane,
5696 			&amdgpu_dm_crtc_funcs, NULL);
5697 
5698 	if (res)
5699 		goto fail;
5700 
5701 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5702 
5703 	/* Create (reset) the CRTC state */
5704 	if (acrtc->base.funcs->reset)
5705 		acrtc->base.funcs->reset(&acrtc->base);
5706 
5707 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5708 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5709 
5710 	acrtc->crtc_id = crtc_index;
5711 	acrtc->base.enabled = false;
5712 	acrtc->otg_inst = -1;
5713 
5714 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5715 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5716 				   true, MAX_COLOR_LUT_ENTRIES);
5717 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5718 
5719 	return 0;
5720 
5721 fail:
5722 	kfree(acrtc);
5723 	kfree(cursor_plane);
5724 	return res;
5725 }
5726 
5727 
5728 static int to_drm_connector_type(enum amd_signal_type st)
5729 {
5730 	switch (st) {
5731 	case SIGNAL_TYPE_HDMI_TYPE_A:
5732 		return DRM_MODE_CONNECTOR_HDMIA;
5733 	case SIGNAL_TYPE_EDP:
5734 		return DRM_MODE_CONNECTOR_eDP;
5735 	case SIGNAL_TYPE_LVDS:
5736 		return DRM_MODE_CONNECTOR_LVDS;
5737 	case SIGNAL_TYPE_RGB:
5738 		return DRM_MODE_CONNECTOR_VGA;
5739 	case SIGNAL_TYPE_DISPLAY_PORT:
5740 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5741 		return DRM_MODE_CONNECTOR_DisplayPort;
5742 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5743 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5744 		return DRM_MODE_CONNECTOR_DVID;
5745 	case SIGNAL_TYPE_VIRTUAL:
5746 		return DRM_MODE_CONNECTOR_VIRTUAL;
5747 
5748 	default:
5749 		return DRM_MODE_CONNECTOR_Unknown;
5750 	}
5751 }
5752 
5753 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5754 {
5755 	struct drm_encoder *encoder;
5756 
5757 	/* There is only one encoder per connector */
5758 	drm_connector_for_each_possible_encoder(connector, encoder)
5759 		return encoder;
5760 
5761 	return NULL;
5762 }
5763 
5764 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5765 {
5766 	struct drm_encoder *encoder;
5767 	struct amdgpu_encoder *amdgpu_encoder;
5768 
5769 	encoder = amdgpu_dm_connector_to_encoder(connector);
5770 
5771 	if (encoder == NULL)
5772 		return;
5773 
5774 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5775 
5776 	amdgpu_encoder->native_mode.clock = 0;
5777 
5778 	if (!list_empty(&connector->probed_modes)) {
5779 		struct drm_display_mode *preferred_mode = NULL;
5780 
5781 		list_for_each_entry(preferred_mode,
5782 				    &connector->probed_modes,
5783 				    head) {
5784 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5785 				amdgpu_encoder->native_mode = *preferred_mode;
5786 
5787 			break;
5788 		}
5789 
5790 	}
5791 }
5792 
5793 static struct drm_display_mode *
5794 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5795 			     char *name,
5796 			     int hdisplay, int vdisplay)
5797 {
5798 	struct drm_device *dev = encoder->dev;
5799 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5800 	struct drm_display_mode *mode = NULL;
5801 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5802 
5803 	mode = drm_mode_duplicate(dev, native_mode);
5804 
5805 	if (mode == NULL)
5806 		return NULL;
5807 
5808 	mode->hdisplay = hdisplay;
5809 	mode->vdisplay = vdisplay;
5810 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5811 #ifdef __linux__
5812 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5813 #else
5814 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5815 #endif
5816 
5817 	return mode;
5818 
5819 }
5820 
5821 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5822 						 struct drm_connector *connector)
5823 {
5824 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5825 	struct drm_display_mode *mode = NULL;
5826 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5827 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5828 				to_amdgpu_dm_connector(connector);
5829 	int i;
5830 	int n;
5831 	struct mode_size {
5832 		char name[DRM_DISPLAY_MODE_LEN];
5833 		int w;
5834 		int h;
5835 	} common_modes[] = {
5836 		{  "640x480",  640,  480},
5837 		{  "800x600",  800,  600},
5838 		{ "1024x768", 1024,  768},
5839 		{ "1280x720", 1280,  720},
5840 		{ "1280x800", 1280,  800},
5841 		{"1280x1024", 1280, 1024},
5842 		{ "1440x900", 1440,  900},
5843 		{"1680x1050", 1680, 1050},
5844 		{"1600x1200", 1600, 1200},
5845 		{"1920x1080", 1920, 1080},
5846 		{"1920x1200", 1920, 1200}
5847 	};
5848 
5849 	n = ARRAY_SIZE(common_modes);
5850 
5851 	for (i = 0; i < n; i++) {
5852 		struct drm_display_mode *curmode = NULL;
5853 		bool mode_existed = false;
5854 
5855 		if (common_modes[i].w > native_mode->hdisplay ||
5856 		    common_modes[i].h > native_mode->vdisplay ||
5857 		   (common_modes[i].w == native_mode->hdisplay &&
5858 		    common_modes[i].h == native_mode->vdisplay))
5859 			continue;
5860 
5861 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5862 			if (common_modes[i].w == curmode->hdisplay &&
5863 			    common_modes[i].h == curmode->vdisplay) {
5864 				mode_existed = true;
5865 				break;
5866 			}
5867 		}
5868 
5869 		if (mode_existed)
5870 			continue;
5871 
5872 		mode = amdgpu_dm_create_common_mode(encoder,
5873 				common_modes[i].name, common_modes[i].w,
5874 				common_modes[i].h);
5875 		drm_mode_probed_add(connector, mode);
5876 		amdgpu_dm_connector->num_modes++;
5877 	}
5878 }
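/*
 * Worked example (illustrative): with a 1920x1080 native mode, 1920x1200
 * is skipped (taller than native), 1920x1080 itself is skipped (equal to
 * native), and 1680x1050 is added unless an EDID mode of the same size is
 * already in the probed list.
 */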
5879 
5880 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5881 					      struct edid *edid)
5882 {
5883 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5884 			to_amdgpu_dm_connector(connector);
5885 
5886 	if (edid) {
5887 		/* empty probed_modes */
5888 		INIT_LIST_HEAD(&connector->probed_modes);
5889 		amdgpu_dm_connector->num_modes =
5890 				drm_add_edid_modes(connector, edid);
5891 
5892 		/* Sort the probed modes before calling
5893 		 * amdgpu_dm_get_native_mode(), since an EDID can have
5894 		 * more than one preferred mode. Modes later in the
5895 		 * probed mode list could be of a higher, preferred
5896 		 * resolution: for example, 3840x2160 in the base EDID
5897 		 * preferred timing and 4096x2160 as the preferred
5898 		 * resolution in a later DID extension block.
5899 		 */
5900 		drm_mode_sort(&connector->probed_modes);
5901 		amdgpu_dm_get_native_mode(connector);
5902 	} else {
5903 		amdgpu_dm_connector->num_modes = 0;
5904 	}
5905 }
5906 
5907 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5908 {
5909 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5910 			to_amdgpu_dm_connector(connector);
5911 	struct drm_encoder *encoder;
5912 	struct edid *edid = amdgpu_dm_connector->edid;
5913 
5914 	encoder = amdgpu_dm_connector_to_encoder(connector);
5915 
5916 	if (!edid || !drm_edid_is_valid(edid)) {
5917 		amdgpu_dm_connector->num_modes =
5918 				drm_add_modes_noedid(connector, 640, 480);
5919 	} else {
5920 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5921 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5922 	}
5923 	amdgpu_dm_fbc_init(connector);
5924 
5925 	return amdgpu_dm_connector->num_modes;
5926 }
5927 
5928 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5929 				     struct amdgpu_dm_connector *aconnector,
5930 				     int connector_type,
5931 				     struct dc_link *link,
5932 				     int link_index)
5933 {
5934 	struct amdgpu_device *adev = dm->ddev->dev_private;
5935 
5936 	/*
5937 	 * Some of the properties below require access to state, like bpc.
5938 	 * Allocate some default initial connector state with our reset helper.
5939 	 */
5940 	if (aconnector->base.funcs->reset)
5941 		aconnector->base.funcs->reset(&aconnector->base);
5942 
5943 	aconnector->connector_id = link_index;
5944 	aconnector->dc_link = link;
5945 	aconnector->base.interlace_allowed = false;
5946 	aconnector->base.doublescan_allowed = false;
5947 	aconnector->base.stereo_allowed = false;
5948 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5949 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5950 	aconnector->audio_inst = -1;
5951 	rw_init(&aconnector->hpd_lock, "dmhpd");
5952 
5953 	/*
5954 	 * Configure HPD hot plug support. The connector->polled default
5955 	 * value is 0, which means HPD hot plug is not supported.
5956 	 */
5957 	switch (connector_type) {
5958 	case DRM_MODE_CONNECTOR_HDMIA:
5959 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5960 		aconnector->base.ycbcr_420_allowed =
5961 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5962 		break;
5963 	case DRM_MODE_CONNECTOR_DisplayPort:
5964 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5965 		aconnector->base.ycbcr_420_allowed =
5966 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
5967 		break;
5968 	case DRM_MODE_CONNECTOR_DVID:
5969 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5970 		break;
5971 	default:
5972 		break;
5973 	}
5974 
5975 	drm_object_attach_property(&aconnector->base.base,
5976 				dm->ddev->mode_config.scaling_mode_property,
5977 				DRM_MODE_SCALE_NONE);
5978 
5979 	drm_object_attach_property(&aconnector->base.base,
5980 				adev->mode_info.underscan_property,
5981 				UNDERSCAN_OFF);
5982 	drm_object_attach_property(&aconnector->base.base,
5983 				adev->mode_info.underscan_hborder_property,
5984 				0);
5985 	drm_object_attach_property(&aconnector->base.base,
5986 				adev->mode_info.underscan_vborder_property,
5987 				0);
5988 
5989 	if (!aconnector->mst_port)
5990 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5991 
5992 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
5993 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5994 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5995 
5996 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5997 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5998 		drm_object_attach_property(&aconnector->base.base,
5999 				adev->mode_info.abm_level_property, 0);
6000 	}
6001 
6002 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6003 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6004 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6005 		drm_object_attach_property(
6006 			&aconnector->base.base,
6007 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6008 
6009 		if (!aconnector->mst_port)
6010 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6011 
6012 #ifdef CONFIG_DRM_AMD_DC_HDCP
6013 		if (adev->dm.hdcp_workqueue)
6014 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6015 #endif
6016 	}
6017 }
6018 
6019 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6020 			      struct i2c_msg *msgs, int num)
6021 {
6022 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6023 	struct ddc_service *ddc_service = i2c->ddc_service;
6024 	struct i2c_command cmd;
6025 	int i;
6026 	int result = -EIO;
6027 
6028 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6029 
6030 	if (!cmd.payloads)
6031 		return result;
6032 
6033 	cmd.number_of_payloads = num;
6034 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6035 	cmd.speed = 100;
6036 
6037 	for (i = 0; i < num; i++) {
6038 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6039 		cmd.payloads[i].address = msgs[i].addr;
6040 		cmd.payloads[i].length = msgs[i].len;
6041 		cmd.payloads[i].data = msgs[i].buf;
6042 	}
6043 
6044 	if (dc_submit_i2c(
6045 			ddc_service->ctx->dc,
6046 			ddc_service->ddc_pin->hw_info.ddc_channel,
6047 			&cmd))
6048 		result = num;
6049 
6050 	kfree(cmd.payloads);
6051 	return result;
6052 }
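/*
 * Illustrative sketch (example_read_edid_byte() is not part of the driver):
 * a one-byte EDID read over this adapter is a write of the offset followed
 * by a read, which the loop above translates into two i2c_payload entries
 * for dc_submit_i2c(). 0x50 is the standard DDC/EDID slave address.
 */
static inline int example_read_edid_byte(struct i2c_adapter *adap,
					 u8 offset, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs)) == 2 ? 0 : -EIO;
}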
6053 
6054 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6055 {
6056 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6057 }
6058 
6059 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6060 	.master_xfer = amdgpu_dm_i2c_xfer,
6061 	.functionality = amdgpu_dm_i2c_func,
6062 };
6063 
6064 static struct amdgpu_i2c_adapter *
6065 create_i2c(struct ddc_service *ddc_service,
6066 	   int link_index,
6067 	   int *res)
6068 {
6069 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6070 	struct amdgpu_i2c_adapter *i2c;
6071 
6072 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6073 	if (!i2c)
6074 		return NULL;
6075 #ifdef notyet
6076 	i2c->base.owner = THIS_MODULE;
6077 	i2c->base.class = I2C_CLASS_DDC;
6078 	i2c->base.dev.parent = &adev->pdev->dev;
6079 #endif
6080 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6081 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6082 	i2c_set_adapdata(&i2c->base, i2c);
6083 	i2c->ddc_service = ddc_service;
6084 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6085 
6086 	return i2c;
6087 }
6088 
6089 
6090 /*
6091  * Note: this function assumes that dc_link_detect() was called for the
6092  * dc_link which will be represented by this aconnector.
6093  */
6094 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6095 				    struct amdgpu_dm_connector *aconnector,
6096 				    uint32_t link_index,
6097 				    struct amdgpu_encoder *aencoder)
6098 {
6099 	int res = 0;
6100 	int connector_type;
6101 	struct dc *dc = dm->dc;
6102 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6103 	struct amdgpu_i2c_adapter *i2c;
6104 
6105 	link->priv = aconnector;
6106 
6107 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6108 
6109 	i2c = create_i2c(link->ddc, link->link_index, &res);
6110 	if (!i2c) {
6111 		DRM_ERROR("Failed to create i2c adapter data\n");
6112 		return -ENOMEM;
6113 	}
6114 
6115 	aconnector->i2c = i2c;
6116 	res = i2c_add_adapter(&i2c->base);
6117 
6118 	if (res) {
6119 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6120 		goto out_free;
6121 	}
6122 
6123 	connector_type = to_drm_connector_type(link->connector_signal);
6124 
6125 	res = drm_connector_init_with_ddc(
6126 			dm->ddev,
6127 			&aconnector->base,
6128 			&amdgpu_dm_connector_funcs,
6129 			connector_type,
6130 			&i2c->base);
6131 
6132 	if (res) {
6133 		DRM_ERROR("connector_init failed\n");
6134 		aconnector->connector_id = -1;
6135 		goto out_free;
6136 	}
6137 
6138 	drm_connector_helper_add(
6139 			&aconnector->base,
6140 			&amdgpu_dm_connector_helper_funcs);
6141 
6142 	amdgpu_dm_connector_init_helper(
6143 		dm,
6144 		aconnector,
6145 		connector_type,
6146 		link,
6147 		link_index);
6148 
6149 	drm_connector_attach_encoder(
6150 		&aconnector->base, &aencoder->base);
6151 
6152 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6153 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6154 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6155 
6156 out_free:
6157 	if (res) {
6158 		kfree(i2c);
6159 		aconnector->i2c = NULL;
6160 	}
6161 	return res;
6162 }
6163 
6164 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6165 {
6166 	switch (adev->mode_info.num_crtc) {
6167 	case 1:
6168 		return 0x1;
6169 	case 2:
6170 		return 0x3;
6171 	case 3:
6172 		return 0x7;
6173 	case 4:
6174 		return 0xf;
6175 	case 5:
6176 		return 0x1f;
6177 	case 6:
6178 	default:
6179 		return 0x3f;
6180 	}
6181 }
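/*
 * Illustrative equivalent (example_encoder_crtc_mask() is not used by the
 * driver): for the 1-6 CRTC range handled above, the switch collapses to
 * the closed form below, assuming the kernel's min() macro.
 */
static inline u32 example_encoder_crtc_mask(int num_crtc)
{
	return (1u << min(num_crtc, 6)) - 1;
}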
6182 
6183 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6184 				  struct amdgpu_encoder *aencoder,
6185 				  uint32_t link_index)
6186 {
6187 	struct amdgpu_device *adev = dev->dev_private;
6188 
6189 	int res = drm_encoder_init(dev,
6190 				   &aencoder->base,
6191 				   &amdgpu_dm_encoder_funcs,
6192 				   DRM_MODE_ENCODER_TMDS,
6193 				   NULL);
6194 
6195 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6196 
6197 	if (!res)
6198 		aencoder->encoder_id = link_index;
6199 	else
6200 		aencoder->encoder_id = -1;
6201 
6202 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6203 
6204 	return res;
6205 }
6206 
6207 static void manage_dm_interrupts(struct amdgpu_device *adev,
6208 				 struct amdgpu_crtc *acrtc,
6209 				 bool enable)
6210 {
6211 	/*
6212 	 * This is not a correct translation, but it works as long as the
6213 	 * VBLANK constant is the same as PFLIP.
6214 	 */
6215 	int irq_type =
6216 		amdgpu_display_crtc_idx_to_irq_type(
6217 			adev,
6218 			acrtc->crtc_id);
6219 
6220 	if (enable) {
6221 		drm_crtc_vblank_on(&acrtc->base);
6222 		amdgpu_irq_get(
6223 			adev,
6224 			&adev->pageflip_irq,
6225 			irq_type);
6226 	} else {
6227 
6228 		amdgpu_irq_put(
6229 			adev,
6230 			&adev->pageflip_irq,
6231 			irq_type);
6232 		drm_crtc_vblank_off(&acrtc->base);
6233 	}
6234 }
6235 
6236 static bool
6237 is_scaling_state_different(const struct dm_connector_state *dm_state,
6238 			   const struct dm_connector_state *old_dm_state)
6239 {
6240 	if (dm_state->scaling != old_dm_state->scaling)
6241 		return true;
6242 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6243 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6244 			return true;
6245 	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6246 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6247 			return true;
6248 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6249 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6250 		return true;
6251 	return false;
6252 }
6253 
6254 #ifdef CONFIG_DRM_AMD_DC_HDCP
6255 static bool is_content_protection_different(struct drm_connector_state *state,
6256 					    const struct drm_connector_state *old_state,
6257 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6258 {
6259 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6260 
6261 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6262 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6263 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6264 		return true;
6265 	}
6266 
6267 	/* CP is being re-enabled, ignore this. */
6268 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6269 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6270 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6271 		return false;
6272 	}
6273 
6274 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6275 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6276 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6277 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6278 
6279 	/* Check if something is connected/enabled; otherwise we would start
6280 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6281 	 */
6282 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6283 	    aconnector->dc_sink != NULL)
6284 		return true;
6285 
6286 	if (old_state->content_protection == state->content_protection)
6287 		return false;
6288 
6289 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6290 		return true;
6291 
6292 	return false;
6293 }
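/*
 * Worked example (illustrative): on S3 resume the old state is UNDESIRED
 * while the restored state is ENABLED, so the new state is first downgraded
 * to DESIRED; with the connector's DPMS on and a sink present, the function
 * then returns true and HDCP is renegotiated rather than assumed active.
 */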
6294 
6295 #endif
6296 static void remove_stream(struct amdgpu_device *adev,
6297 			  struct amdgpu_crtc *acrtc,
6298 			  struct dc_stream_state *stream)
6299 {
6300 	/* this is the update mode case */
6301 
6302 	acrtc->otg_inst = -1;
6303 	acrtc->enabled = false;
6304 }
6305 
6306 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6307 			       struct dc_cursor_position *position)
6308 {
6309 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6310 	int x, y;
6311 	int xorigin = 0, yorigin = 0;
6312 
6313 	position->enable = false;
6314 	position->x = 0;
6315 	position->y = 0;
6316 
6317 	if (!crtc || !plane->state->fb)
6318 		return 0;
6319 
6320 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6321 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6322 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6323 			  __func__,
6324 			  plane->state->crtc_w,
6325 			  plane->state->crtc_h);
6326 		return -EINVAL;
6327 	}
6328 
6329 	x = plane->state->crtc_x;
6330 	y = plane->state->crtc_y;
6331 
6332 	if (x <= -amdgpu_crtc->max_cursor_width ||
6333 	    y <= -amdgpu_crtc->max_cursor_height)
6334 		return 0;
6335 
6336 	if (x < 0) {
6337 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6338 		x = 0;
6339 	}
6340 	if (y < 0) {
6341 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6342 		y = 0;
6343 	}
6344 	position->enable = true;
6345 	position->translate_by_source = true;
6346 	position->x = x;
6347 	position->y = y;
6348 	position->x_hotspot = xorigin;
6349 	position->y_hotspot = yorigin;
6350 
6351 	return 0;
6352 }
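/*
 * Worked example (illustrative): with a 64x64 max cursor size, a cursor at
 * crtc_x = -16, crtc_y = 20 becomes x = 0, xorigin = 16, y = 20,
 * yorigin = 0: the hotspot is shifted into the cursor surface instead of
 * the surface being moved off-screen.
 */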
6353 
6354 static void handle_cursor_update(struct drm_plane *plane,
6355 				 struct drm_plane_state *old_plane_state)
6356 {
6357 	struct amdgpu_device *adev = plane->dev->dev_private;
6358 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6359 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6360 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6361 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6362 	uint64_t address = afb ? afb->address : 0;
6363 	struct dc_cursor_position position;
6364 	struct dc_cursor_attributes attributes;
6365 	int ret;
6366 
6367 	if (!plane->state->fb && !old_plane_state->fb)
6368 		return;
6369 
6370 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
6371 			 __func__,
6372 			 amdgpu_crtc->crtc_id,
6373 			 plane->state->crtc_w,
6374 			 plane->state->crtc_h);
6375 
6376 	ret = get_cursor_position(plane, crtc, &position);
6377 	if (ret)
6378 		return;
6379 
6380 	if (!position.enable) {
6381 		/* turn off cursor */
6382 		if (crtc_state && crtc_state->stream) {
6383 			mutex_lock(&adev->dm.dc_lock);
6384 			dc_stream_set_cursor_position(crtc_state->stream,
6385 						      &position);
6386 			mutex_unlock(&adev->dm.dc_lock);
6387 		}
6388 		return;
6389 	}
6390 
6391 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6392 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6393 
6394 	memset(&attributes, 0, sizeof(attributes));
6395 	attributes.address.high_part = upper_32_bits(address);
6396 	attributes.address.low_part  = lower_32_bits(address);
6397 	attributes.width             = plane->state->crtc_w;
6398 	attributes.height            = plane->state->crtc_h;
6399 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6400 	attributes.rotation_angle    = 0;
6401 	attributes.attribute_flags.value = 0;
6402 
6403 	attributes.pitch = attributes.width;
6404 
6405 	if (crtc_state->stream) {
6406 		mutex_lock(&adev->dm.dc_lock);
6407 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6408 							 &attributes))
6409 			DRM_ERROR("DC failed to set cursor attributes\n");
6410 
6411 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6412 						   &position))
6413 			DRM_ERROR("DC failed to set cursor position\n");
6414 		mutex_unlock(&adev->dm.dc_lock);
6415 	}
6416 }
6417 
6418 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6419 {
6420 
6421 	assert_spin_locked(&acrtc->base.dev->event_lock);
6422 	WARN_ON(acrtc->event);
6423 
6424 	acrtc->event = acrtc->base.state->event;
6425 
6426 	/* Set the flip status */
6427 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6428 
6429 	/* Mark this event as consumed */
6430 	acrtc->base.state->event = NULL;
6431 
6432 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6433 						 acrtc->crtc_id);
6434 }
6435 
6436 static void update_freesync_state_on_stream(
6437 	struct amdgpu_display_manager *dm,
6438 	struct dm_crtc_state *new_crtc_state,
6439 	struct dc_stream_state *new_stream,
6440 	struct dc_plane_state *surface,
6441 	u32 flip_timestamp_in_us)
6442 {
6443 	struct mod_vrr_params vrr_params;
6444 	struct dc_info_packet vrr_infopacket = {0};
6445 	struct amdgpu_device *adev = dm->adev;
6446 	unsigned long flags;
6447 
6448 	if (!new_stream)
6449 		return;
6450 
6451 	/*
6452 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6453 	 * For now it's sufficient to just guard against these conditions.
6454 	 */
6455 
6456 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6457 		return;
6458 
6459 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6460 	vrr_params = new_crtc_state->vrr_params;
6461 
6462 	if (surface) {
6463 		mod_freesync_handle_preflip(
6464 			dm->freesync_module,
6465 			surface,
6466 			new_stream,
6467 			flip_timestamp_in_us,
6468 			&vrr_params);
6469 
6470 		if (adev->family < AMDGPU_FAMILY_AI &&
6471 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6472 			mod_freesync_handle_v_update(dm->freesync_module,
6473 						     new_stream, &vrr_params);
6474 
6475 			/* Need to call this before the frame ends. */
6476 			dc_stream_adjust_vmin_vmax(dm->dc,
6477 						   new_crtc_state->stream,
6478 						   &vrr_params.adjust);
6479 		}
6480 	}
6481 
6482 	mod_freesync_build_vrr_infopacket(
6483 		dm->freesync_module,
6484 		new_stream,
6485 		&vrr_params,
6486 		PACKET_TYPE_VRR,
6487 		TRANSFER_FUNC_UNKNOWN,
6488 		&vrr_infopacket);
6489 
6490 	new_crtc_state->freesync_timing_changed |=
6491 		(memcmp(&new_crtc_state->vrr_params.adjust,
6492 			&vrr_params.adjust,
6493 			sizeof(vrr_params.adjust)) != 0);
6494 
6495 	new_crtc_state->freesync_vrr_info_changed |=
6496 		(memcmp(&new_crtc_state->vrr_infopacket,
6497 			&vrr_infopacket,
6498 			sizeof(vrr_infopacket)) != 0);
6499 
6500 	new_crtc_state->vrr_params = vrr_params;
6501 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6502 
6503 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6504 	new_stream->vrr_infopacket = vrr_infopacket;
6505 
6506 	if (new_crtc_state->freesync_vrr_info_changed)
6507 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6508 			      new_crtc_state->base.crtc->base.id,
6509 			      (int)new_crtc_state->base.vrr_enabled,
6510 			      (int)vrr_params.state);
6511 
6512 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6513 }
6514 
6515 static void pre_update_freesync_state_on_stream(
6516 	struct amdgpu_display_manager *dm,
6517 	struct dm_crtc_state *new_crtc_state)
6518 {
6519 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6520 	struct mod_vrr_params vrr_params;
6521 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6522 	struct amdgpu_device *adev = dm->adev;
6523 	unsigned long flags;
6524 
6525 	if (!new_stream)
6526 		return;
6527 
6528 	/*
6529 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6530 	 * For now it's sufficient to just guard against these conditions.
6531 	 */
6532 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6533 		return;
6534 
6535 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6536 	vrr_params = new_crtc_state->vrr_params;
6537 
6538 	if (new_crtc_state->vrr_supported &&
6539 	    config.min_refresh_in_uhz &&
6540 	    config.max_refresh_in_uhz) {
6541 		config.state = new_crtc_state->base.vrr_enabled ?
6542 			VRR_STATE_ACTIVE_VARIABLE :
6543 			VRR_STATE_INACTIVE;
6544 	} else {
6545 		config.state = VRR_STATE_UNSUPPORTED;
6546 	}
6547 
6548 	mod_freesync_build_vrr_params(dm->freesync_module,
6549 				      new_stream,
6550 				      &config, &vrr_params);
6551 
6552 	new_crtc_state->freesync_timing_changed |=
6553 		(memcmp(&new_crtc_state->vrr_params.adjust,
6554 			&vrr_params.adjust,
6555 			sizeof(vrr_params.adjust)) != 0);
6556 
6557 	new_crtc_state->vrr_params = vrr_params;
6558 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6559 }
6560 
6561 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6562 					    struct dm_crtc_state *new_state)
6563 {
6564 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6565 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6566 
6567 	if (!old_vrr_active && new_vrr_active) {
6568 		/* Transition VRR inactive -> active:
6569 		 * While VRR is active, we must not disable vblank irq, as a
6570 		 * re-enable after a disable would compute bogus vblank/pflip
6571 		 * timestamps if it happened inside the display front-porch.
6572 		 *
6573 		 * We also need vupdate irq for the actual core vblank handling
6574 		 * at end of vblank.
6575 		 */
6576 		dm_set_vupdate_irq(new_state->base.crtc, true);
6577 		drm_crtc_vblank_get(new_state->base.crtc);
6578 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6579 				 __func__, new_state->base.crtc->base.id);
6580 	} else if (old_vrr_active && !new_vrr_active) {
6581 		/* Transition VRR active -> inactive:
6582 		 * Allow vblank irq disable again for fixed refresh rate.
6583 		 */
6584 		dm_set_vupdate_irq(new_state->base.crtc, false);
6585 		drm_crtc_vblank_put(new_state->base.crtc);
6586 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6587 				 __func__, new_state->base.crtc->base.id);
6588 	}
6589 }
6590 
6591 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6592 {
6593 	struct drm_plane *plane;
6594 	struct drm_plane_state *old_plane_state, *new_plane_state;
6595 	int i;
6596 
6597 	/*
6598 	 * TODO: Make this per-stream so we don't issue redundant updates for
6599 	 * commits with multiple streams.
6600 	 */
6601 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6602 				       new_plane_state, i)
6603 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6604 			handle_cursor_update(plane, old_plane_state);
6605 }
6606 
6607 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6608 				    struct dc_state *dc_state,
6609 				    struct drm_device *dev,
6610 				    struct amdgpu_display_manager *dm,
6611 				    struct drm_crtc *pcrtc,
6612 				    bool wait_for_vblank)
6613 {
6614 	uint32_t i;
6615 	uint64_t timestamp_ns;
6616 	struct drm_plane *plane;
6617 	struct drm_plane_state *old_plane_state, *new_plane_state;
6618 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6619 	struct drm_crtc_state *new_pcrtc_state =
6620 			drm_atomic_get_new_crtc_state(state, pcrtc);
6621 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6622 	struct dm_crtc_state *dm_old_crtc_state =
6623 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6624 	int planes_count = 0, vpos, hpos;
6625 	long r;
6626 	unsigned long flags;
6627 	struct amdgpu_bo *abo;
6628 	uint64_t tiling_flags;
6629 	uint32_t target_vblank, last_flip_vblank;
6630 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6631 	bool pflip_present = false;
6632 	struct {
6633 		struct dc_surface_update surface_updates[MAX_SURFACES];
6634 		struct dc_plane_info plane_infos[MAX_SURFACES];
6635 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6636 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6637 		struct dc_stream_update stream_update;
6638 	} *bundle;
6639 
6640 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6641 
6642 	if (!bundle) {
6643 		dm_error("Failed to allocate update bundle\n");
6644 		goto cleanup;
6645 	}
6646 
6647 	/*
6648 	 * Disable the cursor first if we're disabling all the planes.
6649 	 * It'll remain on the screen after the planes are re-enabled
6650 	 * if we don't.
6651 	 */
6652 	if (acrtc_state->active_planes == 0)
6653 		amdgpu_dm_commit_cursors(state);
6654 
6655 	/* update planes when needed */
6656 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6657 		struct drm_crtc *crtc = new_plane_state->crtc;
6658 		struct drm_crtc_state *new_crtc_state;
6659 		struct drm_framebuffer *fb = new_plane_state->fb;
6660 		bool plane_needs_flip;
6661 		struct dc_plane_state *dc_plane;
6662 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6663 
6664 		/* Cursor plane is handled after stream updates */
6665 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6666 			continue;
6667 
6668 		if (!fb || !crtc || pcrtc != crtc)
6669 			continue;
6670 
6671 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6672 		if (!new_crtc_state->active)
6673 			continue;
6674 
6675 		dc_plane = dm_new_plane_state->dc_state;
6676 
6677 		bundle->surface_updates[planes_count].surface = dc_plane;
6678 		if (new_pcrtc_state->color_mgmt_changed) {
6679 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6680 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6681 		}
6682 
6683 		fill_dc_scaling_info(new_plane_state,
6684 				     &bundle->scaling_infos[planes_count]);
6685 
6686 		bundle->surface_updates[planes_count].scaling_info =
6687 			&bundle->scaling_infos[planes_count];
6688 
6689 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6690 
6691 		pflip_present = pflip_present || plane_needs_flip;
6692 
6693 		if (!plane_needs_flip) {
6694 			planes_count += 1;
6695 			continue;
6696 		}
6697 
6698 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6699 
6700 		/*
6701 		 * Wait for all fences on this FB. Do limited wait to avoid
6702 		 * deadlock during GPU reset when this fence will not signal
6703 		 * but we hold reservation lock for the BO.
6704 		 */
6705 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6706 							false,
6707 							msecs_to_jiffies(5000));
6708 		if (unlikely(r <= 0))
6709 			DRM_ERROR("Waiting for fences timed out!\n");
6710 
6711 		/*
6712 		 * TODO: This might fail, hence it is better not used; wait
6713 		 * explicitly on the fences instead. In general this should
6714 		 * only be called for a blocking commit, as per the framework
6715 		 * helpers.
6716 		 */
6717 		r = amdgpu_bo_reserve(abo, true);
6718 		if (unlikely(r != 0))
6719 			DRM_ERROR("failed to reserve buffer before flip\n");
6720 
6721 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6722 
6723 		amdgpu_bo_unreserve(abo);
6724 
6725 		fill_dc_plane_info_and_addr(
6726 			dm->adev, new_plane_state, tiling_flags,
6727 			&bundle->plane_infos[planes_count],
6728 			&bundle->flip_addrs[planes_count].address,
6729 			false);
6730 
6731 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6732 				 new_plane_state->plane->index,
6733 				 bundle->plane_infos[planes_count].dcc.enable);
6734 
6735 		bundle->surface_updates[planes_count].plane_info =
6736 			&bundle->plane_infos[planes_count];
6737 
6738 		/*
6739 		 * Only allow immediate flips for fast updates that don't
6740 		 * change FB pitch, DCC state, rotation or mirroing.
6741 		 * change FB pitch, DCC state, rotation or mirroring.
6742 		bundle->flip_addrs[planes_count].flip_immediate =
6743 			crtc->state->async_flip &&
6744 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6745 
6746 		timestamp_ns = ktime_get_ns();
6747 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6748 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6749 		bundle->surface_updates[planes_count].surface = dc_plane;
6750 
6751 		if (!bundle->surface_updates[planes_count].surface) {
6752 			DRM_ERROR("No surface for CRTC: id=%d\n",
6753 					acrtc_attach->crtc_id);
6754 			continue;
6755 		}
6756 
6757 		if (plane == pcrtc->primary)
6758 			update_freesync_state_on_stream(
6759 				dm,
6760 				acrtc_state,
6761 				acrtc_state->stream,
6762 				dc_plane,
6763 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6764 
6765 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6766 				 __func__,
6767 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6768 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6769 
6770 		planes_count += 1;
6771 
6772 	}
6773 
6774 	if (pflip_present) {
6775 		if (!vrr_active) {
6776 			/* Use old throttling in non-vrr fixed refresh rate mode
6777 			 * to keep flip scheduling based on target vblank counts
6778 			 * working in a backwards compatible way, e.g., for
6779 			 * clients using the GLX_OML_sync_control extension or
6780 			 * DRI3/Present extension with defined target_msc.
6781 			 */
6782 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6783 		}
6784 		else {
6785 			/* For variable refresh rate mode only:
6786 			 * Get vblank of last completed flip to avoid > 1 vrr
6787 			 * flips per video frame by use of throttling, but allow
6788 			 * flip programming anywhere in the possibly large
6789 			 * variable vrr vblank interval for fine-grained flip
6790 			 * timing control and more opportunity to avoid stutter
6791 			 * on late submission of flips.
6792 			 */
6793 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6794 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6795 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6796 		}
6797 
6798 		target_vblank = last_flip_vblank + wait_for_vblank;
6799 
6800 		/*
6801 		 * Wait until we're out of the vertical blank period before the one
6802 		 * targeted by the flip
6803 		 */
6804 		while ((acrtc_attach->enabled &&
6805 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6806 							    0, &vpos, &hpos, NULL,
6807 							    NULL, &pcrtc->hwmode)
6808 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6809 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6810 			(int)(target_vblank -
6811 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6812 			usleep_range(1000, 1100);
6813 		}
6814 
6815 		if (acrtc_attach->base.state->event) {
6816 			drm_crtc_vblank_get(pcrtc);
6817 
6818 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6819 
6820 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6821 			prepare_flip_isr(acrtc_attach);
6822 
6823 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6824 		}
6825 
6826 		if (acrtc_state->stream) {
6827 			if (acrtc_state->freesync_vrr_info_changed)
6828 				bundle->stream_update.vrr_infopacket =
6829 					&acrtc_state->stream->vrr_infopacket;
6830 		}
6831 	}
6832 
6833 	/* Update the planes if changed or disable if we don't have any. */
6834 	if ((planes_count || acrtc_state->active_planes == 0) &&
6835 		acrtc_state->stream) {
6836 		bundle->stream_update.stream = acrtc_state->stream;
6837 		if (new_pcrtc_state->mode_changed) {
6838 			bundle->stream_update.src = acrtc_state->stream->src;
6839 			bundle->stream_update.dst = acrtc_state->stream->dst;
6840 		}
6841 
6842 		if (new_pcrtc_state->color_mgmt_changed) {
6843 			/*
6844 			 * TODO: This isn't fully correct since we've actually
6845 			 * already modified the stream in place.
6846 			 */
6847 			bundle->stream_update.gamut_remap =
6848 				&acrtc_state->stream->gamut_remap_matrix;
6849 			bundle->stream_update.output_csc_transform =
6850 				&acrtc_state->stream->csc_color_matrix;
6851 			bundle->stream_update.out_transfer_func =
6852 				acrtc_state->stream->out_transfer_func;
6853 		}
6854 
6855 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6856 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6857 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6858 
6859 		/*
6860 		 * If FreeSync state on the stream has changed then we need to
6861 		 * re-adjust the min/max bounds now that DC doesn't handle this
6862 		 * as part of commit.
6863 		 */
6864 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6865 		    amdgpu_dm_vrr_active(acrtc_state)) {
6866 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6867 			dc_stream_adjust_vmin_vmax(
6868 				dm->dc, acrtc_state->stream,
6869 				&acrtc_state->vrr_params.adjust);
6870 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6871 		}
6872 		mutex_lock(&dm->dc_lock);
6873 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6874 				acrtc_state->stream->link->psr_allow_active)
6875 			amdgpu_dm_psr_disable(acrtc_state->stream);
6876 
6877 		dc_commit_updates_for_stream(dm->dc,
6878 						     bundle->surface_updates,
6879 						     planes_count,
6880 						     acrtc_state->stream,
6881 						     &bundle->stream_update,
6882 						     dc_state);
6883 
6884 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6885 						acrtc_state->stream->psr_version &&
6886 						!acrtc_state->stream->link->psr_feature_enabled)
6887 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6888 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6889 						acrtc_state->stream->link->psr_feature_enabled &&
6890 						!acrtc_state->stream->link->psr_allow_active) {
6891 			amdgpu_dm_psr_enable(acrtc_state->stream);
6892 		}
6893 
6894 		mutex_unlock(&dm->dc_lock);
6895 	}
6896 
6897 	/*
6898 	 * Update cursor state *after* programming all the planes.
6899 	 * This avoids redundant programming in the case where we're going
6900 	 * to be disabling a single plane - those pipes are being disabled.
6901 	 */
6902 	if (acrtc_state->active_planes)
6903 		amdgpu_dm_commit_cursors(state);
6904 
6905 cleanup:
6906 	kfree(bundle);
6907 }
6908 
6909 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6910 				   struct drm_atomic_state *state)
6911 {
6912 	struct amdgpu_device *adev = dev->dev_private;
6913 	struct amdgpu_dm_connector *aconnector;
6914 	struct drm_connector *connector;
6915 	struct drm_connector_state *old_con_state, *new_con_state;
6916 	struct drm_crtc_state *new_crtc_state;
6917 	struct dm_crtc_state *new_dm_crtc_state;
6918 	const struct dc_stream_status *status;
6919 	int i, inst;
6920 
6921 	/* Notify audio device removals. */
6922 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6923 		if (old_con_state->crtc != new_con_state->crtc) {
6924 			/* CRTC changes require notification. */
6925 			goto notify;
6926 		}
6927 
6928 		if (!new_con_state->crtc)
6929 			continue;
6930 
6931 		new_crtc_state = drm_atomic_get_new_crtc_state(
6932 			state, new_con_state->crtc);
6933 
6934 		if (!new_crtc_state)
6935 			continue;
6936 
6937 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6938 			continue;
6939 
6940 	notify:
6941 		aconnector = to_amdgpu_dm_connector(connector);
6942 
6943 		mutex_lock(&adev->dm.audio_lock);
6944 		inst = aconnector->audio_inst;
6945 		aconnector->audio_inst = -1;
6946 		mutex_unlock(&adev->dm.audio_lock);
6947 
6948 		amdgpu_dm_audio_eld_notify(adev, inst);
6949 	}
6950 
6951 	/* Notify audio device additions. */
6952 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6953 		if (!new_con_state->crtc)
6954 			continue;
6955 
6956 		new_crtc_state = drm_atomic_get_new_crtc_state(
6957 			state, new_con_state->crtc);
6958 
6959 		if (!new_crtc_state)
6960 			continue;
6961 
6962 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6963 			continue;
6964 
6965 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6966 		if (!new_dm_crtc_state->stream)
6967 			continue;
6968 
6969 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6970 		if (!status)
6971 			continue;
6972 
6973 		aconnector = to_amdgpu_dm_connector(connector);
6974 
6975 		mutex_lock(&adev->dm.audio_lock);
6976 		inst = status->audio_inst;
6977 		aconnector->audio_inst = inst;
6978 		mutex_unlock(&adev->dm.audio_lock);
6979 
6980 		amdgpu_dm_audio_eld_notify(adev, inst);
6981 	}
6982 }
6983 
6984 /*
6985  * Enable interrupts on CRTCs that are newly active, have undergone
6986  * a modeset, or have active planes again.
6987  *
6988  * Done in two passes, based on the for_modeset flag:
6989  * Pass 1: For CRTCs going through modeset
6990  * Pass 2: For CRTCs going from 0 to n active planes
6991  *
6992  * Interrupts can only be enabled after the planes are programmed,
6993  * so this requires a two-pass approach since we don't want to
6994  * just defer the interrupts until after commit planes every time.
6995  */
6996 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6997 					     struct drm_atomic_state *state,
6998 					     bool for_modeset)
6999 {
7000 	struct amdgpu_device *adev = dev->dev_private;
7001 	struct drm_crtc *crtc;
7002 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7003 	int i;
7004 #ifdef CONFIG_DEBUG_FS
7005 	enum amdgpu_dm_pipe_crc_source source;
7006 #endif
7007 
7008 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7009 				      new_crtc_state, i) {
7010 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7011 		struct dm_crtc_state *dm_new_crtc_state =
7012 			to_dm_crtc_state(new_crtc_state);
7013 		struct dm_crtc_state *dm_old_crtc_state =
7014 			to_dm_crtc_state(old_crtc_state);
7015 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7016 		bool run_pass;
7017 
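		/*
		 * Pass 1 (for_modeset == true) covers CRTCs that underwent a
		 * modeset; pass 2 covers CRTCs whose interrupts were disabled
		 * (zero active planes) and are now active again without a
		 * modeset.
		 */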
7018 		run_pass = (for_modeset && modeset) ||
7019 			   (!for_modeset && !modeset &&
7020 			    !dm_old_crtc_state->interrupts_enabled);
7021 
7022 		if (!run_pass)
7023 			continue;
7024 
7025 		if (!dm_new_crtc_state->interrupts_enabled)
7026 			continue;
7027 
7028 		manage_dm_interrupts(adev, acrtc, true);
7029 
7030 #ifdef CONFIG_DEBUG_FS
7031 		/* The stream has changed so CRC capture needs to be re-enabled. */
7032 		source = dm_new_crtc_state->crc_src;
7033 		if (amdgpu_dm_is_valid_crc_source(source)) {
7034 			amdgpu_dm_crtc_configure_crc_source(
7035 				crtc, dm_new_crtc_state,
7036 				dm_new_crtc_state->crc_src);
7037 		}
7038 #endif
7039 	}
7040 }
7041 
7042 /*
7043  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7044  * @crtc_state: the DRM CRTC state
7045  * @stream_state: the DC stream state.
7046  *
7047  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7048  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7049  */
7050 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7051 						struct dc_stream_state *stream_state)
7052 {
7053 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7054 }
7055 
7056 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7057 				   struct drm_atomic_state *state,
7058 				   bool nonblock)
7059 {
7060 	struct drm_crtc *crtc;
7061 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7062 	struct amdgpu_device *adev = dev->dev_private;
7063 	int i;
7064 
7065 	/*
7066 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7067 	 * a modeset, being disabled, or have no active planes.
7068 	 *
7069 	 * It's done in atomic commit rather than commit tail for now since
7070 	 * some of these interrupt handlers access the current CRTC state and
7071 	 * potentially the stream pointer itself.
7072 	 *
7073 	 * Since the atomic state is swapped within atomic commit and not within
7074 	 * commit tail, this would lead to the new state (that hasn't been
7075 	 * committed yet) being accessed from within the handlers.
7076 	 *
7077 	 * TODO: Fix this so we can do this in commit tail and not have to block
7078 	 * in atomic check.
7079 	 */
7080 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7081 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7082 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7083 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7084 
7085 		if (dm_old_crtc_state->interrupts_enabled &&
7086 		    (!dm_new_crtc_state->interrupts_enabled ||
7087 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7088 			manage_dm_interrupts(adev, acrtc, false);
7089 	}
7090 	/*
7091 	 * Add check here for SoCs that support a hardware cursor plane, to
7092 	 * unset legacy_cursor_update.
7093 	 */
7094 
7095 	return drm_atomic_helper_commit(dev, state, nonblock);
7096 
7097 	/* TODO: Handle EINTR, re-enable IRQ */
7098 }
7099 
7100 /**
7101  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7102  * @state: The atomic state to commit
7103  *
7104  * This will tell DC to commit the constructed DC state from atomic_check,
7105  * programming the hardware. Any failures here implies a hardware failure, since
7106  * atomic check should have filtered anything non-kosher.
7107  */
7108 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7109 {
7110 	struct drm_device *dev = state->dev;
7111 	struct amdgpu_device *adev = dev->dev_private;
7112 	struct amdgpu_display_manager *dm = &adev->dm;
7113 	struct dm_atomic_state *dm_state;
7114 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7115 	uint32_t i, j;
7116 	struct drm_crtc *crtc;
7117 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7118 	unsigned long flags;
7119 	bool wait_for_vblank = true;
7120 	struct drm_connector *connector;
7121 	struct drm_connector_state *old_con_state, *new_con_state;
7122 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7123 	int crtc_disable_count = 0;
7124 
7125 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7126 
7127 	dm_state = dm_atomic_get_new_state(state);
7128 	if (dm_state && dm_state->context) {
7129 		dc_state = dm_state->context;
7130 	} else {
7131 		/* No state changes, retain current state. */
7132 		dc_state_temp = dc_create_state(dm->dc);
7133 		ASSERT(dc_state_temp);
7134 		dc_state = dc_state_temp;
7135 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7136 	}
7137 
7138 	/* update changed items */
7139 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7140 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7141 
7142 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7143 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7144 
7145 		DRM_DEBUG_DRIVER(
7146 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7147 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7148 			"connectors_changed:%d\n",
7149 			acrtc->crtc_id,
7150 			new_crtc_state->enable,
7151 			new_crtc_state->active,
7152 			new_crtc_state->planes_changed,
7153 			new_crtc_state->mode_changed,
7154 			new_crtc_state->active_changed,
7155 			new_crtc_state->connectors_changed);
7156 
7157 		/* Copy all transient state flags into dc state */
7158 		if (dm_new_crtc_state->stream) {
7159 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7160 							    dm_new_crtc_state->stream);
7161 		}
7162 
7163 		/* Handles the headless hotplug case, updating new_state and
7164 		 * aconnector as needed.
7165 		 */
7166 
7167 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7168 
7169 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7170 
7171 			if (!dm_new_crtc_state->stream) {
7172 				/*
7173 				 * This could happen because of issues with
7174 				 * userspace notification delivery: userspace
7175 				 * tries to set a mode on a display that is in
7176 				 * fact disconnected, so dc_sink is NULL on the
7177 				 * aconnector. We expect a mode reset to come
7178 				 * soon.
7179 				 *
7180 				 * This can also happen when an unplug occurs
7181 				 * while the resume sequence is still running.
7182 				 *
7183 				 * In this case, we want to pretend we still
7184 				 * have a sink to keep the pipe running so that
7185 				 * hw state is consistent with the sw state.
7186 				 */
7187 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7188 						__func__, acrtc->base.base.id);
7189 				continue;
7190 			}
7191 
7192 			if (dm_old_crtc_state->stream)
7193 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7194 
7195 			pm_runtime_get_noresume(dev->dev);
7196 
7197 			acrtc->enabled = true;
7198 			acrtc->hw_mode = new_crtc_state->mode;
7199 			crtc->hwmode = new_crtc_state->mode;
7200 		} else if (modereset_required(new_crtc_state)) {
7201 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7202 			/* i.e. reset mode */
7203 			if (dm_old_crtc_state->stream) {
7204 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7205 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7206 
7207 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7208 			}
7209 		}
7210 	} /* for_each_crtc_in_state() */
7211 
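	/*
	 * Commit the constructed global DC state to the hardware in one pass.
	 * Per-CRTC plane updates are programmed separately below via
	 * amdgpu_dm_commit_planes().
	 */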
7212 	if (dc_state) {
7213 		dm_enable_per_frame_crtc_master_sync(dc_state);
7214 		mutex_lock(&dm->dc_lock);
7215 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7216 		mutex_unlock(&dm->dc_lock);
7217 	}
7218 
7219 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7220 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7221 
7222 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7223 
7224 		if (dm_new_crtc_state->stream != NULL) {
7225 			const struct dc_stream_status *status =
7226 					dc_stream_get_status(dm_new_crtc_state->stream);
7227 
7228 			if (!status)
7229 				status = dc_stream_get_status_from_state(dc_state,
7230 									 dm_new_crtc_state->stream);
7231 
7232 			if (!status)
7233 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7234 			else
7235 				acrtc->otg_inst = status->primary_otg_inst;
7236 		}
7237 	}
7238 #ifdef CONFIG_DRM_AMD_DC_HDCP
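	/*
	 * If a connector's stream was torn down while content protection was
	 * ENABLED, drop it back to DESIRED so the HDCP workqueue
	 * re-authenticates once a new stream comes up.
	 */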
7239 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7240 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7241 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7242 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7243 
7244 		new_crtc_state = NULL;
7245 
7246 		if (acrtc)
7247 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7248 
7249 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7250 
7251 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7252 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7253 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7254 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7255 			continue;
7256 		}
7257 
7258 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7259 			hdcp_update_display(
7260 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7261 				new_con_state->hdcp_content_type,
7262 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7263 													 : false);
7264 	}
7265 #endif
7266 
7267 	/* Handle connector state changes */
7268 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7269 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7270 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7271 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7272 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7273 		struct dc_stream_update stream_update;
7274 		struct dc_info_packet hdr_packet;
7275 		struct dc_stream_status *status = NULL;
7276 		bool abm_changed, hdr_changed, scaling_changed;
7277 
7278 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7279 		memset(&stream_update, 0, sizeof(stream_update));
7280 
7281 		if (acrtc) {
7282 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7283 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7284 		}
7285 
7286 		/* Skip any modesets/resets */
7287 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7288 			continue;
7289 
7290 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7291 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7292 
7293 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7294 							     dm_old_con_state);
7295 
7296 		abm_changed = dm_new_crtc_state->abm_level !=
7297 			      dm_old_crtc_state->abm_level;
7298 
7299 		hdr_changed =
7300 			is_hdr_metadata_different(old_con_state, new_con_state);
7301 
7302 		if (!scaling_changed && !abm_changed && !hdr_changed)
7303 			continue;
7304 
7305 		stream_update.stream = dm_new_crtc_state->stream;
7306 		if (scaling_changed) {
7307 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7308 					dm_new_con_state, dm_new_crtc_state->stream);
7309 
7310 			stream_update.src = dm_new_crtc_state->stream->src;
7311 			stream_update.dst = dm_new_crtc_state->stream->dst;
7312 		}
7313 
7314 		if (abm_changed) {
7315 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7316 
7317 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7318 		}
7319 
7320 		if (hdr_changed) {
7321 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7322 			stream_update.hdr_static_metadata = &hdr_packet;
7323 		}
7324 
7325 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7326 		if (WARN_ON(!status) || WARN_ON(!status->plane_count))
7327 			continue;
7328 
7329 		/*
7330 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7331 		 * Here we create an empty update on each plane.
7332 		 * To fix this, DC should permit updating only stream properties.
7333 		 */
7334 		for (j = 0; j < status->plane_count; j++)
7335 			dummy_updates[j].surface = status->plane_states[0];
7336 
7337 
7338 		mutex_lock(&dm->dc_lock);
7339 		dc_commit_updates_for_stream(dm->dc,
7340 						     dummy_updates,
7341 						     status->plane_count,
7342 						     dm_new_crtc_state->stream,
7343 						     &stream_update,
7344 						     dc_state);
7345 		mutex_unlock(&dm->dc_lock);
7346 	}
7347 
7348 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7349 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7350 				      new_crtc_state, i) {
7351 		if (old_crtc_state->active && !new_crtc_state->active)
7352 			crtc_disable_count++;
7353 
7354 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7355 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7356 
7357 		/* Update freesync active state. */
7358 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7359 
7360 		/* Handle vrr on->off / off->on transitions */
7361 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7362 						dm_new_crtc_state);
7363 	}
7364 
7365 	/* Enable interrupts for CRTCs going through a modeset. */
7366 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7367 
7368 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7369 		if (new_crtc_state->async_flip)
7370 			wait_for_vblank = false;
7371 
7372 	/* update planes when needed per crtc*/
7373 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7374 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7375 
7376 		if (dm_new_crtc_state->stream)
7377 			amdgpu_dm_commit_planes(state, dc_state, dev,
7378 						dm, crtc, wait_for_vblank);
7379 	}
7380 
7381 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7382 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7383 
7384 	/* Update audio instances for each connector. */
7385 	amdgpu_dm_commit_audio(dev, state);
7386 
7387 	/*
7388 	 * Send vblank events for any CRTC events not handled in the flip path,
7389 	 * and mark them consumed for drm_atomic_helper_commit_hw_done().
7390 	 */
7391 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7392 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7393 
7394 		if (new_crtc_state->event)
7395 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7396 
7397 		new_crtc_state->event = NULL;
7398 	}
7399 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7400 
7401 	/* Signal HW programming completion */
7402 	drm_atomic_helper_commit_hw_done(state);
7403 
7404 	if (wait_for_vblank)
7405 		drm_atomic_helper_wait_for_flip_done(dev, state);
7406 
7407 	drm_atomic_helper_cleanup_planes(dev, state);
7408 
7409 	/*
7410 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7411 	 * so we can put the GPU into runtime suspend if we're not driving any
7412 	 * displays anymore
7413 	 */
7414 	for (i = 0; i < crtc_disable_count; i++)
7415 		pm_runtime_put_autosuspend(dev->dev);
7416 	pm_runtime_mark_last_busy(dev->dev);
7417 
7418 	if (dc_state_temp)
7419 		dc_release_state(dc_state_temp);
7420 }
7421 
7422 
7423 static int dm_force_atomic_commit(struct drm_connector *connector)
7424 {
7425 	int ret = 0;
7426 	struct drm_device *ddev = connector->dev;
7427 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7428 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7429 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7430 	struct drm_connector_state *conn_state;
7431 	struct drm_crtc_state *crtc_state;
7432 	struct drm_plane_state *plane_state;
7433 
7434 	if (!state)
7435 		return -ENOMEM;
7436 
7437 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7438 
7439 	/* Construct an atomic state to restore previous display setting */
7440 
7441 	/*
7442 	 * Attach connectors to drm_atomic_state
7443 	 */
7444 	conn_state = drm_atomic_get_connector_state(state, connector);
7445 
7446 	ret = PTR_ERR_OR_ZERO(conn_state);
7447 	if (ret)
7448 		goto err;
7449 
7450 	/* Attach the CRTC to drm_atomic_state */
7451 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7452 
7453 	ret = PTR_ERR_OR_ZERO(crtc_state);
7454 	if (ret)
7455 		goto err;
7456 
7457 	/* force a restore */
7458 	crtc_state->mode_changed = true;
7459 
7460 	/* Attach plane to drm_atomic_state */
7461 	plane_state = drm_atomic_get_plane_state(state, plane);
7462 
7463 	ret = PTR_ERR_OR_ZERO(plane_state);
7464 	if (ret)
7465 		goto err;
7466 
7467 
7468 	/* Call commit internally with the state we just constructed */
7469 	ret = drm_atomic_commit(state);
7470 	if (!ret)
7471 		return 0;
7472 
7473 err:
7474 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7475 	drm_atomic_state_put(state);
7476 
7477 	return ret;
7478 }
7479 
7480 /*
7481  * This function handles all cases when set mode does not come upon hotplug.
7482  * This includes when a display is unplugged then plugged back into the
7483  * same port and when running without usermode desktop manager support.
7484  */
7485 void dm_restore_drm_connector_state(struct drm_device *dev,
7486 				    struct drm_connector *connector)
7487 {
7488 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7489 	struct amdgpu_crtc *disconnected_acrtc;
7490 	struct dm_crtc_state *acrtc_state;
7491 
7492 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7493 		return;
7494 
7495 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7496 	if (!disconnected_acrtc)
7497 		return;
7498 
7499 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7500 	if (!acrtc_state->stream)
7501 		return;
7502 
7503 	/*
7504 	 * If the previous sink is not released and is different from the
7505 	 * current one, we deduce that we are in a state where we cannot rely
7506 	 * on a usermode call to turn on the display, so we do it here.
7507 	 */
7508 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7509 		dm_force_atomic_commit(&aconnector->base);
7510 }
7511 
7512 /*
7513  * Grabs all modesetting locks to serialize against any blocking commits,
7514  * and waits for completion of all non-blocking commits.
7515  */
7516 static int do_acquire_global_lock(struct drm_device *dev,
7517 				 struct drm_atomic_state *state)
7518 {
7519 	struct drm_crtc *crtc;
7520 	struct drm_crtc_commit *commit;
7521 	long ret;
7522 
7523 	/*
7524 	 * Adding all modeset locks to acquire_ctx will
7525 	 * ensure that when the framework releases it, the
7526 	 * extra locks we are locking here will get released too.
7527 	 */
7528 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7529 	if (ret)
7530 		return ret;
7531 
7532 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7533 		spin_lock(&crtc->commit_lock);
7534 		commit = list_first_entry_or_null(&crtc->commit_list,
7535 				struct drm_crtc_commit, commit_entry);
7536 		if (commit)
7537 			drm_crtc_commit_get(commit);
7538 		spin_unlock(&crtc->commit_lock);
7539 
7540 		if (!commit)
7541 			continue;
7542 
7543 		/*
7544 		 * Make sure all pending HW programming has completed and
7545 		 * all page flips are done.
7546 		 */
7547 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7548 
7549 		if (ret > 0)
7550 			ret = wait_for_completion_interruptible_timeout(
7551 					&commit->flip_done, 10*HZ);
7552 
7553 		if (ret == 0)
7554 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7555 				  "timed out\n", crtc->base.id, crtc->name);
7556 
7557 		drm_crtc_commit_put(commit);
7558 	}
7559 
7560 	return ret < 0 ? ret : 0;
7561 }
7562 
7563 static void get_freesync_config_for_crtc(
7564 	struct dm_crtc_state *new_crtc_state,
7565 	struct dm_connector_state *new_con_state)
7566 {
7567 	struct mod_freesync_config config = {0};
7568 	struct amdgpu_dm_connector *aconnector =
7569 			to_amdgpu_dm_connector(new_con_state->base.connector);
7570 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7571 	int vrefresh = drm_mode_vrefresh(mode);
7572 
7573 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7574 					vrefresh >= aconnector->min_vfreq &&
7575 					vrefresh <= aconnector->max_vfreq;
7576 
7577 	if (new_crtc_state->vrr_supported) {
7578 		new_crtc_state->stream->ignore_msa_timing_param = true;
7579 		config.state = new_crtc_state->base.vrr_enabled ?
7580 				VRR_STATE_ACTIVE_VARIABLE :
7581 				VRR_STATE_INACTIVE;
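		/*
		 * DC expects the refresh bounds in micro-Hertz (Hz *
		 * 1,000,000); e.g. a 48-144 Hz FreeSync range becomes
		 * 48,000,000-144,000,000 uhz below.
		 */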
7582 		config.min_refresh_in_uhz =
7583 				aconnector->min_vfreq * 1000000;
7584 		config.max_refresh_in_uhz =
7585 				aconnector->max_vfreq * 1000000;
7586 		config.vsif_supported = true;
7587 		config.btr = true;
7588 	}
7589 
7590 	new_crtc_state->freesync_config = config;
7591 }
7592 
7593 static void reset_freesync_config_for_crtc(
7594 	struct dm_crtc_state *new_crtc_state)
7595 {
7596 	new_crtc_state->vrr_supported = false;
7597 
7598 	memset(&new_crtc_state->vrr_params, 0,
7599 	       sizeof(new_crtc_state->vrr_params));
7600 	memset(&new_crtc_state->vrr_infopacket, 0,
7601 	       sizeof(new_crtc_state->vrr_infopacket));
7602 }
7603 
7604 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7605 				struct drm_atomic_state *state,
7606 				struct drm_crtc *crtc,
7607 				struct drm_crtc_state *old_crtc_state,
7608 				struct drm_crtc_state *new_crtc_state,
7609 				bool enable,
7610 				bool *lock_and_validation_needed)
7611 {
7612 	struct dm_atomic_state *dm_state = NULL;
7613 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7614 	struct dc_stream_state *new_stream;
7615 	int ret = 0;
7616 
7617 	/*
7618 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7619 	 * dc_validation_set and can update changed items there.
7620 	 */
7621 	struct amdgpu_crtc *acrtc = NULL;
7622 	struct amdgpu_dm_connector *aconnector = NULL;
7623 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7624 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7625 
7626 	new_stream = NULL;
7627 
7628 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7629 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7630 	acrtc = to_amdgpu_crtc(crtc);
7631 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7632 
7633 	/* TODO This hack should go away */
7634 	if (aconnector && enable) {
7635 		/* Make sure a fake sink is created in the plug-in scenario */
7636 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7637 							    &aconnector->base);
7638 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7639 							    &aconnector->base);
7640 
7641 		if (IS_ERR(drm_new_conn_state)) {
7642 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7643 			goto fail;
7644 		}
7645 
7646 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7647 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7648 
7649 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7650 			goto skip_modeset;
7651 
7652 		new_stream = create_validate_stream_for_sink(aconnector,
7653 							     &new_crtc_state->mode,
7654 							     dm_new_conn_state,
7655 							     dm_old_crtc_state->stream);
7656 
7657 		/*
7658 		 * We can have no stream on ACTION_SET if a display
7659 		 * was disconnected during S3; in this case it is not an
7660 		 * error, the OS will be updated after detection and
7661 		 * will do the right thing on the next atomic commit.
7662 		 */
7663 
7664 		if (!new_stream) {
7665 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7666 					__func__, acrtc->base.base.id);
7667 			ret = -ENOMEM;
7668 			goto fail;
7669 		}
7670 
7671 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7672 
7673 		ret = fill_hdr_info_packet(drm_new_conn_state,
7674 					   &new_stream->hdr_static_metadata);
7675 		if (ret)
7676 			goto fail;
7677 
7678 		/*
7679 		 * If we already removed the old stream from the context
7680 		 * (and set the new stream to NULL) then we can't reuse
7681 		 * the old stream even if the stream and scaling are unchanged.
7682 		 * We'll hit the BUG_ON and black screen.
7683 		 *
7684 		 * TODO: Refactor this function to allow this check to work
7685 		 * in all conditions.
7686 		 */
7687 		if (dm_new_crtc_state->stream &&
7688 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7689 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7690 			new_crtc_state->mode_changed = false;
7691 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7692 					 new_crtc_state->mode_changed);
7693 		}
7694 	}
7695 
7696 	/* mode_changed flag may get updated above, need to check again */
7697 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7698 		goto skip_modeset;
7699 
7700 	DRM_DEBUG_DRIVER(
7701 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7702 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7703 		"connectors_changed:%d\n",
7704 		acrtc->crtc_id,
7705 		new_crtc_state->enable,
7706 		new_crtc_state->active,
7707 		new_crtc_state->planes_changed,
7708 		new_crtc_state->mode_changed,
7709 		new_crtc_state->active_changed,
7710 		new_crtc_state->connectors_changed);
7711 
7712 	/* Remove stream for any changed/disabled CRTC */
7713 	if (!enable) {
7714 
7715 		if (!dm_old_crtc_state->stream)
7716 			goto skip_modeset;
7717 
7718 		ret = dm_atomic_get_state(state, &dm_state);
7719 		if (ret)
7720 			goto fail;
7721 
7722 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7723 				crtc->base.id);
7724 
7725 		/* i.e. reset mode */
7726 		if (dc_remove_stream_from_ctx(
7727 				dm->dc,
7728 				dm_state->context,
7729 				dm_old_crtc_state->stream) != DC_OK) {
7730 			ret = -EINVAL;
7731 			goto fail;
7732 		}
7733 
7734 		dc_stream_release(dm_old_crtc_state->stream);
7735 		dm_new_crtc_state->stream = NULL;
7736 
7737 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7738 
7739 		*lock_and_validation_needed = true;
7740 
7741 	} else {/* Add stream for any updated/enabled CRTC */
7742 		/*
7743 		 * Quick fix to prevent a NULL pointer on new_stream when newly added
7744 		 * MST connectors are not found in the existing crtc_state in chained mode.
7745 		 * TODO: need to dig out the root cause of that
7746 		 */
7747 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7748 			goto skip_modeset;
7749 
7750 		if (modereset_required(new_crtc_state))
7751 			goto skip_modeset;
7752 
7753 		if (modeset_required(new_crtc_state, new_stream,
7754 				     dm_old_crtc_state->stream)) {
7755 
7756 			WARN_ON(dm_new_crtc_state->stream);
7757 
7758 			ret = dm_atomic_get_state(state, &dm_state);
7759 			if (ret)
7760 				goto fail;
7761 
7762 			dm_new_crtc_state->stream = new_stream;
7763 
7764 			dc_stream_retain(new_stream);
7765 
7766 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7767 						crtc->base.id);
7768 
7769 			if (dc_add_stream_to_ctx(
7770 					dm->dc,
7771 					dm_state->context,
7772 					dm_new_crtc_state->stream) != DC_OK) {
7773 				ret = -EINVAL;
7774 				goto fail;
7775 			}
7776 
7777 			*lock_and_validation_needed = true;
7778 		}
7779 	}
7780 
7781 skip_modeset:
7782 	/* Release extra reference */
7783 	if (new_stream)
7784 		dc_stream_release(new_stream);
7785 
7786 	/*
7787 	 * We want to do dc stream updates that do not require a
7788 	 * full modeset below.
7789 	 */
7790 	if (!(enable && aconnector && new_crtc_state->enable &&
7791 	      new_crtc_state->active))
7792 		return 0;
7793 	/*
7794 	 * Given above conditions, the dc state cannot be NULL because:
7795 	 * 1. We're in the process of enabling the CRTC (it has just been
7796 	 *    added to the dc context, or is already on the context),
7797 	 * 2. It has a valid connector attached, and
7798 	 * 3. It is currently active and enabled.
7799 	 * => The dc stream state currently exists.
7800 	 */
7801 	BUG_ON(dm_new_crtc_state->stream == NULL);
7802 
7803 	/* Scaling or underscan settings */
7804 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7805 		update_stream_scaling_settings(
7806 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7807 
7808 	/* ABM settings */
7809 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7810 
7811 	/*
7812 	 * Color management settings. We also update color properties
7813 	 * when a modeset is needed, to ensure it gets reprogrammed.
7814 	 */
7815 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7816 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7817 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7818 		if (ret)
7819 			goto fail;
7820 	}
7821 
7822 	/* Update Freesync settings. */
7823 	get_freesync_config_for_crtc(dm_new_crtc_state,
7824 				     dm_new_conn_state);
7825 
7826 	return ret;
7827 
7828 fail:
7829 	if (new_stream)
7830 		dc_stream_release(new_stream);
7831 	return ret;
7832 }
7833 
7834 static bool should_reset_plane(struct drm_atomic_state *state,
7835 			       struct drm_plane *plane,
7836 			       struct drm_plane_state *old_plane_state,
7837 			       struct drm_plane_state *new_plane_state)
7838 {
7839 	struct drm_plane *other;
7840 	struct drm_plane_state *old_other_state, *new_other_state;
7841 	struct drm_crtc_state *new_crtc_state;
7842 	int i;
7843 
7844 	/*
7845 	 * TODO: Remove this hack once the checks below are sufficient
7846 	 * to determine when we need to reset all the planes on
7847 	 * the stream.
7848 	 */
7849 	if (state->allow_modeset)
7850 		return true;
7851 
7852 	/* Exit early if we know that we're adding or removing the plane. */
7853 	if (old_plane_state->crtc != new_plane_state->crtc)
7854 		return true;
7855 
7856 	/* old crtc == new_crtc == NULL, plane not in context. */
7857 	if (!new_plane_state->crtc)
7858 		return false;
7859 
7860 	new_crtc_state =
7861 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7862 
7863 	if (!new_crtc_state)
7864 		return true;
7865 
7866 	/* CRTC Degamma changes currently require us to recreate planes. */
7867 	if (new_crtc_state->color_mgmt_changed)
7868 		return true;
7869 
7870 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7871 		return true;
7872 
7873 	/*
7874 	 * If there are any new primary or overlay planes being added or
7875 	 * removed then the z-order can potentially change. To ensure
7876 	 * correct z-order and pipe acquisition the current DC architecture
7877 	 * requires us to remove and recreate all existing planes.
7878 	 *
7879 	 * TODO: Come up with a more elegant solution for this.
7880 	 */
7881 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7882 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7883 			continue;
7884 
7885 		if (old_other_state->crtc != new_plane_state->crtc &&
7886 		    new_other_state->crtc != new_plane_state->crtc)
7887 			continue;
7888 
7889 		if (old_other_state->crtc != new_other_state->crtc)
7890 			return true;
7891 
7892 		/* TODO: Remove this once we can handle fast format changes. */
7893 		if (old_other_state->fb && new_other_state->fb &&
7894 		    old_other_state->fb->format != new_other_state->fb->format)
7895 			return true;
7896 	}
7897 
7898 	return false;
7899 }
7900 
7901 static int dm_update_plane_state(struct dc *dc,
7902 				 struct drm_atomic_state *state,
7903 				 struct drm_plane *plane,
7904 				 struct drm_plane_state *old_plane_state,
7905 				 struct drm_plane_state *new_plane_state,
7906 				 bool enable,
7907 				 bool *lock_and_validation_needed)
7908 {
7909 
7910 	struct dm_atomic_state *dm_state = NULL;
7911 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7912 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7913 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7914 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7915 	struct amdgpu_crtc *new_acrtc;
7916 	bool needs_reset;
7917 	int ret = 0;
7918 
7919 
7920 	new_plane_crtc = new_plane_state->crtc;
7921 	old_plane_crtc = old_plane_state->crtc;
7922 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7923 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7924 
7925 	/* TODO: Implement a better atomic check for the cursor plane. */
7926 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7927 		if (!enable || !new_plane_crtc ||
7928 			drm_atomic_plane_disabling(plane->state, new_plane_state))
7929 			return 0;
7930 
7931 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7932 
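		/*
		 * Reject cursor sizes larger than the hardware cursor plane
		 * supports; max_cursor_width/height come from the DC caps
		 * (typically 128 or 256 px depending on the ASIC).
		 */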
7933 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7934 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7935 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7936 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
7937 			return -EINVAL;
7938 		}
7939 
7940 		return 0;
7941 	}
7942 
7943 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7944 					 new_plane_state);
7945 
7946 	/* Remove any changed/removed planes */
7947 	if (!enable) {
7948 		if (!needs_reset)
7949 			return 0;
7950 
7951 		if (!old_plane_crtc)
7952 			return 0;
7953 
7954 		old_crtc_state = drm_atomic_get_old_crtc_state(
7955 				state, old_plane_crtc);
7956 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7957 
7958 		if (!dm_old_crtc_state->stream)
7959 			return 0;
7960 
7961 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7962 				plane->base.id, old_plane_crtc->base.id);
7963 
7964 		ret = dm_atomic_get_state(state, &dm_state);
7965 		if (ret)
7966 			return ret;
7967 
7968 		if (!dc_remove_plane_from_context(
7969 				dc,
7970 				dm_old_crtc_state->stream,
7971 				dm_old_plane_state->dc_state,
7972 				dm_state->context)) {
7973 
7974 			ret = -EINVAL;
7975 			return ret;
7976 		}
7977 
7978 
7979 		dc_plane_state_release(dm_old_plane_state->dc_state);
7980 		dm_new_plane_state->dc_state = NULL;
7981 
7982 		*lock_and_validation_needed = true;
7983 
7984 	} else { /* Add new planes */
7985 		struct dc_plane_state *dc_new_plane_state;
7986 
7987 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7988 			return 0;
7989 
7990 		if (!new_plane_crtc)
7991 			return 0;
7992 
7993 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7994 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7995 
7996 		if (!dm_new_crtc_state->stream)
7997 			return 0;
7998 
7999 		if (!needs_reset)
8000 			return 0;
8001 
8002 		WARN_ON(dm_new_plane_state->dc_state);
8003 
8004 		dc_new_plane_state = dc_create_plane_state(dc);
8005 		if (!dc_new_plane_state)
8006 			return -ENOMEM;
8007 
8008 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8009 				plane->base.id, new_plane_crtc->base.id);
8010 
8011 		ret = fill_dc_plane_attributes(
8012 			new_plane_crtc->dev->dev_private,
8013 			dc_new_plane_state,
8014 			new_plane_state,
8015 			new_crtc_state);
8016 		if (ret) {
8017 			dc_plane_state_release(dc_new_plane_state);
8018 			return ret;
8019 		}
8020 
8021 		ret = dm_atomic_get_state(state, &dm_state);
8022 		if (ret) {
8023 			dc_plane_state_release(dc_new_plane_state);
8024 			return ret;
8025 		}
8026 
8027 		/*
8028 		 * Any atomic check errors that occur after this will
8029 		 * not need a release. The plane state will be attached
8030 		 * to the stream, and therefore part of the atomic
8031 		 * state. It'll be released when the atomic state is
8032 		 * cleaned.
8033 		 */
8034 		if (!dc_add_plane_to_context(
8035 				dc,
8036 				dm_new_crtc_state->stream,
8037 				dc_new_plane_state,
8038 				dm_state->context)) {
8039 
8040 			dc_plane_state_release(dc_new_plane_state);
8041 			return -EINVAL;
8042 		}
8043 
8044 		dm_new_plane_state->dc_state = dc_new_plane_state;
8045 
8046 		/* Tell DC to do a full surface update every time there
8047 		 * is a plane change. Inefficient, but works for now.
8048 		 */
8049 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8050 
8051 		*lock_and_validation_needed = true;
8052 	}
8053 
8054 
8055 	return ret;
8056 }
8057 
8058 static int
8059 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8060 				    struct drm_atomic_state *state,
8061 				    enum surface_update_type *out_type)
8062 {
8063 	struct dc *dc = dm->dc;
8064 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8065 	int i, j, num_plane, ret = 0;
8066 	struct drm_plane_state *old_plane_state, *new_plane_state;
8067 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8068 	struct drm_crtc *new_plane_crtc;
8069 	struct drm_plane *plane;
8070 
8071 	struct drm_crtc *crtc;
8072 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8073 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8074 	struct dc_stream_status *status = NULL;
8075 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8076 	struct surface_info_bundle {
8077 		struct dc_surface_update surface_updates[MAX_SURFACES];
8078 		struct dc_plane_info plane_infos[MAX_SURFACES];
8079 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8080 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8081 		struct dc_stream_update stream_update;
8082 	} *bundle;
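	/*
	 * The bundle is allocated on the heap rather than on the stack:
	 * MAX_SURFACES copies of the update/info structs above are far too
	 * large for a kernel stack frame.
	 */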
8083 
8084 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8085 
8086 	if (!bundle) {
8087 		DRM_ERROR("Failed to allocate update bundle\n");
8088 		/* Set type to FULL to avoid crashing in DC*/
8089 		update_type = UPDATE_TYPE_FULL;
8090 		goto cleanup;
8091 	}
8092 
8093 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8094 
8095 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8096 
8097 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8098 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8099 		num_plane = 0;
8100 
8101 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8102 			update_type = UPDATE_TYPE_FULL;
8103 			goto cleanup;
8104 		}
8105 
8106 		if (!new_dm_crtc_state->stream)
8107 			continue;
8108 
8109 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8110 			const struct amdgpu_framebuffer *amdgpu_fb =
8111 				to_amdgpu_framebuffer(new_plane_state->fb);
8112 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8113 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8114 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8115 			uint64_t tiling_flags;
8116 
8117 			new_plane_crtc = new_plane_state->crtc;
8118 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8119 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8120 
8121 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8122 				continue;
8123 
8124 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8125 				update_type = UPDATE_TYPE_FULL;
8126 				goto cleanup;
8127 			}
8128 
8129 			if (crtc != new_plane_crtc)
8130 				continue;
8131 
8132 			bundle->surface_updates[num_plane].surface =
8133 					new_dm_plane_state->dc_state;
8134 
8135 			if (new_crtc_state->mode_changed) {
8136 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8137 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8138 			}
8139 
8140 			if (new_crtc_state->color_mgmt_changed) {
8141 				bundle->surface_updates[num_plane].gamma =
8142 						new_dm_plane_state->dc_state->gamma_correction;
8143 				bundle->surface_updates[num_plane].in_transfer_func =
8144 						new_dm_plane_state->dc_state->in_transfer_func;
8145 				bundle->stream_update.gamut_remap =
8146 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8147 				bundle->stream_update.output_csc_transform =
8148 						&new_dm_crtc_state->stream->csc_color_matrix;
8149 				bundle->stream_update.out_transfer_func =
8150 						new_dm_crtc_state->stream->out_transfer_func;
8151 			}
8152 
8153 			ret = fill_dc_scaling_info(new_plane_state,
8154 						   scaling_info);
8155 			if (ret)
8156 				goto cleanup;
8157 
8158 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8159 
8160 			if (amdgpu_fb) {
8161 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8162 				if (ret)
8163 					goto cleanup;
8164 
8165 				ret = fill_dc_plane_info_and_addr(
8166 					dm->adev, new_plane_state, tiling_flags,
8167 					plane_info,
8168 					&flip_addr->address,
8169 					false);
8170 				if (ret)
8171 					goto cleanup;
8172 
8173 				bundle->surface_updates[num_plane].plane_info = plane_info;
8174 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8175 			}
8176 
8177 			num_plane++;
8178 		}
8179 
8180 		if (num_plane == 0)
8181 			continue;
8182 
8183 		ret = dm_atomic_get_state(state, &dm_state);
8184 		if (ret)
8185 			goto cleanup;
8186 
8187 		old_dm_state = dm_atomic_get_old_state(state);
8188 		if (!old_dm_state) {
8189 			ret = -EINVAL;
8190 			goto cleanup;
8191 		}
8192 
8193 		status = dc_stream_get_status_from_state(old_dm_state->context,
8194 							 new_dm_crtc_state->stream);
8195 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8196 		/*
8197 		 * TODO: DC modifies the surface during this call so we need
8198 		 * to lock here - find a way to do this without locking.
8199 		 */
8200 		mutex_lock(&dm->dc_lock);
8201 		update_type = dc_check_update_surfaces_for_stream(
8202 				dc,	bundle->surface_updates, num_plane,
8203 				&bundle->stream_update, status);
8204 		mutex_unlock(&dm->dc_lock);
8205 
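		/*
		 * Anything above UPDATE_TYPE_MED is promoted to a full update,
		 * which short-circuits the scan: a single stream requiring a
		 * full update makes the whole commit a full update.
		 */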
8206 		if (update_type > UPDATE_TYPE_MED) {
8207 			update_type = UPDATE_TYPE_FULL;
8208 			goto cleanup;
8209 		}
8210 	}
8211 
8212 cleanup:
8213 	kfree(bundle);
8214 
8215 	*out_type = update_type;
8216 	return ret;
8217 }
8218 
8219 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8220 {
8221 	struct drm_connector *connector;
8222 	struct drm_connector_state *conn_state;
8223 	struct amdgpu_dm_connector *aconnector = NULL;
8224 	int i;
8225 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8226 		if (conn_state->crtc != crtc)
8227 			continue;
8228 
8229 		aconnector = to_amdgpu_dm_connector(connector);
8230 		if (!aconnector->port || !aconnector->mst_port)
8231 			aconnector = NULL;
8232 		else
8233 			break;
8234 	}
8235 
8236 	if (!aconnector)
8237 		return 0;
8238 
8239 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8240 }
8241 
8242 /**
8243  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8244  * @dev: The DRM device
8245  * @state: The atomic state to commit
8246  *
8247  * Validate that the given atomic state is programmable by DC into hardware.
8248  * This involves constructing a &struct dc_state reflecting the new hardware
8249  * state we wish to commit, then querying DC to see if it is programmable. It's
8250  * important not to modify the existing DC state. Otherwise, atomic_check
8251  * may unexpectedly commit hardware changes.
8252  *
8253  * When validating the DC state, it's important that the right locks are
8254  * acquired. For the full-update case, which removes/adds/updates streams on
8255  * one CRTC while flipping on another, acquiring the global lock guarantees
8256  * that any such full-update commit will wait for completion of any
8257  * outstanding flips using DRM's synchronization events. See
8258  * dm_determine_update_type_for_commit()
8259  *
8260  * Note that DM adds the affected connectors for all CRTCs in state, when that
8261  * might not seem necessary. This is because DC stream creation requires the
8262  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8263  * be possible but non-trivial - a possible TODO item.
8264  *
8265  * Return: 0 on success, or a negative error code if validation failed.
8266  */
8267 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8268 				  struct drm_atomic_state *state)
8269 {
8270 	struct amdgpu_device *adev = dev->dev_private;
8271 	struct dm_atomic_state *dm_state = NULL;
8272 	struct dc *dc = adev->dm.dc;
8273 	struct drm_connector *connector;
8274 	struct drm_connector_state *old_con_state, *new_con_state;
8275 	struct drm_crtc *crtc;
8276 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8277 	struct drm_plane *plane;
8278 	struct drm_plane_state *old_plane_state, *new_plane_state;
8279 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8280 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8281 
8282 	int ret, i;
8283 
8284 	/*
8285 	 * This bool will be set to true for any modeset/reset
8286 	 * or plane update which implies a non-fast surface update.
8287 	 */
8288 	bool lock_and_validation_needed = false;
8289 
8290 	ret = drm_atomic_helper_check_modeset(dev, state);
8291 	if (ret)
8292 		goto fail;
8293 
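	/*
	 * On DSC-capable ASICs, a modeset on one MST CRTC can change the
	 * DSC/bandwidth allocation of sibling streams on the same MST
	 * topology, so those CRTCs must be pulled into the state as well.
	 */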
8294 	if (adev->asic_type >= CHIP_NAVI10) {
8295 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8296 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8297 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8298 				if (ret)
8299 					goto fail;
8300 			}
8301 		}
8302 	}
8303 
8304 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8305 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8306 		    !new_crtc_state->color_mgmt_changed &&
8307 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8308 			continue;
8309 
8310 		if (!new_crtc_state->enable)
8311 			continue;
8312 
8313 		ret = drm_atomic_add_affected_connectors(state, crtc);
8314 		if (ret)
8315 			return ret;
8316 
8317 		ret = drm_atomic_add_affected_planes(state, crtc);
8318 		if (ret)
8319 			goto fail;
8320 	}
8321 
8322 	/*
8323 	 * Add all primary and overlay planes on the CRTC to the state
8324 	 * whenever a plane is enabled to maintain correct z-ordering
8325 	 * and to enable fast surface updates.
8326 	 */
8327 	drm_for_each_crtc(crtc, dev) {
8328 		bool modified = false;
8329 
8330 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8331 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8332 				continue;
8333 
8334 			if (new_plane_state->crtc == crtc ||
8335 			    old_plane_state->crtc == crtc) {
8336 				modified = true;
8337 				break;
8338 			}
8339 		}
8340 
8341 		if (!modified)
8342 			continue;
8343 
8344 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8345 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8346 				continue;
8347 
8348 			new_plane_state =
8349 				drm_atomic_get_plane_state(state, plane);
8350 
8351 			if (IS_ERR(new_plane_state)) {
8352 				ret = PTR_ERR(new_plane_state);
8353 				goto fail;
8354 			}
8355 		}
8356 	}
8357 
8358 	/* Remove existing planes if they are modified */
8359 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8360 		ret = dm_update_plane_state(dc, state, plane,
8361 					    old_plane_state,
8362 					    new_plane_state,
8363 					    false,
8364 					    &lock_and_validation_needed);
8365 		if (ret)
8366 			goto fail;
8367 	}
8368 
8369 	/* Disable all crtcs which require disable */
8370 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8371 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8372 					   old_crtc_state,
8373 					   new_crtc_state,
8374 					   false,
8375 					   &lock_and_validation_needed);
8376 		if (ret)
8377 			goto fail;
8378 	}
8379 
8380 	/* Enable all crtcs which require enable */
8381 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8382 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8383 					   old_crtc_state,
8384 					   new_crtc_state,
8385 					   true,
8386 					   &lock_and_validation_needed);
8387 		if (ret)
8388 			goto fail;
8389 	}
8390 
8391 	/* Add new/modified planes */
8392 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8393 		ret = dm_update_plane_state(dc, state, plane,
8394 					    old_plane_state,
8395 					    new_plane_state,
8396 					    true,
8397 					    &lock_and_validation_needed);
8398 		if (ret)
8399 			goto fail;
8400 	}
8401 
8402 	/* Run this here since we want to validate the streams we created */
8403 	ret = drm_atomic_helper_check_planes(dev, state);
8404 	if (ret)
8405 		goto fail;
8406 
8407 	if (state->legacy_cursor_update) {
8408 		/*
8409 		 * This is a fast cursor update coming from the plane update
8410 		 * helper, check if it can be done asynchronously for better
8411 		 * performance.
8412 		 */
8413 		state->async_update =
8414 			!drm_atomic_helper_async_check(dev, state);
8415 
8416 		/*
8417 		 * Skip the remaining global validation if this is an async
8418 		 * update. Cursor updates can be done without affecting
8419 		 * state or bandwidth calcs and this avoids the performance
8420 		 * penalty of locking the private state object and
8421 		 * allocating a new dc_state.
8422 		 */
8423 		if (state->async_update)
8424 			return 0;
8425 	}
8426 
8427 	/* Check scaling and underscan changes */
8428 	/* TODO: Scaling-change validation was removed due to the inability to
8429 	 * commit a new stream into the context without causing a full reset.
8430 	 * Need to decide how to handle this.
8431 	 */
8432 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8433 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8434 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8435 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8436 
8437 		/* Skip any modesets/resets */
8438 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8439 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8440 			continue;
8441 
8442 		/* Skip anything that is not a scaling or underscan change */
8443 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8444 			continue;
8445 
8446 		overall_update_type = UPDATE_TYPE_FULL;
8447 		lock_and_validation_needed = true;
8448 	}
8449 
8450 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8451 	if (ret)
8452 		goto fail;
8453 
8454 	if (overall_update_type < update_type)
8455 		overall_update_type = update_type;
8456 
8457 	/*
8458 	 * lock_and_validation_needed was an old way to determine if we need to set
8459 	 * the global lock. Leaving it in to check if we broke any corner cases:
8460 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8461 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8462 	 */
8463 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8464 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8465 
8466 	if (overall_update_type > UPDATE_TYPE_FAST) {
8467 		ret = dm_atomic_get_state(state, &dm_state);
8468 		if (ret)
8469 			goto fail;
8470 
8471 		ret = do_acquire_global_lock(dev, state);
8472 		if (ret)
8473 			goto fail;
8474 
8475 #if defined(CONFIG_DRM_AMD_DC_DCN)
8476 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
8478 
8479 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8480 		if (ret)
8481 			goto fail;
8482 #endif
8483 
8484 		/*
8485 		 * Perform validation of MST topology in the state:
8486 		 * We need to perform MST atomic check before calling
8487 		 * dc_validate_global_state(), or we risk getting stuck in
8488 		 * an infinite loop and eventually hanging.
8489 		 */
8490 		ret = drm_dp_mst_atomic_check(state);
8491 		if (ret)
8492 			goto fail;
8493 
8494 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8495 			ret = -EINVAL;
8496 			goto fail;
8497 		}
8498 	} else {
8499 		/*
8500 		 * The commit is a fast update. Fast updates shouldn't change
8501 		 * the DC context or affect global validation, and can have their
8502 		 * commit work done in parallel with other commits not touching
8503 		 * the same resource. If we have a new DC context as part of
8504 		 * the DM atomic state from validation we need to free it and
8505 		 * retain the existing one instead.
8506 		 */
8507 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8508 
8509 		new_dm_state = dm_atomic_get_new_state(state);
8510 		old_dm_state = dm_atomic_get_old_state(state);
8511 
8512 		if (new_dm_state && old_dm_state) {
8513 			if (new_dm_state->context)
8514 				dc_release_state(new_dm_state->context);
8515 
8516 			new_dm_state->context = old_dm_state->context;
8517 
8518 			if (old_dm_state->context)
8519 				dc_retain_state(old_dm_state->context);
8520 		}
8521 	}
8522 
8523 	/* Store the overall update type for use later during the commit. */
8524 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8525 		struct dm_crtc_state *dm_new_crtc_state =
8526 			to_dm_crtc_state(new_crtc_state);
8527 
8528 		dm_new_crtc_state->update_type = (int)overall_update_type;
8529 	}
8530 
8531 	/* Must be a success: any error should have jumped to the fail label. */
8532 	WARN_ON(ret);
8533 	return ret;
8534 
8535 fail:
8536 	if (ret == -EDEADLK)
8537 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8538 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8539 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8540 	else
8541 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8542 
8543 	return ret;
8544 }
8545 
8546 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8547 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8548 {
8549 	uint8_t dpcd_data;
8550 	bool capable = false;
8551 
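	/*
	 * The MSA_TIMING_PAR_IGNORED bit (read here from the
	 * DP_DOWN_STREAM_PORT_COUNT DPCD field) indicates that the sink can
	 * ignore the MSA video timing parameters, a prerequisite for
	 * variable refresh (FreeSync) over DP.
	 */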
8552 	if (amdgpu_dm_connector->dc_link &&
8553 		dm_helpers_dp_read_dpcd(
8554 				NULL,
8555 				amdgpu_dm_connector->dc_link,
8556 				DP_DOWN_STREAM_PORT_COUNT,
8557 				&dpcd_data,
8558 				sizeof(dpcd_data))) {
8559 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
8560 	}
8561 
8562 	return capable;
8563 }

8564 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8565 					struct edid *edid)
8566 {
8567 	int i;
8568 	bool edid_check_required;
8569 	struct detailed_timing *timing;
8570 	struct detailed_non_pixel *data;
8571 	struct detailed_data_monitor_range *range;
8572 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8573 			to_amdgpu_dm_connector(connector);
8574 	struct dm_connector_state *dm_con_state = NULL;
8575 
8576 	struct drm_device *dev = connector->dev;
8577 	struct amdgpu_device *adev = dev->dev_private;
8578 	bool freesync_capable = false;
8579 
8580 	if (!connector->state) {
8581 		DRM_ERROR("%s - Connector has no state\n", __func__);
8582 		goto update;
8583 	}
8584 
8585 	if (!edid) {
8586 		dm_con_state = to_dm_connector_state(connector->state);
8587 
8588 		amdgpu_dm_connector->min_vfreq = 0;
8589 		amdgpu_dm_connector->max_vfreq = 0;
8590 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8591 
8592 		goto update;
8593 	}
8594 
8595 	dm_con_state = to_dm_connector_state(connector->state);
8596 
8597 	edid_check_required = false;
8598 	if (!amdgpu_dm_connector->dc_sink) {
8599 		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
8600 		goto update;
8601 	}
8602 	if (!adev->dm.freesync_module)
8603 		goto update;
8604 	/*
8605 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
8606 	 */
8607 	if (edid) {
8608 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8609 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8610 			edid_check_required = is_dp_capable_without_timing_msa(
8611 						adev->dm.dc,
8612 						amdgpu_dm_connector);
8613 		}
8614 	}
8615 	if (edid_check_required && (edid->version > 1 ||
8616 	   (edid->version == 1 && edid->revision > 1))) {
8617 		for (i = 0; i < 4; i++) {
8619 			timing	= &edid->detailed_timings[i];
8620 			data	= &timing->data.other_data;
8621 			range	= &data->data.range;
8622 			/*
8623 			 * Check if the monitor has a continuous frequency mode
8624 			 */
8625 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8626 				continue;
8627 			/*
8628 			 * Check for the range-limits-only flag: if flags == 1,
8629 			 * no additional timing information is provided.
8630 			 * Default GTF, GTF secondary curve and CVT are not
8631 			 * supported.
8632 			 */
8633 			if (range->flags != 1)
8634 				continue;
8635 
8636 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8637 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
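			/*
			 * The EDID range descriptor stores the maximum pixel
			 * clock in units of 10 MHz; multiply to get MHz.
			 */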
8638 			amdgpu_dm_connector->pixel_clock_mhz =
8639 				range->pixel_clock_mhz * 10;
8640 			break;
8641 		}
8642 
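		/*
		 * Advertise FreeSync only when the reported refresh range
		 * spans more than 10 Hz; a narrower window is presumed too
		 * small to be useful for VRR.
		 */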
8643 		if (amdgpu_dm_connector->max_vfreq -
8644 		    amdgpu_dm_connector->min_vfreq > 10)
8645 			freesync_capable = true;
8648 	}
8649 
8650 update:
8651 	if (dm_con_state)
8652 		dm_con_state->freesync_capable = freesync_capable;
8653 
8654 	if (connector->vrr_capable_property)
8655 		drm_connector_set_vrr_capable_property(connector,
8656 						       freesync_capable);
8657 }
8658 
8659 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8660 {
8661 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8662 
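	/*
	 * DP_PSR_SUPPORT is the first byte of the eDP PSR receiver capability
	 * block; a non-zero value encodes the PSR version the sink supports.
	 */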
8663 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8664 		return;
8665 	if (link->type == dc_connection_none)
8666 		return;
8667 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8668 					dpcd_data, sizeof(dpcd_data))) {
8669 		link->psr_feature_enabled = dpcd_data[0] != 0;
8670 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8671 	}
8672 }
8673 
8674 /*
8675  * amdgpu_dm_link_setup_psr() - configure the PSR link
8676  * @stream: stream state
8677  *
8678  * Return: true on success
8679  */
8680 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8681 {
8682 	struct dc_link *link = NULL;
8683 	struct psr_config psr_config = {0};
8684 	struct psr_context psr_context = {0};
8685 	struct dc *dc = NULL;
8686 	bool ret = false;
8687 
8688 	if (stream == NULL)
8689 		return false;
8690 
8691 	link = stream->link;
8692 	dc = link->ctx->dc;
8693 
8694 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8695 
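	/* A psr_version of 0 means the DMCU firmware provides no PSR support. */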
8696 	if (psr_config.psr_version > 0) {
8697 		psr_config.psr_exit_link_training_required = 0x1;
8698 		psr_config.psr_frame_capture_indication_req = 0;
8699 		psr_config.psr_rfb_setup_time = 0x37;
8700 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8701 		psr_config.allow_smu_optimizations = 0x0;
8702 
8703 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8705 	}
8706 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8707 
8708 	return ret;
8709 }
8710 
8711 /*
8712  * amdgpu_dm_psr_enable() - enable the PSR firmware
8713  * @stream: stream state
8714  *
8715  * Return: true on success
8716  */
8717 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8718 {
8719 	struct dc_link *link = stream->link;
8720 	unsigned int vsync_rate_hz = 0;
8721 	struct dc_static_screen_params params = {0};
8722 	/*
8723 	 * Calculate the number of static frames before generating an
8724 	 * interrupt to enter PSR; fail-safe default is two static frames.
8725 	 */
8726 	unsigned int num_frames_static = 2;
8727 
8728 	DRM_DEBUG_DRIVER("Enabling PSR...\n");
8729 
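	/*
	 * The nominal vertical refresh rate in Hz is
	 * pixel clock / (h_total * v_total); pix_clk_100hz is kept in
	 * 100 Hz units, hence the multiplication by 100.
	 */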
8730 	vsync_rate_hz = div64_u64(div64_u64((
8731 			stream->timing.pix_clk_100hz * 100),
8732 			stream->timing.v_total),
8733 			stream->timing.h_total);
8734 
8735 	/*
8736 	 * Round up: calculate the number of frames such that at least
8737 	 * 30 ms of static time has passed.
8738 	 */
8739 	if (vsync_rate_hz != 0) {
8740 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8741 		num_frames_static = (30000 / frame_time_microsec) + 1;
8742 	}
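	/*
	 * For illustration, assuming a 120 Hz mode: frame_time_microsec is
	 * 1000000 / 120 = 8333, so num_frames_static = 30000 / 8333 + 1 = 4,
	 * i.e. about 33 ms of static screen before PSR entry.
	 */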
8743 
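	/*
	 * Cursor, overlay and surface updates are treated as screen activity
	 * for static-screen detection.
	 */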
8744 	params.triggers.cursor_update = true;
8745 	params.triggers.overlay_update = true;
8746 	params.triggers.surface_update = true;
8747 	params.num_frames = num_frames_static;
8748 
8749 	dc_stream_set_static_screen_params(link->ctx->dc,
8750 					   &stream, 1,
8751 					   &params);
8752 
8753 	return dc_link_set_psr_allow_active(link, true, false);
8754 }
8755 
8756 /*
8757  * amdgpu_dm_psr_disable() - disable the PSR firmware
8758  * @stream: stream state
8759  *
8760  * Return: true on success
8761  */
8762 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8763 {
8765 	DRM_DEBUG_DRIVER("Disabling PSR...\n");
8766 
8767 	return dc_link_set_psr_allow_active(stream->link, false, true);
8768 }
8769