/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
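
/*
 * Sketch of the DMUB firmware image layout as implied by the offset math
 * in dm_dmub_hw_init() below (illustrative only, not an authoritative
 * format description):
 *
 *   ucode_array_offset_bytes
 *   v
 *   | PSP header (0x100) | inst_const payload | PSP footer (0x100) | bss/data |
 *   |<--------------- inst_const_bytes ---------------------------->|
 *
 * fw_inst_const points just past the PSP header, and fw_inst_const_size
 * excludes both the header and the footer.
 */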

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
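
/*
 * A simplified picture of that flow (illustrative only):
 *
 *   userspace --> DRM atomic API
 *             --> amdgpu_dm_atomic_check() / amdgpu_dm_atomic_commit()
 *             --> DC stream/plane programming
 *   DC interrupts --> dm_crtc_high_irq() / dm_pflip_high_irq()
 *             --> DRM vblank and pageflip events back to userspace
 */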

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display-related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);


		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
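
/*
 * Worked example of the reg-format packing above (illustrative values,
 * not taken from hardware): h_position = 960 (0x3c0) and v_position =
 * 540 (0x21c) pack into *position = 0x21c | (0x3c0 << 16) = 0x03c0021c.
 */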

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters carrying the device and IRQ source
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after the end of the
		 * front-porch in vrr mode, as vblank timestamping only gives
		 * valid results after the front-porch in that mode. This also
		 * delivers any page-flip completion events that were queued to
		 * us because a pageflip happened inside the front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state),
			 acrtc_state->active_planes);

	/*
	 * Core vblank handling at the start of the front-porch is only
	 * possible in non-vrr mode, as only then does vblank timestamping
	 * give valid results while still inside the front-porch. Otherwise
	 * defer it to dm_vupdate_high_irq after the end of the front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
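
/*
 * Sizing example for the allocation above (hypothetical mode, with the
 * 4 bytes per pixel the code assumes): an eDP mode with htotal = 2200
 * and vtotal = 1125 gives max_size = 2,475,000, so the compressor BO is
 * roughly 2,475,000 * 4 = ~9.9 MB of GTT.
 */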

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

#ifdef notyet
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
#endif

650 
651 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
652 {
653 	int i, ret;
654 
655 	if (!amdgpu_audio)
656 		return 0;
657 
658 	adev->mode_info.audio.enabled = true;
659 
660 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
661 
662 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
663 		adev->mode_info.audio.pin[i].channels = -1;
664 		adev->mode_info.audio.pin[i].rate = -1;
665 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
666 		adev->mode_info.audio.pin[i].status_bits = 0;
667 		adev->mode_info.audio.pin[i].category_code = 0;
668 		adev->mode_info.audio.pin[i].connected = false;
669 		adev->mode_info.audio.pin[i].id =
670 			adev->dm.dc->res_pool->audios[i]->inst;
671 		adev->mode_info.audio.pin[i].offset = 0;
672 	}
673 
674 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
675 	if (ret < 0)
676 		return ret;
677 
678 	adev->dm.audio_registered = true;
679 
680 	return 0;
681 }
682 
683 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
684 {
685 	if (!amdgpu_audio)
686 		return;
687 
688 	if (!adev->mode_info.audio.enabled)
689 		return;
690 
691 	if (adev->dm.audio_registered) {
692 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
693 		adev->dm.audio_registered = false;
694 	}
695 
696 	/* TODO: Disable audio? */
697 
698 	adev->mode_info.audio.enabled = false;
699 }
700 
701 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
702 {
703 	struct drm_audio_component *acomp = adev->dm.audio_component;
704 
705 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
706 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
707 
708 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
709 						 pin, -1);
710 	}
711 }

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	rw_init(&adev->dm.dc_lock, "dmdc");
	rw_init(&adev->dm.audio_lock, "dmaud");

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against a NULL dc, e.g. when dc_create() failed in init. */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
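
/*
 * Firmware accounting sketch for the ERAM/INTV split above (hypothetical
 * sizes, for illustration only): with ucode_size_bytes = 0x9000 and
 * intv_size_bytes = 0x1000, the ERAM region adds roundup2(0x8000,
 * PAGE_SIZE) to adev->firmware.fw_size and the INTV region adds
 * roundup2(0x1000, PAGE_SIZE).
 */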

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret;

	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
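	/*
	 * Since 0xFFFF = 15 * 0x1111, the loop above produces an exact
	 * linear ramp: linear_lut[i] = i * 0x1111, i.e. 0x0000, 0x1111,
	 * ..., 0xFFFF.
	 */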

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	ret = dmcu_load_iram(dmcu, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	mutex_lock(&smu->mutex);

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_write_watermarks_table(smu);

		if (ret) {
			mutex_unlock(&smu->mutex);
			DRM_ERROR("Failed to update WMTABLE!\n");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	int i, r;

	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	r = dm_dmub_hw_init(adev);
	if (r)
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none)
			emulated_link_detect(aconnector->dc_link);
		else
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
 */
1770 
1771 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1772 	.name = "dm",
1773 	.early_init = dm_early_init,
1774 	.late_init = dm_late_init,
1775 	.sw_init = dm_sw_init,
1776 	.sw_fini = dm_sw_fini,
1777 	.hw_init = dm_hw_init,
1778 	.hw_fini = dm_hw_fini,
1779 	.suspend = dm_suspend,
1780 	.resume = dm_resume,
1781 	.is_idle = dm_is_idle,
1782 	.wait_for_idle = dm_wait_for_idle,
1783 	.check_soft_reset = dm_check_soft_reset,
1784 	.soft_reset = dm_soft_reset,
1785 	.set_clockgating_state = dm_set_clockgating_state,
1786 	.set_powergating_state = dm_set_powergating_state,
1787 };
1788 
1789 const struct amdgpu_ip_block_version dm_ip_block =
1790 {
1791 	.type = AMD_IP_BLOCK_TYPE_DCE,
1792 	.major = 1,
1793 	.minor = 0,
1794 	.rev = 0,
1795 	.funcs = &amdgpu_dm_funcs,
1796 };
1797 
1798 
1799 /**
1800  * DOC: atomic
1801  *
1802  * *WIP*
1803  */
1804 
1805 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1806 	.fb_create = amdgpu_display_user_framebuffer_create,
1807 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1808 	.atomic_check = amdgpu_dm_atomic_check,
1809 	.atomic_commit = amdgpu_dm_atomic_commit,
1810 };
1811 
1812 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1813 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1814 };
1815 
1816 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1817 {
1818 	u32 max_cll, min_cll, max, min, q, r;
1819 	struct amdgpu_dm_backlight_caps *caps;
1820 	struct amdgpu_display_manager *dm;
1821 	struct drm_connector *conn_base;
1822 	struct amdgpu_device *adev;
1823 	struct dc_link *link = NULL;
1824 	static const u8 pre_computed_values[] = {
1825 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1826 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1827 
1828 	if (!aconnector || !aconnector->dc_link)
1829 		return;
1830 
1831 	link = aconnector->dc_link;
1832 	if (link->connector_signal != SIGNAL_TYPE_EDP)
1833 		return;
1834 
1835 	conn_base = &aconnector->base;
1836 	adev = conn_base->dev->dev_private;
1837 	dm = &adev->dm;
1838 	caps = &dm->backlight_caps;
1839 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1840 	caps->aux_support = false;
1841 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1842 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1843 
1844 	if (caps->ext_caps->bits.oled == 1 ||
1845 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1846 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1847 		caps->aux_support = true;
1848 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. By Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r. Substituting this into the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute 50*2**(r/32) for each r. The values were generated
	 * with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values.
	 */
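	/*
	 * Worked example (illustrative): for max_cll = 70, q = 70 >> 5 = 2
	 * and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] =
	 * 4 * 57 = 228, which matches round(50 * 2**(70/32)) = 228.
	 */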
1864 	q = max_cll >> 5;
1865 	r = max_cll % 32;
1866 	max = (1 << q) * pre_computed_values[r];
1867 
1868 	// min luminance: maxLum * (CV/255)^2 / 100
1869 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1870 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
1871 
1872 	caps->aux_max_input_signal = max;
1873 	caps->aux_min_input_signal = min;
1874 }
1875 
1876 void amdgpu_dm_update_connector_after_detect(
1877 		struct amdgpu_dm_connector *aconnector)
1878 {
1879 	struct drm_connector *connector = &aconnector->base;
1880 	struct drm_device *dev = connector->dev;
1881 	struct dc_sink *sink;
1882 
1883 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
1885 		return;
1886 
1887 
1888 	sink = aconnector->dc_link->local_sink;
1889 	if (sink)
1890 		dc_sink_retain(sink);
1891 
1892 	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on link status.
1895 	 * Skip if already done during boot.
1896 	 */
1897 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1898 			&& aconnector->dc_em_sink) {
1899 
1900 		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because connector->sink is set to NULL
		 * on resume.
1903 		 */
1904 		mutex_lock(&dev->mode_config.mutex);
1905 
1906 		if (sink) {
1907 			if (aconnector->dc_sink) {
1908 				amdgpu_dm_update_freesync_caps(connector, NULL);
1909 				/*
				 * The retain and release below bump the
				 * refcount on the sink: the link no longer
				 * points to it after disconnect, so without
				 * this the next crtc-to-connector reshuffle
				 * by the UMD would trigger an unwanted
				 * dc_sink release.
1914 				 */
1915 				dc_sink_release(aconnector->dc_sink);
1916 			}
1917 			aconnector->dc_sink = sink;
1918 			dc_sink_retain(aconnector->dc_sink);
1919 			amdgpu_dm_update_freesync_caps(connector,
1920 					aconnector->edid);
1921 		} else {
1922 			amdgpu_dm_update_freesync_caps(connector, NULL);
1923 			if (!aconnector->dc_sink) {
1924 				aconnector->dc_sink = aconnector->dc_em_sink;
1925 				dc_sink_retain(aconnector->dc_sink);
1926 			}
1927 		}
1928 
1929 		mutex_unlock(&dev->mode_config.mutex);
1930 
1931 		if (sink)
1932 			dc_sink_release(sink);
1933 		return;
1934 	}
1935 
1936 	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
1939 	 */
1940 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1941 		dc_sink_release(sink);
1942 		return;
1943 	}
1944 
1945 	if (aconnector->dc_sink == sink) {
1946 		/*
1947 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1948 		 * Do nothing!!
1949 		 */
1950 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1951 				aconnector->connector_id);
1952 		if (sink)
1953 			dc_sink_release(sink);
1954 		return;
1955 	}
1956 
1957 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1958 		aconnector->connector_id, aconnector->dc_sink, sink);
1959 
1960 	mutex_lock(&dev->mode_config.mutex);
1961 
1962 	/*
1963 	 * 1. Update status of the drm connector
1964 	 * 2. Send an event and let userspace tell us what to do
1965 	 */
1966 	if (sink) {
1967 		/*
1968 		 * TODO: check if we still need the S3 mode update workaround.
1969 		 * If yes, put it here.
1970 		 */
1971 		if (aconnector->dc_sink)
1972 			amdgpu_dm_update_freesync_caps(connector, NULL);
1973 
1974 		aconnector->dc_sink = sink;
1975 		dc_sink_retain(aconnector->dc_sink);
1976 		if (sink->dc_edid.length == 0) {
1977 			aconnector->edid = NULL;
1978 			if (aconnector->dc_link->aux_mode) {
1979 				drm_dp_cec_unset_edid(
1980 					&aconnector->dm_dp_aux.aux);
1981 			}
1982 		} else {
1983 			aconnector->edid =
1984 				(struct edid *)sink->dc_edid.raw_edid;
1985 
1986 			drm_connector_update_edid_property(connector,
1987 							   aconnector->edid);
1988 			drm_add_edid_modes(connector, aconnector->edid);
1989 
1990 			if (aconnector->dc_link->aux_mode)
1991 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1992 						    aconnector->edid);
1993 		}
1994 
1995 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1996 		update_connector_ext_caps(aconnector);
1997 	} else {
1998 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1999 		amdgpu_dm_update_freesync_caps(connector, NULL);
2000 		drm_connector_update_edid_property(connector, NULL);
2001 		aconnector->num_modes = 0;
2002 		dc_sink_release(aconnector->dc_sink);
2003 		aconnector->dc_sink = NULL;
2004 		aconnector->edid = NULL;
2005 #ifdef CONFIG_DRM_AMD_DC_HDCP
2006 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2007 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2008 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2009 #endif
2010 	}
2011 
2012 	mutex_unlock(&dev->mode_config.mutex);
2013 
2014 	if (sink)
2015 		dc_sink_release(sink);
2016 }
2017 
2018 static void handle_hpd_irq(void *param)
2019 {
2020 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2021 	struct drm_connector *connector = &aconnector->base;
2022 	struct drm_device *dev = connector->dev;
2023 	enum dc_connection_type new_connection_type = dc_connection_none;
2024 #ifdef CONFIG_DRM_AMD_DC_HDCP
2025 	struct amdgpu_device *adev = dev->dev_private;
2026 #endif
2027 
2028 	/*
	 * On failure, or for MST, there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
2031 	 */
2032 	mutex_lock(&aconnector->hpd_lock);
2033 
2034 #ifdef CONFIG_DRM_AMD_DC_HDCP
2035 	if (adev->dm.hdcp_workqueue)
2036 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2037 #endif
2038 	if (aconnector->fake_enable)
2039 		aconnector->fake_enable = false;
2040 
2041 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2042 		DRM_ERROR("KMS: Failed to detect connector\n");
2043 
2044 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2045 		emulated_link_detect(aconnector->dc_link);
2046 
2047 
2048 		drm_modeset_lock_all(dev);
2049 		dm_restore_drm_connector_state(dev, connector);
2050 		drm_modeset_unlock_all(dev);
2051 
2052 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2053 			drm_kms_helper_hotplug_event(dev);
2054 
2055 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2056 		amdgpu_dm_update_connector_after_detect(aconnector);
2057 
2058 
2059 		drm_modeset_lock_all(dev);
2060 		dm_restore_drm_connector_state(dev, connector);
2061 		drm_modeset_unlock_all(dev);
2062 
2063 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2064 			drm_kms_helper_hotplug_event(dev);
2065 	}
2066 	mutex_unlock(&aconnector->hpd_lock);
2067 
2068 }
2069 
2070 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2071 {
2072 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2073 	uint8_t dret;
2074 	bool new_irq_handled = false;
2075 	int dpcd_addr;
2076 	int dpcd_bytes_to_read;
2077 
2078 	const int max_process_count = 30;
2079 	int process_count = 0;
2080 
2081 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2082 
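	/*
	 * DPCD revision 1.2 introduced the ESI registers, which MST-capable
	 * sinks use for sideband IRQ reporting; older sinks only expose the
	 * legacy registers around 0x200.
	 */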
2083 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2084 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2085 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2086 		dpcd_addr = DP_SINK_COUNT;
2087 	} else {
2088 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2089 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2090 		dpcd_addr = DP_SINK_COUNT_ESI;
2091 	}
2092 
2093 	dret = drm_dp_dpcd_read(
2094 		&aconnector->dm_dp_aux.aux,
2095 		dpcd_addr,
2096 		esi,
2097 		dpcd_bytes_to_read);
2098 
2099 	while (dret == dpcd_bytes_to_read &&
2100 		process_count < max_process_count) {
2101 		uint8_t retry;
2102 		dret = 0;
2103 
2104 		process_count++;
2105 
2106 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2107 		/* handle HPD short pulse irq */
2108 		if (aconnector->mst_mgr.mst_state)
2109 			drm_dp_mst_hpd_irq(
2110 				&aconnector->mst_mgr,
2111 				esi,
2112 				&new_irq_handled);
2113 
2114 		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream sink */
2116 			const int ack_dpcd_bytes_to_write =
2117 				dpcd_bytes_to_read - 1;
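			/*
			 * The write below starts at dpcd_addr + 1, skipping
			 * the leading sink-count byte; only the IRQ vector
			 * and status bytes are written back as the ACK.
			 */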
2118 
2119 			for (retry = 0; retry < 3; retry++) {
2120 				uint8_t wret;
2121 
2122 				wret = drm_dp_dpcd_write(
2123 					&aconnector->dm_dp_aux.aux,
2124 					dpcd_addr + 1,
2125 					&esi[1],
2126 					ack_dpcd_bytes_to_write);
2127 				if (wret == ack_dpcd_bytes_to_write)
2128 					break;
2129 			}
2130 
2131 			/* check if there is new irq to be handled */
2132 			dret = drm_dp_dpcd_read(
2133 				&aconnector->dm_dp_aux.aux,
2134 				dpcd_addr,
2135 				esi,
2136 				dpcd_bytes_to_read);
2137 
2138 			new_irq_handled = false;
2139 		} else {
2140 			break;
2141 		}
2142 	}
2143 
2144 	if (process_count == max_process_count)
2145 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2146 }
2147 
2148 static void handle_hpd_rx_irq(void *param)
2149 {
2150 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2151 	struct drm_connector *connector = &aconnector->base;
2152 	struct drm_device *dev = connector->dev;
2153 	struct dc_link *dc_link = aconnector->dc_link;
2154 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2155 	enum dc_connection_type new_connection_type = dc_connection_none;
2156 #ifdef CONFIG_DRM_AMD_DC_HDCP
2157 	union hpd_irq_data hpd_irq_data;
2158 	struct amdgpu_device *adev = dev->dev_private;
2159 
2160 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2161 #endif
2162 
2163 	/*
	 * TODO: Temporarily take a mutex to protect the HPD interrupt from
	 * GPIO conflicts; once the i2c helper is implemented, this mutex
	 * should be retired.
2167 	 */
2168 	if (dc_link->type != dc_connection_mst_branch)
2169 		mutex_lock(&aconnector->hpd_lock);
2170 
2171 
2172 #ifdef CONFIG_DRM_AMD_DC_HDCP
2173 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2174 #else
2175 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2176 #endif
2177 			!is_mst_root_connector) {
2178 		/* Downstream Port status changed. */
2179 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2180 			DRM_ERROR("KMS: Failed to detect connector\n");
2181 
2182 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2183 			emulated_link_detect(dc_link);
2184 
2185 			if (aconnector->fake_enable)
2186 				aconnector->fake_enable = false;
2187 
2188 			amdgpu_dm_update_connector_after_detect(aconnector);
2189 
2190 
2191 			drm_modeset_lock_all(dev);
2192 			dm_restore_drm_connector_state(dev, connector);
2193 			drm_modeset_unlock_all(dev);
2194 
2195 			drm_kms_helper_hotplug_event(dev);
2196 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2197 
2198 			if (aconnector->fake_enable)
2199 				aconnector->fake_enable = false;
2200 
2201 			amdgpu_dm_update_connector_after_detect(aconnector);
2202 
2203 
2204 			drm_modeset_lock_all(dev);
2205 			dm_restore_drm_connector_state(dev, connector);
2206 			drm_modeset_unlock_all(dev);
2207 
2208 			drm_kms_helper_hotplug_event(dev);
2209 		}
2210 	}
2211 #ifdef CONFIG_DRM_AMD_DC_HDCP
2212 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2213 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2215 	}
2216 #endif
2217 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2218 	    (dc_link->type == dc_connection_mst_branch))
2219 		dm_handle_hpd_rx_irq(aconnector);
2220 
2221 	if (dc_link->type != dc_connection_mst_branch) {
2222 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2223 		mutex_unlock(&aconnector->hpd_lock);
2224 	}
2225 }
2226 
2227 static void register_hpd_handlers(struct amdgpu_device *adev)
2228 {
2229 	struct drm_device *dev = adev->ddev;
2230 	struct drm_connector *connector;
2231 	struct amdgpu_dm_connector *aconnector;
2232 	const struct dc_link *dc_link;
2233 	struct dc_interrupt_params int_params = {0};
2234 
2235 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2236 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2237 
2238 	list_for_each_entry(connector,
2239 			&dev->mode_config.connector_list, head)	{
2240 
2241 		aconnector = to_amdgpu_dm_connector(connector);
2242 		dc_link = aconnector->dc_link;
2243 
2244 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2245 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2246 			int_params.irq_source = dc_link->irq_source_hpd;
2247 
2248 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2249 					handle_hpd_irq,
2250 					(void *) aconnector);
2251 		}
2252 
2253 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2254 
2255 			/* Also register for DP short pulse (hpd_rx). */
2256 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2257 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2258 
2259 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2260 					handle_hpd_rx_irq,
2261 					(void *) aconnector);
2262 		}
2263 	}
2264 }
2265 
2266 /* Register IRQ sources and initialize IRQ callbacks */
2267 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2268 {
2269 	struct dc *dc = adev->dm.dc;
2270 	struct common_irq_params *c_irq_params;
2271 	struct dc_interrupt_params int_params = {0};
2272 	int r;
2273 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2275 
2276 	if (adev->asic_type >= CHIP_VEGA10)
2277 		client_id = SOC15_IH_CLIENTID_DCE;
2278 
2279 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2280 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2281 
2282 	/*
2283 	 * Actions of amdgpu_irq_add_id():
2284 	 * 1. Register a set() function with base driver.
2285 	 *    Base driver will call set() function to enable/disable an
2286 	 *    interrupt in DC hardware.
2287 	 * 2. Register amdgpu_dm_irq_handler().
2288 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2289 	 *    coming from DC hardware.
2290 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2292 
2293 	/* Use VBLANK interrupt */
2294 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2295 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2296 		if (r) {
2297 			DRM_ERROR("Failed to add crtc irq id!\n");
2298 			return r;
2299 		}
2300 
2301 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2302 		int_params.irq_source =
2303 			dc_interrupt_to_irq_source(dc, i, 0);
2304 
2305 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2306 
2307 		c_irq_params->adev = adev;
2308 		c_irq_params->irq_src = int_params.irq_source;
2309 
2310 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2311 				dm_crtc_high_irq, c_irq_params);
2312 	}
2313 
2314 	/* Use VUPDATE interrupt */
2315 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2316 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2317 		if (r) {
2318 			DRM_ERROR("Failed to add vupdate irq id!\n");
2319 			return r;
2320 		}
2321 
2322 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2323 		int_params.irq_source =
2324 			dc_interrupt_to_irq_source(dc, i, 0);
2325 
2326 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2327 
2328 		c_irq_params->adev = adev;
2329 		c_irq_params->irq_src = int_params.irq_source;
2330 
2331 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2332 				dm_vupdate_high_irq, c_irq_params);
2333 	}
2334 
2335 	/* Use GRPH_PFLIP interrupt */
2336 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2337 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2338 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2339 		if (r) {
2340 			DRM_ERROR("Failed to add page flip irq id!\n");
2341 			return r;
2342 		}
2343 
2344 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2345 		int_params.irq_source =
2346 			dc_interrupt_to_irq_source(dc, i, 0);
2347 
2348 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2349 
2350 		c_irq_params->adev = adev;
2351 		c_irq_params->irq_src = int_params.irq_source;
2352 
2353 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2354 				dm_pflip_high_irq, c_irq_params);
2355 
2356 	}
2357 
2358 	/* HPD */
2359 	r = amdgpu_irq_add_id(adev, client_id,
2360 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2361 	if (r) {
2362 		DRM_ERROR("Failed to add hpd irq id!\n");
2363 		return r;
2364 	}
2365 
2366 	register_hpd_handlers(adev);
2367 
2368 	return 0;
2369 }
2370 
2371 #if defined(CONFIG_DRM_AMD_DC_DCN)
2372 /* Register IRQ sources and initialize IRQ callbacks */
2373 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2374 {
2375 	struct dc *dc = adev->dm.dc;
2376 	struct common_irq_params *c_irq_params;
2377 	struct dc_interrupt_params int_params = {0};
2378 	int r;
2379 	int i;
2380 
2381 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2382 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2383 
2384 	/*
2385 	 * Actions of amdgpu_irq_add_id():
2386 	 * 1. Register a set() function with base driver.
2387 	 *    Base driver will call set() function to enable/disable an
2388 	 *    interrupt in DC hardware.
2389 	 * 2. Register amdgpu_dm_irq_handler().
2390 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2391 	 *    coming from DC hardware.
2392 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2393 	 *    for acknowledging and handling.
2394 	 */
2395 
2396 	/* Use VSTARTUP interrupt */
2397 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2398 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2399 			i++) {
2400 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2401 
2402 		if (r) {
2403 			DRM_ERROR("Failed to add crtc irq id!\n");
2404 			return r;
2405 		}
2406 
2407 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2408 		int_params.irq_source =
2409 			dc_interrupt_to_irq_source(dc, i, 0);
2410 
2411 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2412 
2413 		c_irq_params->adev = adev;
2414 		c_irq_params->irq_src = int_params.irq_source;
2415 
2416 		amdgpu_dm_irq_register_interrupt(
2417 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2418 	}
2419 
2420 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at the end of each vblank, regardless of the state of
	 * the lock, matching DCE behaviour.
2424 	 */
2425 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2426 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2427 	     i++) {
2428 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2429 
2430 		if (r) {
2431 			DRM_ERROR("Failed to add vupdate irq id!\n");
2432 			return r;
2433 		}
2434 
2435 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2436 		int_params.irq_source =
2437 			dc_interrupt_to_irq_source(dc, i, 0);
2438 
2439 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2440 
2441 		c_irq_params->adev = adev;
2442 		c_irq_params->irq_src = int_params.irq_source;
2443 
2444 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2445 				dm_vupdate_high_irq, c_irq_params);
2446 	}
2447 
2448 	/* Use GRPH_PFLIP interrupt */
2449 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2450 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2451 			i++) {
2452 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2453 		if (r) {
2454 			DRM_ERROR("Failed to add page flip irq id!\n");
2455 			return r;
2456 		}
2457 
2458 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2459 		int_params.irq_source =
2460 			dc_interrupt_to_irq_source(dc, i, 0);
2461 
2462 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2463 
2464 		c_irq_params->adev = adev;
2465 		c_irq_params->irq_src = int_params.irq_source;
2466 
2467 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2468 				dm_pflip_high_irq, c_irq_params);
2469 
2470 	}
2471 
2472 	/* HPD */
2473 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2474 			&adev->hpd_irq);
2475 	if (r) {
2476 		DRM_ERROR("Failed to add hpd irq id!\n");
2477 		return r;
2478 	}
2479 
2480 	register_hpd_handlers(adev);
2481 
2482 	return 0;
2483 }
2484 #endif
2485 
2486 /*
2487  * Acquires the lock for the atomic state object and returns
2488  * the new atomic state.
2489  *
2490  * This should only be called during atomic check.
2491  */
2492 static int dm_atomic_get_state(struct drm_atomic_state *state,
2493 			       struct dm_atomic_state **dm_state)
2494 {
2495 	struct drm_device *dev = state->dev;
2496 	struct amdgpu_device *adev = dev->dev_private;
2497 	struct amdgpu_display_manager *dm = &adev->dm;
2498 	struct drm_private_state *priv_state;
2499 
2500 	if (*dm_state)
2501 		return 0;
2502 
2503 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2504 	if (IS_ERR(priv_state))
2505 		return PTR_ERR(priv_state);
2506 
2507 	*dm_state = to_dm_atomic_state(priv_state);
2508 
2509 	return 0;
2510 }
2511 
2512 struct dm_atomic_state *
2513 dm_atomic_get_new_state(struct drm_atomic_state *state)
2514 {
2515 	struct drm_device *dev = state->dev;
2516 	struct amdgpu_device *adev = dev->dev_private;
2517 	struct amdgpu_display_manager *dm = &adev->dm;
2518 	struct drm_private_obj *obj;
2519 	struct drm_private_state *new_obj_state;
2520 	int i;
2521 
2522 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2523 		if (obj->funcs == dm->atomic_obj.funcs)
2524 			return to_dm_atomic_state(new_obj_state);
2525 	}
2526 
2527 	return NULL;
2528 }
2529 
2530 struct dm_atomic_state *
2531 dm_atomic_get_old_state(struct drm_atomic_state *state)
2532 {
2533 	struct drm_device *dev = state->dev;
2534 	struct amdgpu_device *adev = dev->dev_private;
2535 	struct amdgpu_display_manager *dm = &adev->dm;
2536 	struct drm_private_obj *obj;
2537 	struct drm_private_state *old_obj_state;
2538 	int i;
2539 
2540 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2541 		if (obj->funcs == dm->atomic_obj.funcs)
2542 			return to_dm_atomic_state(old_obj_state);
2543 	}
2544 
2545 	return NULL;
2546 }
2547 
2548 static struct drm_private_state *
2549 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2550 {
2551 	struct dm_atomic_state *old_state, *new_state;
2552 
2553 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2554 	if (!new_state)
2555 		return NULL;
2556 
2557 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2558 
2559 	old_state = to_dm_atomic_state(obj->state);
2560 
2561 	if (old_state && old_state->context)
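	/*
	 * Copy the current DC state so that atomic check can modify the new
	 * context without disturbing the state committed to hardware.
	 */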
2562 		new_state->context = dc_copy_state(old_state->context);
2563 
2564 	if (!new_state->context) {
2565 		kfree(new_state);
2566 		return NULL;
2567 	}
2568 
2569 	return &new_state->base;
2570 }
2571 
2572 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2573 				    struct drm_private_state *state)
2574 {
2575 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2576 
2577 	if (dm_state && dm_state->context)
2578 		dc_release_state(dm_state->context);
2579 
2580 	kfree(dm_state);
2581 }
2582 
2583 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2584 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2585 	.atomic_destroy_state = dm_atomic_destroy_state,
2586 };
2587 
2588 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2589 {
2590 	struct dm_atomic_state *state;
2591 	int r;
2592 
2593 	adev->mode_info.mode_config_initialized = true;
2594 
2595 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2596 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2597 
2598 	adev->ddev->mode_config.max_width = 16384;
2599 	adev->ddev->mode_config.max_height = 16384;
2600 
2601 	adev->ddev->mode_config.preferred_depth = 24;
2602 	adev->ddev->mode_config.prefer_shadow = 1;
2603 	/* indicates support for immediate flip */
2604 	adev->ddev->mode_config.async_page_flip = true;
2605 
2606 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2607 
2608 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2609 	if (!state)
2610 		return -ENOMEM;
2611 
2612 	state->context = dc_create_state(adev->dm.dc);
2613 	if (!state->context) {
2614 		kfree(state);
2615 		return -ENOMEM;
2616 	}
2617 
2618 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2619 
2620 	drm_atomic_private_obj_init(adev->ddev,
2621 				    &adev->dm.atomic_obj,
2622 				    &state->base,
2623 				    &dm_atomic_state_funcs);
2624 
2625 	r = amdgpu_display_modeset_create_props(adev);
2626 	if (r)
2627 		return r;
2628 
2629 	r = amdgpu_dm_audio_init(adev);
2630 	if (r)
2631 		return r;
2632 
2633 	return 0;
2634 }
2635 
2636 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2637 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2638 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2639 
2640 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2641 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2642 
2643 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2644 {
2645 #if defined(CONFIG_ACPI)
2646 	struct amdgpu_dm_backlight_caps caps;
2647 
2648 	if (dm->backlight_caps.caps_valid)
2649 		return;
2650 
2651 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2652 	if (caps.caps_valid) {
2653 		dm->backlight_caps.caps_valid = true;
2654 		if (caps.aux_support)
2655 			return;
2656 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2657 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2658 	} else {
2659 		dm->backlight_caps.min_input_signal =
2660 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2661 		dm->backlight_caps.max_input_signal =
2662 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2663 	}
2664 #else
2665 	if (dm->backlight_caps.aux_support)
2666 		return;
2667 
2668 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2669 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2670 #endif
2671 }
2672 
2673 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2674 {
2675 	bool rc;
2676 
2677 	if (!link)
2678 		return 1;
2679 
2680 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2681 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2682 
2683 	return rc ? 0 : 1;
2684 }
2685 
2686 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2687 			      const uint32_t user_brightness)
2688 {
2689 	u32 min, max, conversion_pace;
2690 	u32 brightness = user_brightness;
2691 
2692 	if (!caps)
2693 		goto out;
2694 
2695 	if (!caps->aux_support) {
2696 		max = caps->max_input_signal;
2697 		min = caps->min_input_signal;
2698 		/*
		 * The brightness input is in the range 0-255.
		 * It needs to be rescaled to lie between the requested min
		 * and max input signal, and it also needs to be scaled up
		 * by 0x101 to match the DC interface, which has a range of
		 * 0 to 0xffff.
2705 		 */
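		/*
		 * Worked example (illustrative, assuming AMDGPU_MAX_BL_LEVEL
		 * is 255): with min = 12 and max = 255, user_brightness = 255
		 * maps to 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 =
		 * 0xffff, and user_brightness = 0 maps to 12 * 0x101 = 3084.
		 */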
2706 		conversion_pace = 0x101;
2707 		brightness =
2708 			user_brightness
2709 			* conversion_pace
2710 			* (max - min)
2711 			/ AMDGPU_MAX_BL_LEVEL
2712 			+ min * conversion_pace;
2713 	} else {
2714 		/* TODO
2715 		 * We are doing a linear interpolation here, which is OK but
2716 		 * does not provide the optimal result. We probably want
2717 		 * something close to the Perceptual Quantizer (PQ) curve.
2718 		 */
2719 		max = caps->aux_max_input_signal;
2720 		min = caps->aux_min_input_signal;
2721 
2722 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
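		/*
		 * Illustrative (assuming AMDGPU_MAX_BL_LEVEL is 255): with
		 * min = 50 and max = 228 nits, user_brightness = 255 yields
		 * 228000 millinits (228 nits) and user_brightness = 0 yields
		 * 50000 millinits (50 nits).
		 */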
2723 			       + user_brightness * max;
2724 		// Multiple the value by 1000 since we use millinits
		// Multiply the value by 1000 since we use millinits
2726 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2727 	}
2728 
2729 out:
2730 	return brightness;
2731 }
2732 
2733 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2734 {
2735 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2736 	struct amdgpu_dm_backlight_caps caps;
2737 	struct dc_link *link = NULL;
2738 	u32 brightness;
2739 	bool rc;
2740 
2741 	amdgpu_dm_update_backlight_caps(dm);
2742 	caps = dm->backlight_caps;
2743 
2744 	link = (struct dc_link *)dm->backlight_link;
2745 
2746 	brightness = convert_brightness(&caps, bd->props.brightness);
2747 	// Change brightness based on AUX property
2748 	if (caps.aux_support)
2749 		return set_backlight_via_aux(link, brightness);
2750 
2751 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2752 
2753 	return rc ? 0 : 1;
2754 }
2755 
2756 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2757 {
2758 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2759 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2760 
2761 	if (ret == DC_ERROR_UNEXPECTED)
2762 		return bd->props.brightness;
2763 	return ret;
2764 }
2765 
2766 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2767 	.options = BL_CORE_SUSPENDRESUME,
2768 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2769 	.update_status	= amdgpu_dm_backlight_update_status,
2770 };
2771 
2772 static void
2773 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2774 {
2775 	char bl_name[16];
2776 	struct backlight_properties props = { 0 };
2777 
2778 	amdgpu_dm_update_backlight_caps(dm);
2779 
2780 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2781 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2782 	props.type = BACKLIGHT_RAW;
2783 
2784 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2785 			dm->adev->ddev->primary->index);
2786 
2787 	dm->backlight_dev = backlight_device_register(bl_name,
2788 			dm->adev->ddev->dev,
2789 			dm,
2790 			&amdgpu_dm_backlight_ops,
2791 			&props);
2792 
2793 	if (IS_ERR(dm->backlight_dev))
2794 		DRM_ERROR("DM: Backlight registration failed!\n");
2795 	else
2796 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2797 }
2798 
2799 #endif
2800 
2801 static int initialize_plane(struct amdgpu_display_manager *dm,
2802 			    struct amdgpu_mode_info *mode_info, int plane_id,
2803 			    enum drm_plane_type plane_type,
2804 			    const struct dc_plane_cap *plane_cap)
2805 {
2806 	struct drm_plane *plane;
2807 	unsigned long possible_crtcs;
2808 	int ret = 0;
2809 
2810 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2811 	if (!plane) {
2812 		DRM_ERROR("KMS: Failed to allocate plane\n");
2813 		return -ENOMEM;
2814 	}
2815 	plane->type = plane_type;
2816 
2817 	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC on planes that will not be used as a primary
	 * plane for a CRTC, such as overlay or underlay planes.
2822 	 */
2823 	possible_crtcs = 1 << plane_id;
2824 	if (plane_id >= dm->dc->caps.max_streams)
2825 		possible_crtcs = 0xff;
2826 
2827 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2828 
2829 	if (ret) {
2830 		DRM_ERROR("KMS: Failed to initialize plane\n");
2831 		kfree(plane);
2832 		return ret;
2833 	}
2834 
2835 	if (mode_info)
2836 		mode_info->planes[plane_id] = plane;
2837 
2838 	return ret;
2839 }
2840 
2841 
2842 static void register_backlight_device(struct amdgpu_display_manager *dm,
2843 				      struct dc_link *link)
2844 {
2845 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2846 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2847 
2848 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2849 	    link->type != dc_connection_none) {
2850 		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
2854 		 */
2855 		amdgpu_dm_register_backlight_device(dm);
2856 
2857 		if (dm->backlight_dev)
2858 			dm->backlight_link = link;
2859 	}
2860 #endif
2861 }
2862 
2863 
2864 /*
2865  * In this architecture, the association
2866  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
2869  *
2870  * Returns 0 on success
2871  */
2872 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2873 {
2874 	struct amdgpu_display_manager *dm = &adev->dm;
2875 	int32_t i;
2876 	struct amdgpu_dm_connector *aconnector = NULL;
2877 	struct amdgpu_encoder *aencoder = NULL;
2878 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2879 	uint32_t link_cnt;
2880 	int32_t primary_planes;
2881 	enum dc_connection_type new_connection_type = dc_connection_none;
2882 	const struct dc_plane_cap *plane;
2883 
2884 	link_cnt = dm->dc->caps.max_links;
2885 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2886 		DRM_ERROR("DM: Failed to initialize mode config\n");
2887 		return -EINVAL;
2888 	}
2889 
2890 	/* There is one primary plane per CRTC */
2891 	primary_planes = dm->dc->caps.max_streams;
2892 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2893 
2894 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
2896 	 * Order is reversed to match iteration order in atomic check.
2897 	 */
2898 	for (i = (primary_planes - 1); i >= 0; i--) {
2899 		plane = &dm->dc->caps.planes[i];
2900 
2901 		if (initialize_plane(dm, mode_info, i,
2902 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2903 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2904 			goto fail;
2905 		}
2906 	}
2907 
2908 	/*
2909 	 * Initialize overlay planes, index starting after primary planes.
2910 	 * These planes have a higher DRM index than the primary planes since
2911 	 * they should be considered as having a higher z-order.
2912 	 * Order is reversed to match iteration order in atomic check.
2913 	 *
2914 	 * Only support DCN for now, and only expose one so we don't encourage
2915 	 * userspace to use up all the pipes.
2916 	 */
2917 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2918 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2919 
2920 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2921 			continue;
2922 
2923 		if (!plane->blends_with_above || !plane->blends_with_below)
2924 			continue;
2925 
2926 		if (!plane->pixel_format_support.argb8888)
2927 			continue;
2928 
2929 		if (initialize_plane(dm, NULL, primary_planes + i,
2930 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2931 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2932 			goto fail;
2933 		}
2934 
2935 		/* Only create one overlay plane. */
2936 		break;
2937 	}
2938 
2939 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2940 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2941 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2942 			goto fail;
2943 		}
2944 
2945 	dm->display_indexes_num = dm->dc->caps.max_streams;
2946 
2947 	/* loops over all connectors on the board */
2948 	for (i = 0; i < link_cnt; i++) {
2949 		struct dc_link *link = NULL;
2950 
2951 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2952 			DRM_ERROR(
2953 				"KMS: Cannot support more than %d display indexes\n",
2954 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2955 			continue;
2956 		}
2957 
2958 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2959 		if (!aconnector)
2960 			goto fail;
2961 
2962 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2963 		if (!aencoder)
2964 			goto fail;
2965 
2966 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2967 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2968 			goto fail;
2969 		}
2970 
2971 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2972 			DRM_ERROR("KMS: Failed to initialize connector\n");
2973 			goto fail;
2974 		}
2975 
2976 		link = dc_get_link_at_index(dm->dc, i);
2977 
2978 		if (!dc_link_detect_sink(link, &new_connection_type))
2979 			DRM_ERROR("KMS: Failed to detect connector\n");
2980 
2981 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2982 			emulated_link_detect(link);
2983 			amdgpu_dm_update_connector_after_detect(aconnector);
2984 
2985 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2986 			amdgpu_dm_update_connector_after_detect(aconnector);
2987 			register_backlight_device(dm, link);
2988 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2989 				amdgpu_dm_set_psr_caps(link);
2990 		}
2991 
2992 
2993 	}
2994 
2995 	/* Software is initialized. Now we can register interrupt handlers. */
2996 	switch (adev->asic_type) {
2997 	case CHIP_BONAIRE:
2998 	case CHIP_HAWAII:
2999 	case CHIP_KAVERI:
3000 	case CHIP_KABINI:
3001 	case CHIP_MULLINS:
3002 	case CHIP_TONGA:
3003 	case CHIP_FIJI:
3004 	case CHIP_CARRIZO:
3005 	case CHIP_STONEY:
3006 	case CHIP_POLARIS11:
3007 	case CHIP_POLARIS10:
3008 	case CHIP_POLARIS12:
3009 	case CHIP_VEGAM:
3010 	case CHIP_VEGA10:
3011 	case CHIP_VEGA12:
3012 	case CHIP_VEGA20:
3013 		if (dce110_register_irq_handlers(dm->adev)) {
3014 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3015 			goto fail;
3016 		}
3017 		break;
3018 #if defined(CONFIG_DRM_AMD_DC_DCN)
3019 	case CHIP_RAVEN:
3020 	case CHIP_NAVI12:
3021 	case CHIP_NAVI10:
3022 	case CHIP_NAVI14:
3023 	case CHIP_RENOIR:
3024 		if (dcn10_register_irq_handlers(dm->adev)) {
3025 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3026 			goto fail;
3027 		}
3028 		break;
3029 #endif
3030 	default:
3031 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3032 		goto fail;
3033 	}
3034 
3035 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3037 
3038 	/* No userspace support. */
3039 	dm->dc->debug.disable_tri_buf = true;
3040 
3041 	return 0;
3042 fail:
3043 	kfree(aencoder);
3044 	kfree(aconnector);
3045 
3046 	return -EINVAL;
3047 }
3048 
3049 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3050 {
3051 	drm_mode_config_cleanup(dm->ddev);
3052 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3053 	return;
3054 }
3055 
3056 /******************************************************************************
3057  * amdgpu_display_funcs functions
3058  *****************************************************************************/
3059 
3060 /*
3061  * dm_bandwidth_update - program display watermarks
3062  *
3063  * @adev: amdgpu_device pointer
3064  *
3065  * Calculate and program the display watermarks and line buffer allocation.
3066  */
3067 static void dm_bandwidth_update(struct amdgpu_device *adev)
3068 {
3069 	/* TODO: implement later */
3070 }
3071 
3072 static const struct amdgpu_display_funcs dm_display_funcs = {
3073 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3074 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3075 	.backlight_set_level = NULL, /* never called for DC */
3076 	.backlight_get_level = NULL, /* never called for DC */
3077 	.hpd_sense = NULL,/* called unconditionally */
3078 	.hpd_set_polarity = NULL, /* called unconditionally */
3079 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3080 	.page_flip_get_scanoutpos =
3081 		dm_crtc_get_scanoutpos,/* called unconditionally */
3082 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3083 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3084 };
3085 
3086 #if defined(CONFIG_DEBUG_KERNEL_DC)
3087 
3088 static ssize_t s3_debug_store(struct device *device,
3089 			      struct device_attribute *attr,
3090 			      const char *buf,
3091 			      size_t count)
3092 {
3093 	int ret;
3094 	int s3_state;
3095 	struct drm_device *drm_dev = dev_get_drvdata(device);
3096 	struct amdgpu_device *adev = drm_dev->dev_private;
3097 
3098 	ret = kstrtoint(buf, 0, &s3_state);
3099 
3100 	if (ret == 0) {
3101 		if (s3_state) {
3102 			dm_resume(adev);
3103 			drm_kms_helper_hotplug_event(adev->ddev);
3104 		} else
3105 			dm_suspend(adev);
3106 	}
3107 
3108 	return ret == 0 ? count : 0;
3109 }
3110 
3111 DEVICE_ATTR_WO(s3_debug);
3112 
3113 #endif
3114 
3115 static int dm_early_init(void *handle)
3116 {
3117 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3118 
3119 	switch (adev->asic_type) {
3120 	case CHIP_BONAIRE:
3121 	case CHIP_HAWAII:
3122 		adev->mode_info.num_crtc = 6;
3123 		adev->mode_info.num_hpd = 6;
3124 		adev->mode_info.num_dig = 6;
3125 		break;
3126 	case CHIP_KAVERI:
3127 		adev->mode_info.num_crtc = 4;
3128 		adev->mode_info.num_hpd = 6;
3129 		adev->mode_info.num_dig = 7;
3130 		break;
3131 	case CHIP_KABINI:
3132 	case CHIP_MULLINS:
3133 		adev->mode_info.num_crtc = 2;
3134 		adev->mode_info.num_hpd = 6;
3135 		adev->mode_info.num_dig = 6;
3136 		break;
3137 	case CHIP_FIJI:
3138 	case CHIP_TONGA:
3139 		adev->mode_info.num_crtc = 6;
3140 		adev->mode_info.num_hpd = 6;
3141 		adev->mode_info.num_dig = 7;
3142 		break;
3143 	case CHIP_CARRIZO:
3144 		adev->mode_info.num_crtc = 3;
3145 		adev->mode_info.num_hpd = 6;
3146 		adev->mode_info.num_dig = 9;
3147 		break;
3148 	case CHIP_STONEY:
3149 		adev->mode_info.num_crtc = 2;
3150 		adev->mode_info.num_hpd = 6;
3151 		adev->mode_info.num_dig = 9;
3152 		break;
3153 	case CHIP_POLARIS11:
3154 	case CHIP_POLARIS12:
3155 		adev->mode_info.num_crtc = 5;
3156 		adev->mode_info.num_hpd = 5;
3157 		adev->mode_info.num_dig = 5;
3158 		break;
3159 	case CHIP_POLARIS10:
3160 	case CHIP_VEGAM:
3161 		adev->mode_info.num_crtc = 6;
3162 		adev->mode_info.num_hpd = 6;
3163 		adev->mode_info.num_dig = 6;
3164 		break;
3165 	case CHIP_VEGA10:
3166 	case CHIP_VEGA12:
3167 	case CHIP_VEGA20:
3168 		adev->mode_info.num_crtc = 6;
3169 		adev->mode_info.num_hpd = 6;
3170 		adev->mode_info.num_dig = 6;
3171 		break;
3172 #if defined(CONFIG_DRM_AMD_DC_DCN)
3173 	case CHIP_RAVEN:
3174 		adev->mode_info.num_crtc = 4;
3175 		adev->mode_info.num_hpd = 4;
3176 		adev->mode_info.num_dig = 4;
3177 		break;
3178 #endif
3179 	case CHIP_NAVI10:
3180 	case CHIP_NAVI12:
3181 		adev->mode_info.num_crtc = 6;
3182 		adev->mode_info.num_hpd = 6;
3183 		adev->mode_info.num_dig = 6;
3184 		break;
3185 	case CHIP_NAVI14:
3186 		adev->mode_info.num_crtc = 5;
3187 		adev->mode_info.num_hpd = 5;
3188 		adev->mode_info.num_dig = 5;
3189 		break;
3190 	case CHIP_RENOIR:
3191 		adev->mode_info.num_crtc = 4;
3192 		adev->mode_info.num_hpd = 4;
3193 		adev->mode_info.num_dig = 4;
3194 		break;
3195 	default:
3196 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3197 		return -EINVAL;
3198 	}
3199 
3200 	amdgpu_dm_set_irq_funcs(adev);
3201 
3202 	if (adev->mode_info.funcs == NULL)
3203 		adev->mode_info.funcs = &dm_display_funcs;
3204 
3205 	/*
3206 	 * Note: Do NOT change adev->audio_endpt_rreg and
3207 	 * adev->audio_endpt_wreg because they are initialised in
3208 	 * amdgpu_device_init()
3209 	 */
3210 #if defined(CONFIG_DEBUG_KERNEL_DC)
3211 	device_create_file(
3212 		adev->ddev->dev,
3213 		&dev_attr_s3_debug);
3214 #endif
3215 
3216 	return 0;
3217 }
3218 
3219 static bool modeset_required(struct drm_crtc_state *crtc_state,
3220 			     struct dc_stream_state *new_stream,
3221 			     struct dc_stream_state *old_stream)
3222 {
3223 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3224 		return false;
3225 
3226 	if (!crtc_state->enable)
3227 		return false;
3228 
3229 	return crtc_state->active;
3230 }
3231 
3232 static bool modereset_required(struct drm_crtc_state *crtc_state)
3233 {
3234 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3235 		return false;
3236 
3237 	return !crtc_state->enable || !crtc_state->active;
3238 }
3239 
3240 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3241 {
3242 	drm_encoder_cleanup(encoder);
3243 	kfree(encoder);
3244 }
3245 
3246 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3247 	.destroy = amdgpu_dm_encoder_destroy,
3248 };
3249 
3250 
3251 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3252 				struct dc_scaling_info *scaling_info)
3253 {
3254 	int scale_w, scale_h;
3255 
3256 	memset(scaling_info, 0, sizeof(*scaling_info));
3257 
	/* Source is in fixed-point 16.16, but we ignore the fractional part for now... */
3259 	scaling_info->src_rect.x = state->src_x >> 16;
3260 	scaling_info->src_rect.y = state->src_y >> 16;
3261 
3262 	scaling_info->src_rect.width = state->src_w >> 16;
3263 	if (scaling_info->src_rect.width == 0)
3264 		return -EINVAL;
3265 
3266 	scaling_info->src_rect.height = state->src_h >> 16;
3267 	if (scaling_info->src_rect.height == 0)
3268 		return -EINVAL;
3269 
3270 	scaling_info->dst_rect.x = state->crtc_x;
3271 	scaling_info->dst_rect.y = state->crtc_y;
3272 
3273 	if (state->crtc_w == 0)
3274 		return -EINVAL;
3275 
3276 	scaling_info->dst_rect.width = state->crtc_w;
3277 
3278 	if (state->crtc_h == 0)
3279 		return -EINVAL;
3280 
3281 	scaling_info->dst_rect.height = state->crtc_h;
3282 
3283 	/* DRM doesn't specify clipping on destination output. */
3284 	scaling_info->clip_rect = scaling_info->dst_rect;
3285 
3286 	/* TODO: Validate scaling per-format with DC plane caps */
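	/*
	 * The scale factors below are in thousandths of the dst/src ratio;
	 * e.g. a 1920-wide source on a 960-wide destination gives scale_w =
	 * 960 * 1000 / 1920 = 500. The 250..16000 window thus rejects
	 * anything beyond a 4x downscale or a 16x upscale.
	 */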
3287 	scale_w = scaling_info->dst_rect.width * 1000 /
3288 		  scaling_info->src_rect.width;
3289 
3290 	if (scale_w < 250 || scale_w > 16000)
3291 		return -EINVAL;
3292 
3293 	scale_h = scaling_info->dst_rect.height * 1000 /
3294 		  scaling_info->src_rect.height;
3295 
3296 	if (scale_h < 250 || scale_h > 16000)
3297 		return -EINVAL;
3298 
3299 	/*
	 * The "scaling_quality" can be ignored for now; with quality = 0,
	 * DC assumes reasonable defaults based on the format.
3302 	 */
3303 
3304 	return 0;
3305 }
3306 
3307 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3308 		       uint64_t *tiling_flags)
3309 {
3310 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3311 	int r = amdgpu_bo_reserve(rbo, false);
3312 
3313 	if (unlikely(r)) {
3314 		/* Don't show error message when returning -ERESTARTSYS */
3315 		if (r != -ERESTARTSYS)
3316 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3317 		return r;
3318 	}
3319 
3320 	if (tiling_flags)
3321 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3322 
3323 	amdgpu_bo_unreserve(rbo);
3324 
3325 	return r;
3326 }
3327 
3328 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3329 {
3330 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3331 
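	/*
	 * The tiling flags store the DCC offset in 256-byte units; an offset
	 * of 0 means the buffer carries no DCC metadata.
	 */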
3332 	return offset ? (address + offset * 256) : 0;
3333 }
3334 
3335 static int
3336 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3337 			  const struct amdgpu_framebuffer *afb,
3338 			  const enum surface_pixel_format format,
3339 			  const enum dc_rotation_angle rotation,
3340 			  const struct plane_size *plane_size,
3341 			  const union dc_tiling_info *tiling_info,
3342 			  const uint64_t info,
3343 			  struct dc_plane_dcc_param *dcc,
3344 			  struct dc_plane_address *address,
3345 			  bool force_disable_dcc)
3346 {
3347 	struct dc *dc = adev->dm.dc;
3348 	struct dc_dcc_surface_param input;
3349 	struct dc_surface_dcc_cap output;
3350 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3351 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3352 	uint64_t dcc_address;
3353 
3354 	memset(&input, 0, sizeof(input));
3355 	memset(&output, 0, sizeof(output));
3356 
3357 	if (force_disable_dcc)
3358 		return 0;
3359 
3360 	if (!offset)
3361 		return 0;
3362 
3363 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3364 		return 0;
3365 
3366 	if (!dc->cap_funcs.get_dcc_compression_cap)
3367 		return -EINVAL;
3368 
3369 	input.format = format;
3370 	input.surface_size.width = plane_size->surface_size.width;
3371 	input.surface_size.height = plane_size->surface_size.height;
3372 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3373 
3374 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3375 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3376 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3377 		input.scan = SCAN_DIRECTION_VERTICAL;
3378 
3379 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3380 		return -EINVAL;
3381 
3382 	if (!output.capable)
3383 		return -EINVAL;
3384 
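	/*
	 * If DC reports that this format requires independent 64B blocks but
	 * the buffer was not allocated with them, DCC cannot be enabled.
	 */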
3385 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3386 		return -EINVAL;
3387 
3388 	dcc->enable = 1;
3389 	dcc->meta_pitch =
3390 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3391 	dcc->independent_64b_blks = i64b;
3392 
3393 	dcc_address = get_dcc_address(afb->address, info);
3394 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3395 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3396 
3397 	return 0;
3398 }
3399 
3400 static int
3401 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3402 			     const struct amdgpu_framebuffer *afb,
3403 			     const enum surface_pixel_format format,
3404 			     const enum dc_rotation_angle rotation,
3405 			     const uint64_t tiling_flags,
3406 			     union dc_tiling_info *tiling_info,
3407 			     struct plane_size *plane_size,
3408 			     struct dc_plane_dcc_param *dcc,
3409 			     struct dc_plane_address *address,
3410 			     bool force_disable_dcc)
3411 {
3412 	const struct drm_framebuffer *fb = &afb->base;
3413 	int ret;
3414 
3415 	memset(tiling_info, 0, sizeof(*tiling_info));
3416 	memset(plane_size, 0, sizeof(*plane_size));
3417 	memset(dcc, 0, sizeof(*dcc));
3418 	memset(address, 0, sizeof(*address));
3419 
3420 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3421 		plane_size->surface_size.x = 0;
3422 		plane_size->surface_size.y = 0;
3423 		plane_size->surface_size.width = fb->width;
3424 		plane_size->surface_size.height = fb->height;
3425 		plane_size->surface_pitch =
3426 			fb->pitches[0] / fb->format->cpp[0];
3427 
3428 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3429 		address->grph.addr.low_part = lower_32_bits(afb->address);
3430 		address->grph.addr.high_part = upper_32_bits(afb->address);
3431 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3432 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3433 
3434 		plane_size->surface_size.x = 0;
3435 		plane_size->surface_size.y = 0;
3436 		plane_size->surface_size.width = fb->width;
3437 		plane_size->surface_size.height = fb->height;
3438 		plane_size->surface_pitch =
3439 			fb->pitches[0] / fb->format->cpp[0];
3440 
3441 		plane_size->chroma_size.x = 0;
3442 		plane_size->chroma_size.y = 0;
3443 		/* TODO: set these based on surface format */
3444 		plane_size->chroma_size.width = fb->width / 2;
3445 		plane_size->chroma_size.height = fb->height / 2;
3446 
3447 		plane_size->chroma_pitch =
3448 			fb->pitches[1] / fb->format->cpp[1];
3449 
3450 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3451 		address->video_progressive.luma_addr.low_part =
3452 			lower_32_bits(afb->address);
3453 		address->video_progressive.luma_addr.high_part =
3454 			upper_32_bits(afb->address);
3455 		address->video_progressive.chroma_addr.low_part =
3456 			lower_32_bits(chroma_addr);
3457 		address->video_progressive.chroma_addr.high_part =
3458 			upper_32_bits(chroma_addr);
3459 	}
3460 
3461 	/* Fill GFX8 params */
3462 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3463 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3464 
3465 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3466 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3467 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3468 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3469 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3470 
3471 		/* XXX fix me for VI */
3472 		tiling_info->gfx8.num_banks = num_banks;
3473 		tiling_info->gfx8.array_mode =
3474 				DC_ARRAY_2D_TILED_THIN1;
3475 		tiling_info->gfx8.tile_split = tile_split;
3476 		tiling_info->gfx8.bank_width = bankw;
3477 		tiling_info->gfx8.bank_height = bankh;
3478 		tiling_info->gfx8.tile_aspect = mtaspect;
3479 		tiling_info->gfx8.tile_mode =
3480 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3481 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3482 			== DC_ARRAY_1D_TILED_THIN1) {
3483 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3484 	}
3485 
3486 	tiling_info->gfx8.pipe_config =
3487 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3488 
3489 	if (adev->asic_type == CHIP_VEGA10 ||
3490 	    adev->asic_type == CHIP_VEGA12 ||
3491 	    adev->asic_type == CHIP_VEGA20 ||
3492 	    adev->asic_type == CHIP_NAVI10 ||
3493 	    adev->asic_type == CHIP_NAVI14 ||
3494 	    adev->asic_type == CHIP_NAVI12 ||
3495 	    adev->asic_type == CHIP_RENOIR ||
3496 	    adev->asic_type == CHIP_RAVEN) {
3497 		/* Fill GFX9 params */
3498 		tiling_info->gfx9.num_pipes =
3499 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3500 		tiling_info->gfx9.num_banks =
3501 			adev->gfx.config.gb_addr_config_fields.num_banks;
3502 		tiling_info->gfx9.pipe_interleave =
3503 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3504 		tiling_info->gfx9.num_shader_engines =
3505 			adev->gfx.config.gb_addr_config_fields.num_se;
3506 		tiling_info->gfx9.max_compressed_frags =
3507 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3508 		tiling_info->gfx9.num_rb_per_se =
3509 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3510 		tiling_info->gfx9.swizzle =
3511 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3512 		tiling_info->gfx9.shaderEnable = 1;
3513 
3514 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3515 						plane_size, tiling_info,
3516 						tiling_flags, dcc, address,
3517 						force_disable_dcc);
3518 		if (ret)
3519 			return ret;
3520 	}
3521 
3522 	return 0;
3523 }
3524 
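/*
 * Illustrative example (hypothetical values): for a 1920x1080 NV12
 * framebuffer the luma plane has cpp[0] == 1 and the CbCr plane is
 * subsampled 2x2 with cpp[1] == 2, so the code above yields
 *
 *	plane_size->surface_size  = 1920x1080
 *	plane_size->surface_pitch = fb->pitches[0] / 1
 *	plane_size->chroma_size   = 960x540
 *	plane_size->chroma_pitch  = fb->pitches[1] / 2
 *
 * and address->video_progressive carries the split luma/chroma GPU
 * addresses derived from afb->address and fb->offsets[1].
 */
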
3525 static void
3526 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3527 			       bool *per_pixel_alpha, bool *global_alpha,
3528 			       int *global_alpha_value)
3529 {
3530 	*per_pixel_alpha = false;
3531 	*global_alpha = false;
3532 	*global_alpha_value = 0xff;
3533 
3534 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3535 		return;
3536 
3537 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3538 		static const uint32_t alpha_formats[] = {
3539 			DRM_FORMAT_ARGB8888,
3540 			DRM_FORMAT_RGBA8888,
3541 			DRM_FORMAT_ABGR8888,
3542 		};
3543 		uint32_t format = plane_state->fb->format->format;
3544 		unsigned int i;
3545 
3546 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3547 			if (format == alpha_formats[i]) {
3548 				*per_pixel_alpha = true;
3549 				break;
3550 			}
3551 		}
3552 	}
3553 
3554 	if (plane_state->alpha < 0xffff) {
3555 		*global_alpha = true;
3556 		*global_alpha_value = plane_state->alpha >> 8;
3557 	}
3558 }
3559 
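/*
 * Worked example (hypothetical values): an ARGB8888 overlay plane with
 * pixel_blend_mode == DRM_MODE_BLEND_PREMULTI and plane_state->alpha ==
 * 0x8000 yields
 *
 *	*per_pixel_alpha    = true  (ARGB8888 is in alpha_formats[])
 *	*global_alpha       = true  (0x8000 < 0xffff)
 *	*global_alpha_value = 0x8000 >> 8 = 0x80
 *
 * i.e. the 16-bit DRM alpha property is truncated to the 8-bit global
 * alpha value DC expects.
 */
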
3560 static int
3561 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3562 			    const enum surface_pixel_format format,
3563 			    enum dc_color_space *color_space)
3564 {
3565 	bool full_range;
3566 
3567 	*color_space = COLOR_SPACE_SRGB;
3568 
3569 	/* DRM color properties only affect non-RGB formats. */
3570 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3571 		return 0;
3572 
3573 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3574 
3575 	switch (plane_state->color_encoding) {
3576 	case DRM_COLOR_YCBCR_BT601:
3577 		if (full_range)
3578 			*color_space = COLOR_SPACE_YCBCR601;
3579 		else
3580 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3581 		break;
3582 
3583 	case DRM_COLOR_YCBCR_BT709:
3584 		if (full_range)
3585 			*color_space = COLOR_SPACE_YCBCR709;
3586 		else
3587 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3588 		break;
3589 
3590 	case DRM_COLOR_YCBCR_BT2020:
3591 		if (full_range)
3592 			*color_space = COLOR_SPACE_2020_YCBCR;
3593 		else
3594 			return -EINVAL;
3595 		break;
3596 
3597 	default:
3598 		return -EINVAL;
3599 	}
3600 
3601 	return 0;
3602 }
3603 
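/*
 * For example, an NV12 plane with color_encoding == DRM_COLOR_YCBCR_BT709
 * and color_range == DRM_COLOR_YCBCR_LIMITED_RANGE maps to
 * COLOR_SPACE_YCBCR709_LIMITED above, while any RGB surface keeps the
 * COLOR_SPACE_SRGB default regardless of the DRM color properties.
 */
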
3604 static int
3605 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3606 			    const struct drm_plane_state *plane_state,
3607 			    const uint64_t tiling_flags,
3608 			    struct dc_plane_info *plane_info,
3609 			    struct dc_plane_address *address,
3610 			    bool force_disable_dcc)
3611 {
3612 	const struct drm_framebuffer *fb = plane_state->fb;
3613 	const struct amdgpu_framebuffer *afb =
3614 		to_amdgpu_framebuffer(plane_state->fb);
3615 	struct drm_format_name_buf format_name;
3616 	int ret;
3617 
3618 	memset(plane_info, 0, sizeof(*plane_info));
3619 
3620 	switch (fb->format->format) {
3621 	case DRM_FORMAT_C8:
3622 		plane_info->format =
3623 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3624 		break;
3625 	case DRM_FORMAT_RGB565:
3626 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3627 		break;
3628 	case DRM_FORMAT_XRGB8888:
3629 	case DRM_FORMAT_ARGB8888:
3630 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3631 		break;
3632 	case DRM_FORMAT_XRGB2101010:
3633 	case DRM_FORMAT_ARGB2101010:
3634 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3635 		break;
3636 	case DRM_FORMAT_XBGR2101010:
3637 	case DRM_FORMAT_ABGR2101010:
3638 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3639 		break;
3640 	case DRM_FORMAT_XBGR8888:
3641 	case DRM_FORMAT_ABGR8888:
3642 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3643 		break;
3644 	case DRM_FORMAT_NV21:
3645 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3646 		break;
3647 	case DRM_FORMAT_NV12:
3648 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3649 		break;
3650 	case DRM_FORMAT_P010:
3651 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3652 		break;
3653 	default:
3654 		DRM_ERROR(
3655 			"Unsupported screen format %s\n",
3656 			drm_get_format_name(fb->format->format, &format_name));
3657 		return -EINVAL;
3658 	}
3659 
3660 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3661 	case DRM_MODE_ROTATE_0:
3662 		plane_info->rotation = ROTATION_ANGLE_0;
3663 		break;
3664 	case DRM_MODE_ROTATE_90:
3665 		plane_info->rotation = ROTATION_ANGLE_90;
3666 		break;
3667 	case DRM_MODE_ROTATE_180:
3668 		plane_info->rotation = ROTATION_ANGLE_180;
3669 		break;
3670 	case DRM_MODE_ROTATE_270:
3671 		plane_info->rotation = ROTATION_ANGLE_270;
3672 		break;
3673 	default:
3674 		plane_info->rotation = ROTATION_ANGLE_0;
3675 		break;
3676 	}
3677 
3678 	plane_info->visible = true;
3679 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3680 
3681 	plane_info->layer_index = 0;
3682 
3683 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3684 					  &plane_info->color_space);
3685 	if (ret)
3686 		return ret;
3687 
3688 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3689 					   plane_info->rotation, tiling_flags,
3690 					   &plane_info->tiling_info,
3691 					   &plane_info->plane_size,
3692 					   &plane_info->dcc, address,
3693 					   force_disable_dcc);
3694 	if (ret)
3695 		return ret;
3696 
3697 	fill_blending_from_plane_state(
3698 		plane_state, &plane_info->per_pixel_alpha,
3699 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3700 
3701 	return 0;
3702 }
3703 
3704 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3705 				    struct dc_plane_state *dc_plane_state,
3706 				    struct drm_plane_state *plane_state,
3707 				    struct drm_crtc_state *crtc_state)
3708 {
3709 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3710 	const struct amdgpu_framebuffer *amdgpu_fb =
3711 		to_amdgpu_framebuffer(plane_state->fb);
3712 	struct dc_scaling_info scaling_info;
3713 	struct dc_plane_info plane_info;
3714 	uint64_t tiling_flags;
3715 	int ret;
3716 	bool force_disable_dcc = false;
3717 
3718 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3719 	if (ret)
3720 		return ret;
3721 
3722 	dc_plane_state->src_rect = scaling_info.src_rect;
3723 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3724 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3725 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3726 
3727 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3728 	if (ret)
3729 		return ret;
3730 
3731 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3732 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3733 					  &plane_info,
3734 					  &dc_plane_state->address,
3735 					  force_disable_dcc);
3736 	if (ret)
3737 		return ret;
3738 
3739 	dc_plane_state->format = plane_info.format;
3740 	dc_plane_state->color_space = plane_info.color_space;
3742 	dc_plane_state->plane_size = plane_info.plane_size;
3743 	dc_plane_state->rotation = plane_info.rotation;
3744 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3745 	dc_plane_state->stereo_format = plane_info.stereo_format;
3746 	dc_plane_state->tiling_info = plane_info.tiling_info;
3747 	dc_plane_state->visible = plane_info.visible;
3748 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3749 	dc_plane_state->global_alpha = plane_info.global_alpha;
3750 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3751 	dc_plane_state->dcc = plane_info.dcc;
3752 	dc_plane_state->layer_index = plane_info.layer_index; /* currently always 0 */
3753 
3754 	/*
3755 	 * Always set input transfer function, since plane state is refreshed
3756 	 * every time.
3757 	 */
3758 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3759 	if (ret)
3760 		return ret;
3761 
3762 	return 0;
3763 }
3764 
3765 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3766 					   const struct dm_connector_state *dm_state,
3767 					   struct dc_stream_state *stream)
3768 {
3769 	enum amdgpu_rmx_type rmx_type;
3770 
3771 	struct rect src = { 0 }; /* viewport in composition space */
3772 	struct rect dst = { 0 }; /* stream addressable area */
3773 
3774 	/* no mode. nothing to be done */
3775 	if (!mode)
3776 		return;
3777 
3778 	/* Full screen scaling by default */
3779 	src.width = mode->hdisplay;
3780 	src.height = mode->vdisplay;
3781 	dst.width = stream->timing.h_addressable;
3782 	dst.height = stream->timing.v_addressable;
3783 
3784 	if (dm_state) {
3785 		rmx_type = dm_state->scaling;
3786 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3787 			if (src.width * dst.height <
3788 					src.height * dst.width) {
3789 				/* height needs less upscaling/more downscaling */
3790 				dst.width = src.width *
3791 						dst.height / src.height;
3792 			} else {
3793 				/* width needs less upscaling/more downscaling */
3794 				dst.height = src.height *
3795 						dst.width / src.width;
3796 			}
3797 		} else if (rmx_type == RMX_CENTER) {
3798 			dst = src;
3799 		}
3800 
3801 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3802 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3803 
3804 		if (dm_state->underscan_enable) {
3805 			dst.x += dm_state->underscan_hborder / 2;
3806 			dst.y += dm_state->underscan_vborder / 2;
3807 			dst.width -= dm_state->underscan_hborder;
3808 			dst.height -= dm_state->underscan_vborder;
3809 		}
3810 	}
3811 
3812 	stream->src = src;
3813 	stream->dst = dst;
3814 
3815 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3816 			dst.x, dst.y, dst.width, dst.height);
3817 
3818 }
3819 
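/*
 * Worked example (hypothetical values): a 1920x1080 mode on a panel
 * whose native timing is 1920x1200, with scaling set to RMX_ASPECT:
 *
 *	src.width * dst.height (2304000) >= src.height * dst.width (2073600)
 *	=> dst.height = 1080 * 1920 / 1920 = 1080
 *	dst.x = (1920 - 1920) / 2 = 0
 *	dst.y = (1200 - 1080) / 2 = 60
 *
 * i.e. the image is letterboxed with 60-line borders top and bottom.
 */
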
3820 static enum dc_color_depth
3821 convert_color_depth_from_display_info(const struct drm_connector *connector,
3822 				      bool is_y420, int requested_bpc)
3823 {
3824 	uint8_t bpc;
3825 
3826 	if (is_y420) {
3827 		bpc = 8;
3828 
3829 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3830 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3831 			bpc = 16;
3832 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3833 			bpc = 12;
3834 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3835 			bpc = 10;
3836 	} else {
3837 		bpc = (uint8_t)connector->display_info.bpc;
3838 		/* Assume 8 bpc by default if no bpc is specified. */
3839 		bpc = bpc ? bpc : 8;
3840 	}
3841 
3842 	if (requested_bpc > 0) {
3843 		/*
3844 		 * Cap display bpc based on the user requested value.
3845 		 *
3846 		 * The value for state->max_bpc may not be correctly updated,
3847 		 * depending on when the connector gets added to the state or
3848 		 * if this was called outside of atomic check, so it
3849 		 * can't be used directly.
3850 		 */
3851 		bpc = min_t(u8, bpc, requested_bpc);
3852 
3853 		/* Round down to the nearest even number. */
3854 		bpc = bpc - (bpc & 1);
3855 	}
3856 
3857 	switch (bpc) {
3858 	case 0:
3859 		/*
3860 		 * Temporary workaround: DRM doesn't parse color depth for
3861 		 * EDID revisions before 1.4.
3862 		 * TODO: fix EDID parsing.
3863 		 */
3864 		return COLOR_DEPTH_888;
3865 	case 6:
3866 		return COLOR_DEPTH_666;
3867 	case 8:
3868 		return COLOR_DEPTH_888;
3869 	case 10:
3870 		return COLOR_DEPTH_101010;
3871 	case 12:
3872 		return COLOR_DEPTH_121212;
3873 	case 14:
3874 		return COLOR_DEPTH_141414;
3875 	case 16:
3876 		return COLOR_DEPTH_161616;
3877 	default:
3878 		return COLOR_DEPTH_UNDEFINED;
3879 	}
3880 }
3881 
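/*
 * For example, a sink advertising 12 bpc combined with a requested_bpc
 * of 10 gives min(12, 10) = 10, already even, so COLOR_DEPTH_101010 is
 * returned. A (hypothetical) requested_bpc of 9 would clamp to 9 and
 * then round down to 8, giving COLOR_DEPTH_888.
 */
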
3882 static enum dc_aspect_ratio
3883 get_aspect_ratio(const struct drm_display_mode *mode_in)
3884 {
3885 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3886 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3887 }
3888 
3889 static enum dc_color_space
3890 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3891 {
3892 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3893 
3894 	switch (dc_crtc_timing->pixel_encoding)	{
3895 	case PIXEL_ENCODING_YCBCR422:
3896 	case PIXEL_ENCODING_YCBCR444:
3897 	case PIXEL_ENCODING_YCBCR420:
3898 	{
3899 		/*
3900 		 * According to the HDMI spec, 27030 kHz is the separation
3901 		 * point between HDTV and SDTV; use YCbCr709 above it and
3902 		 * YCbCr601 below it.
3903 		 */
3904 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3905 			if (dc_crtc_timing->flags.Y_ONLY)
3906 				color_space =
3907 					COLOR_SPACE_YCBCR709_LIMITED;
3908 			else
3909 				color_space = COLOR_SPACE_YCBCR709;
3910 		} else {
3911 			if (dc_crtc_timing->flags.Y_ONLY)
3912 				color_space =
3913 					COLOR_SPACE_YCBCR601_LIMITED;
3914 			else
3915 				color_space = COLOR_SPACE_YCBCR601;
3916 		}
3917 
3918 	}
3919 	break;
3920 	case PIXEL_ENCODING_RGB:
3921 		color_space = COLOR_SPACE_SRGB;
3922 		break;
3923 
3924 	default:
3925 		WARN_ON(1);
3926 		break;
3927 	}
3928 
3929 	return color_space;
3930 }
3931 
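/*
 * The 270300 threshold is in 100 Hz units, i.e. 27.03 MHz. For example,
 * a 480p YCbCr stream (27 MHz, pix_clk_100hz == 270000) falls below it
 * and gets COLOR_SPACE_YCBCR601, while 720p (74.25 MHz, pix_clk_100hz ==
 * 742500) lands above it and gets COLOR_SPACE_YCBCR709.
 */
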
3932 static bool adjust_colour_depth_from_display_info(
3933 	struct dc_crtc_timing *timing_out,
3934 	const struct drm_display_info *info)
3935 {
3936 	enum dc_color_depth depth = timing_out->display_color_depth;
3937 	int normalized_clk;
3938 	do {
3939 		normalized_clk = timing_out->pix_clk_100hz / 10;
3940 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3941 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3942 			normalized_clk /= 2;
3943 		/* Adjust the pixel clock per the HDMI spec for the given colour depth */
3944 		switch (depth) {
3945 		case COLOR_DEPTH_888:
3946 			break;
3947 		case COLOR_DEPTH_101010:
3948 			normalized_clk = (normalized_clk * 30) / 24;
3949 			break;
3950 		case COLOR_DEPTH_121212:
3951 			normalized_clk = (normalized_clk * 36) / 24;
3952 			break;
3953 		case COLOR_DEPTH_161616:
3954 			normalized_clk = (normalized_clk * 48) / 24;
3955 			break;
3956 		default:
3957 			/* The above depths are the only ones valid for HDMI. */
3958 			return false;
3959 		}
3960 		if (normalized_clk <= info->max_tmds_clock) {
3961 			timing_out->display_color_depth = depth;
3962 			return true;
3963 		}
3964 	} while (--depth > COLOR_DEPTH_666);
3965 	return false;
3966 }
3967 
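/*
 * Worked example against a hypothetical HDMI 2.0 sink with
 * max_tmds_clock == 600000 kHz: 4k60 RGB has pix_clk_100hz == 5940000,
 * so normalized_clk starts at 594000 kHz and
 *
 *	COLOR_DEPTH_101010: 594000 * 30 / 24 = 742500 > 600000  -> reject
 *	COLOR_DEPTH_888:    594000             <= 600000        -> accept
 *
 * lowers the depth to 8 bpc. With YCbCr 4:2:0 the clock is halved first
 * (297000 kHz), so 10 bpc (371250 kHz) would fit; that is exactly the
 * fallback attempted in fill_stream_properties_from_drm_display_mode()
 * below.
 */
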
3968 static void fill_stream_properties_from_drm_display_mode(
3969 	struct dc_stream_state *stream,
3970 	const struct drm_display_mode *mode_in,
3971 	const struct drm_connector *connector,
3972 	const struct drm_connector_state *connector_state,
3973 	const struct dc_stream_state *old_stream,
3974 	int requested_bpc)
3975 {
3976 	struct dc_crtc_timing *timing_out = &stream->timing;
3977 	const struct drm_display_info *info = &connector->display_info;
3978 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3979 	struct hdmi_vendor_infoframe hv_frame;
3980 	struct hdmi_avi_infoframe avi_frame;
3981 
3982 	memset(&hv_frame, 0, sizeof(hv_frame));
3983 	memset(&avi_frame, 0, sizeof(avi_frame));
3984 
3985 	timing_out->h_border_left = 0;
3986 	timing_out->h_border_right = 0;
3987 	timing_out->v_border_top = 0;
3988 	timing_out->v_border_bottom = 0;
3989 	/* TODO: un-hardcode */
3990 	if (drm_mode_is_420_only(info, mode_in)
3991 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3992 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3993 	else if (drm_mode_is_420_also(info, mode_in)
3994 			&& aconnector->force_yuv420_output)
3995 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3996 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3997 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3998 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3999 	else
4000 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4001 
4002 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4003 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4004 		connector,
4005 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4006 		requested_bpc);
4007 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4008 	timing_out->hdmi_vic = 0;
4009 
4010 	if (old_stream) {
4011 		timing_out->vic = old_stream->timing.vic;
4012 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4013 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4014 	} else {
4015 		timing_out->vic = drm_match_cea_mode(mode_in);
4016 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4017 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4018 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4019 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4020 	}
4021 
4022 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4023 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4024 		timing_out->vic = avi_frame.video_code;
4025 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4026 		timing_out->hdmi_vic = hv_frame.vic;
4027 	}
4028 
4029 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4030 	timing_out->h_total = mode_in->crtc_htotal;
4031 	timing_out->h_sync_width =
4032 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4033 	timing_out->h_front_porch =
4034 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4035 	timing_out->v_total = mode_in->crtc_vtotal;
4036 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4037 	timing_out->v_front_porch =
4038 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4039 	timing_out->v_sync_width =
4040 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4041 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4042 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4043 
4044 	stream->output_color_space = get_output_color_space(timing_out);
4045 
4046 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4047 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4048 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4049 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4050 		    drm_mode_is_420_also(info, mode_in) &&
4051 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4052 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4053 			adjust_colour_depth_from_display_info(timing_out, info);
4054 		}
4055 	}
4056 }
4057 
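/*
 * Example timing derivation (CEA-861 VIC 16, 1920x1080@60):
 * crtc_htotal == 2200, crtc_hsync_start == 2008, crtc_hsync_end == 2052
 * and crtc_clock == 148500 give
 *
 *	h_front_porch = 2008 - 1920 = 88
 *	h_sync_width  = 2052 - 2008 = 44
 *	pix_clk_100hz = 148500 * 10 = 1485000   (148.5 MHz)
 */
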
4058 static void fill_audio_info(struct audio_info *audio_info,
4059 			    const struct drm_connector *drm_connector,
4060 			    const struct dc_sink *dc_sink)
4061 {
4062 	int i = 0;
4063 	int cea_revision = 0;
4064 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4065 
4066 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4067 	audio_info->product_id = edid_caps->product_id;
4068 
4069 	cea_revision = drm_connector->display_info.cea_rev;
4070 
4071 #ifdef __linux__
4072 	strscpy(audio_info->display_name,
4073 		edid_caps->display_name,
4074 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4075 #else
4076 	strncpy(audio_info->display_name,
4077 		edid_caps->display_name,
4078 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4079 #endif
4080 
4081 	if (cea_revision >= 3) {
4082 		audio_info->mode_count = edid_caps->audio_mode_count;
4083 
4084 		for (i = 0; i < audio_info->mode_count; ++i) {
4085 			audio_info->modes[i].format_code =
4086 					(enum audio_format_code)
4087 					(edid_caps->audio_modes[i].format_code);
4088 			audio_info->modes[i].channel_count =
4089 					edid_caps->audio_modes[i].channel_count;
4090 			audio_info->modes[i].sample_rates.all =
4091 					edid_caps->audio_modes[i].sample_rate;
4092 			audio_info->modes[i].sample_size =
4093 					edid_caps->audio_modes[i].sample_size;
4094 		}
4095 	}
4096 
4097 	audio_info->flags.all = edid_caps->speaker_flags;
4098 
4099 	/* TODO: we only check progressive mode; check interlaced mode too */
4100 	if (drm_connector->latency_present[0]) {
4101 		audio_info->video_latency = drm_connector->video_latency[0];
4102 		audio_info->audio_latency = drm_connector->audio_latency[0];
4103 	}
4104 
4105 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4106 
4107 }
4108 
4109 static void
4110 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4111 				      struct drm_display_mode *dst_mode)
4112 {
4113 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4114 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4115 	dst_mode->crtc_clock = src_mode->crtc_clock;
4116 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4117 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4118 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4119 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4120 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4121 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4122 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4123 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4124 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4125 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4126 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4127 }
4128 
4129 static void
4130 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4131 					const struct drm_display_mode *native_mode,
4132 					bool scale_enabled)
4133 {
4134 	if (scale_enabled) {
4135 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4136 	} else if (native_mode->clock == drm_mode->clock &&
4137 			native_mode->htotal == drm_mode->htotal &&
4138 			native_mode->vtotal == drm_mode->vtotal) {
4139 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4140 	} else {
4141 		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
4142 	}
4143 }
4144 
4145 static struct dc_sink *
4146 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4147 {
4148 	struct dc_sink_init_data sink_init_data = { 0 };
4149 	struct dc_sink *sink = NULL;
4150 	sink_init_data.link = aconnector->dc_link;
4151 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4152 
4153 	sink = dc_sink_create(&sink_init_data);
4154 	if (!sink) {
4155 		DRM_ERROR("Failed to create sink!\n");
4156 		return NULL;
4157 	}
4158 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4159 
4160 	return sink;
4161 }
4162 
4163 static void set_multisync_trigger_params(
4164 		struct dc_stream_state *stream)
4165 {
4166 	if (stream->triggered_crtc_reset.enabled) {
4167 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4168 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4169 	}
4170 }
4171 
4172 static void set_master_stream(struct dc_stream_state *stream_set[],
4173 			      int stream_count)
4174 {
4175 	int j, highest_rfr = 0, master_stream = 0;
4176 
4177 	for (j = 0;  j < stream_count; j++) {
4178 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4179 			int refresh_rate = 0;
4180 
4181 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4182 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4183 			if (refresh_rate > highest_rfr) {
4184 				highest_rfr = refresh_rate;
4185 				master_stream = j;
4186 			}
4187 		}
4188 	}
4189 	for (j = 0;  j < stream_count; j++) {
4190 		if (stream_set[j])
4191 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4192 	}
4193 }
4194 
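/*
 * The refresh rate computed above is pix_clk_100hz * 100 /
 * (h_total * v_total); e.g. for 1080p60, 1485000 * 100 / (2200 * 1125)
 * == 60. Among several reset-enabled streams, a (hypothetical) 75 Hz
 * stream would therefore win the master election over a 60 Hz one.
 */
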
4195 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4196 {
4197 	int i = 0;
4198 
4199 	if (context->stream_count < 2)
4200 		return;
4201 	for (i = 0; i < context->stream_count ; i++) {
4202 		if (!context->streams[i])
4203 			continue;
4204 		/*
4205 		 * TODO: add a function to read AMD VSDB bits and set
4206 		 * the crtc_sync_master.multi_sync_enabled flag.
4207 		 * For now it is left set to false.
4208 		 */
4209 		set_multisync_trigger_params(context->streams[i]);
4210 	}
4211 	set_master_stream(context->streams, context->stream_count);
4212 }
4213 
4214 static struct dc_stream_state *
4215 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4216 		       const struct drm_display_mode *drm_mode,
4217 		       const struct dm_connector_state *dm_state,
4218 		       const struct dc_stream_state *old_stream,
4219 		       int requested_bpc)
4220 {
4221 	struct drm_display_mode *preferred_mode = NULL;
4222 	struct drm_connector *drm_connector;
4223 	const struct drm_connector_state *con_state =
4224 		dm_state ? &dm_state->base : NULL;
4225 	struct dc_stream_state *stream = NULL;
4226 	struct drm_display_mode mode = *drm_mode;
4227 	bool native_mode_found = false;
4228 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4229 	int mode_refresh;
4230 	int preferred_refresh = 0;
4231 #if defined(CONFIG_DRM_AMD_DC_DCN)
4232 	struct dsc_dec_dpcd_caps dsc_caps;
4233 #endif
4234 	uint32_t link_bandwidth_kbps;
4235 
4236 	struct dc_sink *sink = NULL;
4237 	if (aconnector == NULL) {
4238 		DRM_ERROR("aconnector is NULL!\n");
4239 		return stream;
4240 	}
4241 
4242 	drm_connector = &aconnector->base;
4243 
4244 	if (!aconnector->dc_sink) {
4245 		sink = create_fake_sink(aconnector);
4246 		if (!sink)
4247 			return stream;
4248 	} else {
4249 		sink = aconnector->dc_sink;
4250 		dc_sink_retain(sink);
4251 	}
4252 
4253 	stream = dc_create_stream_for_sink(sink);
4254 
4255 	if (stream == NULL) {
4256 		DRM_ERROR("Failed to create stream for sink!\n");
4257 		goto finish;
4258 	}
4259 
4260 	stream->dm_stream_context = aconnector;
4261 
4262 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4263 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4264 
4265 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4266 		/* Search for preferred mode */
4267 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4268 			native_mode_found = true;
4269 			break;
4270 		}
4271 	}
4272 	if (!native_mode_found)
4273 		preferred_mode = list_first_entry_or_null(
4274 				&aconnector->base.modes,
4275 				struct drm_display_mode,
4276 				head);
4277 
4278 	mode_refresh = drm_mode_vrefresh(&mode);
4279 
4280 	if (preferred_mode == NULL) {
4281 		/*
4282 		 * This may not be an error: this happens when there are no
4283 		 * usermode calls to reset and set the mode upon hotplug. In
4284 		 * that case we set the mode ourselves to restore the previous
4285 		 * mode, and the mode list may not be filled in yet.
4286 		 */
4287 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4288 	} else {
4289 		decide_crtc_timing_for_drm_display_mode(
4290 				&mode, preferred_mode,
4291 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4292 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4293 	}
4294 
4295 	if (!dm_state)
4296 		drm_mode_set_crtcinfo(&mode, 0);
4297 
4298 	/*
4299 	 * If scaling is enabled and the refresh rate didn't change,
4300 	 * copy the VIC and polarities from the old timings.
4301 	 */
4302 	if (!scale || mode_refresh != preferred_refresh)
4303 		fill_stream_properties_from_drm_display_mode(stream,
4304 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4305 	else
4306 		fill_stream_properties_from_drm_display_mode(stream,
4307 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4308 
4309 	stream->timing.flags.DSC = 0;
4310 
4311 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4312 #if defined(CONFIG_DRM_AMD_DC_DCN)
4313 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4314 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4315 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4316 				      &dsc_caps);
4317 #endif
4318 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4319 							     dc_link_get_link_cap(aconnector->dc_link));
4320 
4321 #if defined(CONFIG_DRM_AMD_DC_DCN)
4322 		if (dsc_caps.is_dsc_supported)
4323 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4324 						  &dsc_caps,
4325 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4326 						  link_bandwidth_kbps,
4327 						  &stream->timing,
4328 						  &stream->timing.dsc_cfg))
4329 				stream->timing.flags.DSC = 1;
4330 #endif
4331 	}
4332 
4333 	update_stream_scaling_settings(&mode, dm_state, stream);
4334 
4335 	fill_audio_info(
4336 		&stream->audio_info,
4337 		drm_connector,
4338 		sink);
4339 
4340 	update_stream_signal(stream, sink);
4341 
4342 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4343 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4344 	if (stream->link->psr_feature_enabled) {
4345 		struct dc *core_dc = stream->link->ctx->dc;
4346 
4347 		if (dc_is_dmcu_initialized(core_dc)) {
4348 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4349 
4350 			stream->psr_version = dmcu->dmcu_version.psr_version;
4351 
4352 			/*
4353 			 * Decide whether the stream supports VSC SDP
4354 			 * colorimetry before building the VSC info packet.
4355 			 */
4356 			stream->use_vsc_sdp_for_colorimetry = false;
4357 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4358 				stream->use_vsc_sdp_for_colorimetry =
4359 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4360 			} else {
4361 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4362 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4363 					stream->use_vsc_sdp_for_colorimetry = true;
4364 				}
4365 			}
4366 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4367 		}
4368 	}
4369 finish:
4370 	dc_sink_release(sink);
4371 
4372 	return stream;
4373 }
4374 
4375 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4376 {
4377 	drm_crtc_cleanup(crtc);
4378 	kfree(crtc);
4379 }
4380 
4381 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4382 				  struct drm_crtc_state *state)
4383 {
4384 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4385 
4386 	/* TODO: destroy the dc_stream once the stream object is flattened */
4387 	if (cur->stream)
4388 		dc_stream_release(cur->stream);
4389 
4390 
4391 	__drm_atomic_helper_crtc_destroy_state(state);
4392 
4393 
4394 	kfree(state);
4395 }
4396 
4397 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4398 {
4399 	struct dm_crtc_state *state;
4400 
4401 	if (crtc->state)
4402 		dm_crtc_destroy_state(crtc, crtc->state);
4403 
4404 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4405 	if (WARN_ON(!state))
4406 		return;
4407 
4408 	crtc->state = &state->base;
4409 	crtc->state->crtc = crtc;
4410 
4411 }
4412 
4413 static struct drm_crtc_state *
4414 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4415 {
4416 	struct dm_crtc_state *state, *cur;
4417 
4418 	if (WARN_ON(!crtc->state))
4419 		return NULL;
4420 
4421 	cur = to_dm_crtc_state(crtc->state);
4422 
4423 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4424 	if (!state)
4425 		return NULL;
4426 
4427 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4428 
4429 	if (cur->stream) {
4430 		state->stream = cur->stream;
4431 		dc_stream_retain(state->stream);
4432 	}
4433 
4434 	state->active_planes = cur->active_planes;
4435 	state->interrupts_enabled = cur->interrupts_enabled;
4436 	state->vrr_params = cur->vrr_params;
4437 	state->vrr_infopacket = cur->vrr_infopacket;
4438 	state->abm_level = cur->abm_level;
4439 	state->vrr_supported = cur->vrr_supported;
4440 	state->freesync_config = cur->freesync_config;
4441 	state->crc_src = cur->crc_src;
4442 	state->cm_has_degamma = cur->cm_has_degamma;
4443 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4444 
4445 	/* TODO: duplicate the dc_stream once the stream object is flattened */
4446 
4447 	return &state->base;
4448 }
4449 
4450 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4451 {
4452 	enum dc_irq_source irq_source;
4453 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4454 	struct amdgpu_device *adev = crtc->dev->dev_private;
4455 	int rc;
4456 
4457 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4458 
4459 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4460 
4461 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4462 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4463 	return rc;
4464 }
4465 
4466 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4467 {
4468 	enum dc_irq_source irq_source;
4469 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4470 	struct amdgpu_device *adev = crtc->dev->dev_private;
4471 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4472 	int rc = 0;
4473 
4474 	if (enable) {
4475 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4476 		if (amdgpu_dm_vrr_active(acrtc_state))
4477 			rc = dm_set_vupdate_irq(crtc, true);
4478 	} else {
4479 		/* vblank irq off -> vupdate irq off */
4480 		rc = dm_set_vupdate_irq(crtc, false);
4481 	}
4482 
4483 	if (rc)
4484 		return rc;
4485 
4486 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4487 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4488 }
4489 
4490 static int dm_enable_vblank(struct drm_crtc *crtc)
4491 {
4492 	return dm_set_vblank(crtc, true);
4493 }
4494 
4495 static void dm_disable_vblank(struct drm_crtc *crtc)
4496 {
4497 	dm_set_vblank(crtc, false);
4498 }
4499 
4500 /* Only the options currently available to the driver are implemented */
4501 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4502 	.reset = dm_crtc_reset_state,
4503 	.destroy = amdgpu_dm_crtc_destroy,
4504 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4505 	.set_config = drm_atomic_helper_set_config,
4506 	.page_flip = drm_atomic_helper_page_flip,
4507 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4508 	.atomic_destroy_state = dm_crtc_destroy_state,
4509 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4510 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4511 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4512 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4513 	.enable_vblank = dm_enable_vblank,
4514 	.disable_vblank = dm_disable_vblank,
4515 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4516 };
4517 
4518 static enum drm_connector_status
4519 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4520 {
4521 	bool connected;
4522 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4523 
4524 	/*
4525 	 * Notes:
4526 	 * 1. This interface is NOT called in context of HPD irq.
4527 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4528 	 * makes it a bad place for *any* MST-related activity.
4529 	 */
4530 
4531 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4532 	    !aconnector->fake_enable)
4533 		connected = (aconnector->dc_sink != NULL);
4534 	else
4535 		connected = (aconnector->base.force == DRM_FORCE_ON);
4536 
4537 	return (connected ? connector_status_connected :
4538 			connector_status_disconnected);
4539 }
4540 
4541 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4542 					    struct drm_connector_state *connector_state,
4543 					    struct drm_property *property,
4544 					    uint64_t val)
4545 {
4546 	struct drm_device *dev = connector->dev;
4547 	struct amdgpu_device *adev = dev->dev_private;
4548 	struct dm_connector_state *dm_old_state =
4549 		to_dm_connector_state(connector->state);
4550 	struct dm_connector_state *dm_new_state =
4551 		to_dm_connector_state(connector_state);
4552 
4553 	int ret = -EINVAL;
4554 
4555 	if (property == dev->mode_config.scaling_mode_property) {
4556 		enum amdgpu_rmx_type rmx_type;
4557 
4558 		switch (val) {
4559 		case DRM_MODE_SCALE_CENTER:
4560 			rmx_type = RMX_CENTER;
4561 			break;
4562 		case DRM_MODE_SCALE_ASPECT:
4563 			rmx_type = RMX_ASPECT;
4564 			break;
4565 		case DRM_MODE_SCALE_FULLSCREEN:
4566 			rmx_type = RMX_FULL;
4567 			break;
4568 		case DRM_MODE_SCALE_NONE:
4569 		default:
4570 			rmx_type = RMX_OFF;
4571 			break;
4572 		}
4573 
4574 		if (dm_old_state->scaling == rmx_type)
4575 			return 0;
4576 
4577 		dm_new_state->scaling = rmx_type;
4578 		ret = 0;
4579 	} else if (property == adev->mode_info.underscan_hborder_property) {
4580 		dm_new_state->underscan_hborder = val;
4581 		ret = 0;
4582 	} else if (property == adev->mode_info.underscan_vborder_property) {
4583 		dm_new_state->underscan_vborder = val;
4584 		ret = 0;
4585 	} else if (property == adev->mode_info.underscan_property) {
4586 		dm_new_state->underscan_enable = val;
4587 		ret = 0;
4588 	} else if (property == adev->mode_info.abm_level_property) {
4589 		dm_new_state->abm_level = val;
4590 		ret = 0;
4591 	}
4592 
4593 	return ret;
4594 }
4595 
4596 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4597 					    const struct drm_connector_state *state,
4598 					    struct drm_property *property,
4599 					    uint64_t *val)
4600 {
4601 	struct drm_device *dev = connector->dev;
4602 	struct amdgpu_device *adev = dev->dev_private;
4603 	struct dm_connector_state *dm_state =
4604 		to_dm_connector_state(state);
4605 	int ret = -EINVAL;
4606 
4607 	if (property == dev->mode_config.scaling_mode_property) {
4608 		switch (dm_state->scaling) {
4609 		case RMX_CENTER:
4610 			*val = DRM_MODE_SCALE_CENTER;
4611 			break;
4612 		case RMX_ASPECT:
4613 			*val = DRM_MODE_SCALE_ASPECT;
4614 			break;
4615 		case RMX_FULL:
4616 			*val = DRM_MODE_SCALE_FULLSCREEN;
4617 			break;
4618 		case RMX_OFF:
4619 		default:
4620 			*val = DRM_MODE_SCALE_NONE;
4621 			break;
4622 		}
4623 		ret = 0;
4624 	} else if (property == adev->mode_info.underscan_hborder_property) {
4625 		*val = dm_state->underscan_hborder;
4626 		ret = 0;
4627 	} else if (property == adev->mode_info.underscan_vborder_property) {
4628 		*val = dm_state->underscan_vborder;
4629 		ret = 0;
4630 	} else if (property == adev->mode_info.underscan_property) {
4631 		*val = dm_state->underscan_enable;
4632 		ret = 0;
4633 	} else if (property == adev->mode_info.abm_level_property) {
4634 		*val = dm_state->abm_level;
4635 		ret = 0;
4636 	}
4637 
4638 	return ret;
4639 }
4640 
4641 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4642 {
4643 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4644 
4645 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4646 }
4647 
4648 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4649 {
4650 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4651 	const struct dc_link *link = aconnector->dc_link;
4652 	struct amdgpu_device *adev = connector->dev->dev_private;
4653 	struct amdgpu_display_manager *dm = &adev->dm;
4654 
4655 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4656 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4657 
4658 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4659 	    link->type != dc_connection_none &&
4660 	    dm->backlight_dev) {
4661 		backlight_device_unregister(dm->backlight_dev);
4662 		dm->backlight_dev = NULL;
4663 	}
4664 #endif
4665 
4666 	if (aconnector->dc_em_sink)
4667 		dc_sink_release(aconnector->dc_em_sink);
4668 	aconnector->dc_em_sink = NULL;
4669 	if (aconnector->dc_sink)
4670 		dc_sink_release(aconnector->dc_sink);
4671 	aconnector->dc_sink = NULL;
4672 
4673 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4674 	drm_connector_unregister(connector);
4675 	drm_connector_cleanup(connector);
4676 	if (aconnector->i2c) {
4677 		i2c_del_adapter(&aconnector->i2c->base);
4678 		kfree(aconnector->i2c);
4679 	}
4680 	kfree(aconnector->dm_dp_aux.aux.name);
4681 
4682 	kfree(connector);
4683 }
4684 
4685 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4686 {
4687 	struct dm_connector_state *state =
4688 		to_dm_connector_state(connector->state);
4689 
4690 	if (connector->state)
4691 		__drm_atomic_helper_connector_destroy_state(connector->state);
4692 
4693 	kfree(state);
4694 
4695 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4696 
4697 	if (state) {
4698 		state->scaling = RMX_OFF;
4699 		state->underscan_enable = false;
4700 		state->underscan_hborder = 0;
4701 		state->underscan_vborder = 0;
4702 		state->base.max_requested_bpc = 8;
4703 		state->vcpi_slots = 0;
4704 		state->pbn = 0;
4705 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4706 			state->abm_level = amdgpu_dm_abm_level;
4707 
4708 		__drm_atomic_helper_connector_reset(connector, &state->base);
4709 	}
4710 }
4711 
4712 struct drm_connector_state *
4713 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4714 {
4715 	struct dm_connector_state *state =
4716 		to_dm_connector_state(connector->state);
4717 
4718 	struct dm_connector_state *new_state =
4719 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4720 
4721 	if (!new_state)
4722 		return NULL;
4723 
4724 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4725 
4726 	new_state->freesync_capable = state->freesync_capable;
4727 	new_state->abm_level = state->abm_level;
4728 	new_state->scaling = state->scaling;
4729 	new_state->underscan_enable = state->underscan_enable;
4730 	new_state->underscan_hborder = state->underscan_hborder;
4731 	new_state->underscan_vborder = state->underscan_vborder;
4732 	new_state->vcpi_slots = state->vcpi_slots;
4733 	new_state->pbn = state->pbn;
4734 	return &new_state->base;
4735 }
4736 
4737 static int
4738 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4739 {
4740 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4741 		to_amdgpu_dm_connector(connector);
4742 	int r;
4743 
4744 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4745 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4746 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4747 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4748 		if (r)
4749 			return r;
4750 	}
4751 
4752 #if defined(CONFIG_DEBUG_FS)
4753 	connector_debugfs_init(amdgpu_dm_connector);
4754 #endif
4755 
4756 	return 0;
4757 }
4758 
4759 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4760 	.reset = amdgpu_dm_connector_funcs_reset,
4761 	.detect = amdgpu_dm_connector_detect,
4762 	.fill_modes = drm_helper_probe_single_connector_modes,
4763 	.destroy = amdgpu_dm_connector_destroy,
4764 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4765 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4766 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4767 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4768 	.late_register = amdgpu_dm_connector_late_register,
4769 	.early_unregister = amdgpu_dm_connector_unregister
4770 };
4771 
4772 static int get_modes(struct drm_connector *connector)
4773 {
4774 	return amdgpu_dm_connector_get_modes(connector);
4775 }
4776 
4777 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4778 {
4779 	struct dc_sink_init_data init_params = {
4780 			.link = aconnector->dc_link,
4781 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4782 	};
4783 	struct edid *edid;
4784 
4785 	if (!aconnector->base.edid_blob_ptr) {
4786 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4787 				aconnector->base.name);
4788 
4789 		aconnector->base.force = DRM_FORCE_OFF;
4790 		aconnector->base.override_edid = false;
4791 		return;
4792 	}
4793 
4794 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4795 
4796 	aconnector->edid = edid;
4797 
4798 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4799 		aconnector->dc_link,
4800 		(uint8_t *)edid,
4801 		(edid->extensions + 1) * EDID_LENGTH,
4802 		&init_params);
4803 
4804 	if (aconnector->base.force == DRM_FORCE_ON) {
4805 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4806 		aconnector->dc_link->local_sink :
4807 		aconnector->dc_em_sink;
4808 		dc_sink_retain(aconnector->dc_sink);
4809 	}
4810 }
4811 
4812 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4813 {
4814 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4815 
4816 	/*
4817 	 * In case of a headless boot with force-on for a DP managed
4818 	 * connector, these settings have to be != 0 to get an initial modeset.
4819 	 */
4820 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4821 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4822 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4823 	}
4824 
4825 
4826 	aconnector->base.override_edid = true;
4827 	create_eml_sink(aconnector);
4828 }
4829 
4830 static struct dc_stream_state *
4831 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4832 				const struct drm_display_mode *drm_mode,
4833 				const struct dm_connector_state *dm_state,
4834 				const struct dc_stream_state *old_stream)
4835 {
4836 	struct drm_connector *connector = &aconnector->base;
4837 	struct amdgpu_device *adev = connector->dev->dev_private;
4838 	struct dc_stream_state *stream;
4839 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
4840 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
4841 	enum dc_status dc_result = DC_OK;
4842 
4843 	do {
4844 		stream = create_stream_for_sink(aconnector, drm_mode,
4845 						dm_state, old_stream,
4846 						requested_bpc);
4847 		if (stream == NULL) {
4848 			DRM_ERROR("Failed to create stream for sink!\n");
4849 			break;
4850 		}
4851 
4852 		dc_result = dc_validate_stream(adev->dm.dc, stream);
4853 
4854 		if (dc_result != DC_OK) {
4855 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4856 				      drm_mode->hdisplay,
4857 				      drm_mode->vdisplay,
4858 				      drm_mode->clock,
4859 				      dc_result);
4860 
4861 			dc_stream_release(stream);
4862 			stream = NULL;
4863 			requested_bpc -= 2; /* lower bpc to retry validation */
4864 		}
4865 
4866 	} while (stream == NULL && requested_bpc >= 6);
4867 
4868 	return stream;
4869 }
4870 
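/*
 * For example, with max_requested_bpc == 12 on a link that can only
 * carry the mode at 8 bpc, the loop above tries 12, then 10, then
 * succeeds at 8; only if 6 bpc also fails validation does it give up
 * and return NULL.
 */
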
4871 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4872 				   struct drm_display_mode *mode)
4873 {
4874 	int result = MODE_ERROR;
4875 	struct dc_sink *dc_sink;
4876 	/* TODO: Unhardcode stream count */
4877 	struct dc_stream_state *stream;
4878 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4879 
4880 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4881 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4882 		return result;
4883 
4884 	/*
4885 	 * Only run this the first time mode_valid is called, to initialize
4886 	 * EDID management.
4887 	 */
4888 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4889 		!aconnector->dc_em_sink)
4890 		handle_edid_mgmt(aconnector);
4891 
4892 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4893 
4894 	if (dc_sink == NULL) {
4895 		DRM_ERROR("dc_sink is NULL!\n");
4896 		goto fail;
4897 	}
4898 
4899 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
4900 	if (stream) {
4901 		dc_stream_release(stream);
4902 		result = MODE_OK;
4903 	}
4904 
4905 fail:
4906 	/* TODO: error handling */
4907 	return result;
4908 }
4909 
4910 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4911 				struct dc_info_packet *out)
4912 {
4913 	struct hdmi_drm_infoframe frame;
4914 	unsigned char buf[30]; /* 26 + 4 */
4915 	ssize_t len;
4916 	int ret, i;
4917 
4918 	memset(out, 0, sizeof(*out));
4919 
4920 	if (!state->hdr_output_metadata)
4921 		return 0;
4922 
4923 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4924 	if (ret)
4925 		return ret;
4926 
4927 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4928 	if (len < 0)
4929 		return (int)len;
4930 
4931 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4932 	if (len != 30)
4933 		return -EINVAL;
4934 
4935 	/* Prepare the infopacket for DC. */
4936 	switch (state->connector->connector_type) {
4937 	case DRM_MODE_CONNECTOR_HDMIA:
4938 		out->hb0 = 0x87; /* type */
4939 		out->hb1 = 0x01; /* version */
4940 		out->hb2 = 0x1A; /* length */
4941 		out->sb[0] = buf[3]; /* checksum */
4942 		i = 1;
4943 		break;
4944 
4945 	case DRM_MODE_CONNECTOR_DisplayPort:
4946 	case DRM_MODE_CONNECTOR_eDP:
4947 		out->hb0 = 0x00; /* sdp id, zero */
4948 		out->hb1 = 0x87; /* type */
4949 		out->hb2 = 0x1D; /* payload len - 1 */
4950 		out->hb3 = (0x13 << 2); /* sdp version */
4951 		out->sb[0] = 0x01; /* version */
4952 		out->sb[1] = 0x1A; /* length */
4953 		i = 2;
4954 		break;
4955 
4956 	default:
4957 		return -EINVAL;
4958 	}
4959 
4960 	memcpy(&out->sb[i], &buf[4], 26);
4961 	out->valid = true;
4962 
4963 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4964 		       sizeof(out->sb), false);
4965 
4966 	return 0;
4967 }
4968 
4969 static bool
4970 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4971 			  const struct drm_connector_state *new_state)
4972 {
4973 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4974 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4975 
4976 	if (old_blob != new_blob) {
4977 		if (old_blob && new_blob &&
4978 		    old_blob->length == new_blob->length)
4979 			return memcmp(old_blob->data, new_blob->data,
4980 				      old_blob->length);
4981 
4982 		return true;
4983 	}
4984 
4985 	return false;
4986 }
4987 
4988 static int
4989 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4990 				 struct drm_atomic_state *state)
4991 {
4992 	struct drm_connector_state *new_con_state =
4993 		drm_atomic_get_new_connector_state(state, conn);
4994 	struct drm_connector_state *old_con_state =
4995 		drm_atomic_get_old_connector_state(state, conn);
4996 	struct drm_crtc *crtc = new_con_state->crtc;
4997 	struct drm_crtc_state *new_crtc_state;
4998 	int ret;
4999 
5000 	if (!crtc)
5001 		return 0;
5002 
5003 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5004 		struct dc_info_packet hdr_infopacket;
5005 
5006 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5007 		if (ret)
5008 			return ret;
5009 
5010 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5011 		if (IS_ERR(new_crtc_state))
5012 			return PTR_ERR(new_crtc_state);
5013 
5014 		/*
5015 		 * DC considers the stream backends changed if the
5016 		 * static metadata changes. Forcing the modeset also
5017 		 * gives a simple way for userspace to switch from
5018 		 * 8bpc to 10bpc when setting the metadata to enter
5019 		 * or exit HDR.
5020 		 *
5021 		 * Changing the static metadata after it's been
5022 		 * set is permissible, however. So only force a
5023 		 * modeset if we're entering or exiting HDR.
5024 		 */
5025 		new_crtc_state->mode_changed =
5026 			!old_con_state->hdr_output_metadata ||
5027 			!new_con_state->hdr_output_metadata;
5028 	}
5029 
5030 	return 0;
5031 }
5032 
5033 static const struct drm_connector_helper_funcs
5034 amdgpu_dm_connector_helper_funcs = {
5035 	/*
5036 	 * When hotplugging a second, bigger display in fbcon mode, the bigger
5037 	 * resolution modes are filtered out by drm_mode_validate_size() and
5038 	 * end up missing after the user starts lightdm. So we need to renew
5039 	 * the mode list in the get_modes callback, not just return the mode count.
5040 	 */
5041 	.get_modes = get_modes,
5042 	.mode_valid = amdgpu_dm_connector_mode_valid,
5043 	.atomic_check = amdgpu_dm_connector_atomic_check,
5044 };
5045 
5046 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5047 {
5048 }
5049 
5050 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5051 {
5052 	struct drm_device *dev = new_crtc_state->crtc->dev;
5053 	struct drm_plane *plane;
5054 
5055 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5056 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5057 			return true;
5058 	}
5059 
5060 	return false;
5061 }
5062 
5063 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5064 {
5065 	struct drm_atomic_state *state = new_crtc_state->state;
5066 	struct drm_plane *plane;
5067 	int num_active = 0;
5068 
5069 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5070 		struct drm_plane_state *new_plane_state;
5071 
5072 		/* Cursor planes are "fake". */
5073 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5074 			continue;
5075 
5076 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5077 
5078 		if (!new_plane_state) {
5079 			/*
5080 			 * The plane is enabled on the CRTC and hasn't changed
5081 			 * state. This means that it previously passed
5082 			 * validation and is therefore enabled.
5083 			 */
5084 			num_active += 1;
5085 			continue;
5086 		}
5087 
5088 		/* We need a framebuffer to be considered enabled. */
5089 		num_active += (new_plane_state->fb != NULL);
5090 	}
5091 
5092 	return num_active;
5093 }
5094 
5095 /*
5096  * Sets whether interrupts should be enabled on a specific CRTC.
5097  * We require that the stream be enabled and that there exist active
5098  * DC planes on the stream.
5099  */
5100 static void
5101 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5102 			       struct drm_crtc_state *new_crtc_state)
5103 {
5104 	struct dm_crtc_state *dm_new_crtc_state =
5105 		to_dm_crtc_state(new_crtc_state);
5106 
5107 	dm_new_crtc_state->active_planes = 0;
5108 	dm_new_crtc_state->interrupts_enabled = false;
5109 
5110 	if (!dm_new_crtc_state->stream)
5111 		return;
5112 
5113 	dm_new_crtc_state->active_planes =
5114 		count_crtc_active_planes(new_crtc_state);
5115 
5116 	dm_new_crtc_state->interrupts_enabled =
5117 		dm_new_crtc_state->active_planes > 0;
5118 }
5119 
5120 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5121 				       struct drm_crtc_state *state)
5122 {
5123 	struct amdgpu_device *adev = crtc->dev->dev_private;
5124 	struct dc *dc = adev->dm.dc;
5125 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5126 	int ret = -EINVAL;
5127 
5128 	/*
5129 	 * Update interrupt state for the CRTC. This needs to happen whenever
5130 	 * the CRTC has changed or whenever any of its planes have changed.
5131 	 * Atomic check satisfies both of these requirements since the CRTC
5132 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5133 	 */
5134 	dm_update_crtc_interrupt_state(crtc, state);
5135 
5136 	if (unlikely(!dm_crtc_state->stream &&
5137 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5138 		WARN_ON(1);
5139 		return ret;
5140 	}
5141 
5142 	/* In some use cases, like reset, no stream is attached */
5143 	if (!dm_crtc_state->stream)
5144 		return 0;
5145 
5146 	/*
5147 	 * We want at least one hardware plane enabled to use
5148 	 * the stream with a cursor enabled.
5149 	 */
5150 	if (state->enable && state->active &&
5151 	    does_crtc_have_active_cursor(state) &&
5152 	    dm_crtc_state->active_planes == 0)
5153 		return -EINVAL;
5154 
5155 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5156 		return 0;
5157 
5158 	return ret;
5159 }
5160 
5161 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5162 				      const struct drm_display_mode *mode,
5163 				      struct drm_display_mode *adjusted_mode)
5164 {
5165 	return true;
5166 }
5167 
5168 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5169 	.disable = dm_crtc_helper_disable,
5170 	.atomic_check = dm_crtc_helper_atomic_check,
5171 	.mode_fixup = dm_crtc_helper_mode_fixup,
5172 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5173 };
5174 
5175 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5176 {
5177 
5178 }
5179 
5180 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5181 {
5182 	switch (display_color_depth) {
5183 	case COLOR_DEPTH_666:
5184 		return 6;
5185 	case COLOR_DEPTH_888:
5186 		return 8;
5187 	case COLOR_DEPTH_101010:
5188 		return 10;
5189 	case COLOR_DEPTH_121212:
5190 		return 12;
5191 	case COLOR_DEPTH_141414:
5192 		return 14;
5193 	case COLOR_DEPTH_161616:
5194 		return 16;
5195 	default:
5196 		break;
5197 	}
5198 	return 0;
5199 }
5200 
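/*
 * Encoder atomic check, used for DP MST bandwidth accounting: compute
 * the payload bandwidth number (PBN) from the adjusted mode's pixel
 * clock and the effective bits per pixel (bpc times 3 components), then
 * ask the MST manager for VCPI slots. For example, a mode at 8 bpc uses
 * drm_dp_calc_pbn_mode(clock, 24, false). A negative slot count is
 * returned to the caller as the error code.
 */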
5201 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5202 					  struct drm_crtc_state *crtc_state,
5203 					  struct drm_connector_state *conn_state)
5204 {
5205 	struct drm_atomic_state *state = crtc_state->state;
5206 	struct drm_connector *connector = conn_state->connector;
5207 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5208 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5209 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5210 	struct drm_dp_mst_topology_mgr *mst_mgr;
5211 	struct drm_dp_mst_port *mst_port;
5212 	enum dc_color_depth color_depth;
5213 	int clock, bpp = 0;
5214 	bool is_y420 = false;
5215 
5216 	if (!aconnector->port || !aconnector->dc_sink)
5217 		return 0;
5218 
5219 	mst_port = aconnector->port;
5220 	mst_mgr = &aconnector->mst_port->mst_mgr;
5221 
5222 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5223 		return 0;
5224 
5225 	if (!state->duplicated) {
5226 		int max_bpc = conn_state->max_requested_bpc;
5227 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5228 				aconnector->force_yuv420_output;
5229 		color_depth = convert_color_depth_from_display_info(connector,
5230 								    is_y420,
5231 								    max_bpc);
5232 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5233 		clock = adjusted_mode->clock;
5234 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5235 	}
5236 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5237 									   mst_mgr,
5238 									   mst_port,
5239 									   dm_new_connector_state->pbn,
5240 									   0);
5241 	if (dm_new_connector_state->vcpi_slots < 0) {
5242 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5243 		return dm_new_connector_state->vcpi_slots;
5244 	}
5245 	return 0;
5246 }
5247 
5248 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5249 	.disable = dm_encoder_helper_disable,
5250 	.atomic_check = dm_encoder_helper_atomic_check
5251 };
5252 
5253 #if defined(CONFIG_DRM_AMD_DC_DCN)
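/*
 * Walk all MST connectors in the atomic state, find the DC stream that
 * backs each one, and enable or disable DSC on its port. With DSC on,
 * the PBN is recomputed from the compressed bits per pixel and the
 * pixel clock (pix_clk_100hz / 10 yields kHz) before the VCPI slots
 * are reallocated.
 */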
5254 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5255 					    struct dc_state *dc_state)
5256 {
5257 	struct dc_stream_state *stream = NULL;
5258 	struct drm_connector *connector;
5259 	struct drm_connector_state *new_con_state, *old_con_state;
5260 	struct amdgpu_dm_connector *aconnector;
5261 	struct dm_connector_state *dm_conn_state;
5262 	int i, j, clock, bpp;
5263 	int vcpi, pbn_div, pbn = 0;
5264 
5265 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5266 
5267 		aconnector = to_amdgpu_dm_connector(connector);
5268 
5269 		if (!aconnector->port)
5270 			continue;
5271 
5272 		if (!new_con_state || !new_con_state->crtc)
5273 			continue;
5274 
5275 		dm_conn_state = to_dm_connector_state(new_con_state);
5276 
5277 		for (j = 0; j < dc_state->stream_count; j++) {
5278 			stream = dc_state->streams[j];
5279 			if (!stream)
5280 				continue;
5281 
5282 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5283 				break;
5284 
5285 			stream = NULL;
5286 		}
5287 
5288 		if (!stream)
5289 			continue;
5290 
5291 		if (stream->timing.flags.DSC != 1) {
5292 			drm_dp_mst_atomic_enable_dsc(state,
5293 						     aconnector->port,
5294 						     dm_conn_state->pbn,
5295 						     0,
5296 						     false);
5297 			continue;
5298 		}
5299 
5300 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5301 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5302 		clock = stream->timing.pix_clk_100hz / 10;
5303 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5304 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5305 						    aconnector->port,
5306 						    pbn, pbn_div,
5307 						    true);
5308 		if (vcpi < 0)
5309 			return vcpi;
5310 
5311 		dm_conn_state->pbn = pbn;
5312 		dm_conn_state->vcpi_slots = vcpi;
5313 	}
5314 	return 0;
5315 }
5316 #endif
5317 
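/*
 * DRM plane state management. dm_plane_state wraps drm_plane_state and
 * carries a reference-counted dc_plane_state: duplicate takes an extra
 * reference and destroy releases it, so the DC object lives exactly as
 * long as the DRM states that point to it.
 */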
5318 static void dm_drm_plane_reset(struct drm_plane *plane)
5319 {
5320 	struct dm_plane_state *amdgpu_state = NULL;
5321 
5322 	if (plane->state)
5323 		plane->funcs->atomic_destroy_state(plane, plane->state);
5324 
5325 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5326 	WARN_ON(amdgpu_state == NULL);
5327 
5328 	if (amdgpu_state)
5329 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5330 }
5331 
5332 static struct drm_plane_state *
5333 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5334 {
5335 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5336 
5337 	old_dm_plane_state = to_dm_plane_state(plane->state);
5338 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5339 	if (!dm_plane_state)
5340 		return NULL;
5341 
5342 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5343 
5344 	if (old_dm_plane_state->dc_state) {
5345 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5346 		dc_plane_state_retain(dm_plane_state->dc_state);
5347 	}
5348 
5349 	return &dm_plane_state->base;
5350 }
5351 
5352 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5353 				struct drm_plane_state *state)
5354 {
5355 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5356 
5357 	if (dm_plane_state->dc_state)
5358 		dc_plane_state_release(dm_plane_state->dc_state);
5359 
5360 	drm_atomic_helper_plane_destroy_state(plane, state);
5361 }
5362 
5363 static const struct drm_plane_funcs dm_plane_funcs = {
5364 	.update_plane	= drm_atomic_helper_update_plane,
5365 	.disable_plane	= drm_atomic_helper_disable_plane,
5366 	.destroy	= drm_primary_helper_destroy,
5367 	.reset = dm_drm_plane_reset,
5368 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5369 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5370 };
5371 
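/*
 * prepare_fb: reserve the framebuffer BO through TTM, pin it into a
 * scanout-capable domain (VRAM for cursors, any supported domain
 * otherwise), bind it into the GART and record the resulting GPU
 * address and tiling flags in the DC plane state. DCC is force-disabled
 * on Raven while suspending. The amdgpu_bo_ref() taken here is dropped
 * by the unref in cleanup_fb below.
 */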
5372 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5373 				      struct drm_plane_state *new_state)
5374 {
5375 	struct amdgpu_framebuffer *afb;
5376 	struct drm_gem_object *obj;
5377 	struct amdgpu_device *adev;
5378 	struct amdgpu_bo *rbo;
5379 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5380 	struct list_head list;
5381 	struct ttm_validate_buffer tv;
5382 	struct ww_acquire_ctx ticket;
5383 	uint64_t tiling_flags;
5384 	uint32_t domain;
5385 	int r;
5386 	bool force_disable_dcc = false;
5387 
5388 	dm_plane_state_old = to_dm_plane_state(plane->state);
5389 	dm_plane_state_new = to_dm_plane_state(new_state);
5390 
5391 	if (!new_state->fb) {
5392 		DRM_DEBUG_DRIVER("No FB bound\n");
5393 		return 0;
5394 	}
5395 
5396 	afb = to_amdgpu_framebuffer(new_state->fb);
5397 	obj = new_state->fb->obj[0];
5398 	rbo = gem_to_amdgpu_bo(obj);
5399 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5400 	INIT_LIST_HEAD(&list);
5401 
5402 	tv.bo = &rbo->tbo;
5403 	tv.num_shared = 1;
5404 	list_add(&tv.head, &list);
5405 
5406 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5407 	if (r) {
5408 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5409 		return r;
5410 	}
5411 
5412 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5413 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5414 	else
5415 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5416 
5417 	r = amdgpu_bo_pin(rbo, domain);
5418 	if (unlikely(r != 0)) {
5419 		if (r != -ERESTARTSYS)
5420 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5421 		ttm_eu_backoff_reservation(&ticket, &list);
5422 		return r;
5423 	}
5424 
5425 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5426 	if (unlikely(r != 0)) {
5427 		amdgpu_bo_unpin(rbo);
5428 		ttm_eu_backoff_reservation(&ticket, &list);
5429 		DRM_ERROR("%p bind failed\n", rbo);
5430 		return r;
5431 	}
5432 
5433 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5434 
5435 	ttm_eu_backoff_reservation(&ticket, &list);
5436 
5437 	afb->address = amdgpu_bo_gpu_offset(rbo);
5438 
5439 	amdgpu_bo_ref(rbo);
5440 
5441 	if (dm_plane_state_new->dc_state &&
5442 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5443 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5444 
5445 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5446 		fill_plane_buffer_attributes(
5447 			adev, afb, plane_state->format, plane_state->rotation,
5448 			tiling_flags, &plane_state->tiling_info,
5449 			&plane_state->plane_size, &plane_state->dcc,
5450 			&plane_state->address,
5451 			force_disable_dcc);
5452 	}
5453 
5454 	return 0;
5455 }
5456 
5457 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5458 				       struct drm_plane_state *old_state)
5459 {
5460 	struct amdgpu_bo *rbo;
5461 	int r;
5462 
5463 	if (!old_state->fb)
5464 		return;
5465 
5466 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5467 	r = amdgpu_bo_reserve(rbo, false);
5468 	if (unlikely(r)) {
5469 		DRM_ERROR("failed to reserve rbo before unpin\n");
5470 		return;
5471 	}
5472 
5473 	amdgpu_bo_unpin(rbo);
5474 	amdgpu_bo_unreserve(rbo);
5475 	amdgpu_bo_unref(&rbo);
5476 }
5477 
5478 static int dm_plane_atomic_check(struct drm_plane *plane,
5479 				 struct drm_plane_state *state)
5480 {
5481 	struct amdgpu_device *adev = plane->dev->dev_private;
5482 	struct dc *dc = adev->dm.dc;
5483 	struct dm_plane_state *dm_plane_state;
5484 	struct dc_scaling_info scaling_info;
5485 	int ret;
5486 
5487 	dm_plane_state = to_dm_plane_state(state);
5488 
5489 	if (!dm_plane_state->dc_state)
5490 		return 0;
5491 
5492 	ret = fill_dc_scaling_info(state, &scaling_info);
5493 	if (ret)
5494 		return ret;
5495 
5496 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5497 		return 0;
5498 
5499 	return -EINVAL;
5500 }
5501 
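/*
 * Async (non-vblank-synchronized) updates are only allowed for the
 * cursor plane; the update side copies the new src/crtc rectangles into
 * the current state and reprograms the cursor immediately.
 */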
5502 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5503 				       struct drm_plane_state *new_plane_state)
5504 {
5505 	/* Only support async updates on cursor planes. */
5506 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5507 		return -EINVAL;
5508 
5509 	return 0;
5510 }
5511 
5512 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5513 					 struct drm_plane_state *new_state)
5514 {
5515 	struct drm_plane_state *old_state =
5516 		drm_atomic_get_old_plane_state(new_state->state, plane);
5517 
5518 	swap(plane->state->fb, new_state->fb);
5519 
5520 	plane->state->src_x = new_state->src_x;
5521 	plane->state->src_y = new_state->src_y;
5522 	plane->state->src_w = new_state->src_w;
5523 	plane->state->src_h = new_state->src_h;
5524 	plane->state->crtc_x = new_state->crtc_x;
5525 	plane->state->crtc_y = new_state->crtc_y;
5526 	plane->state->crtc_w = new_state->crtc_w;
5527 	plane->state->crtc_h = new_state->crtc_h;
5528 
5529 	handle_cursor_update(plane, old_state);
5530 }
5531 
5532 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5533 	.prepare_fb = dm_plane_helper_prepare_fb,
5534 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5535 	.atomic_check = dm_plane_atomic_check,
5536 	.atomic_async_check = dm_plane_atomic_async_check,
5537 	.atomic_async_update = dm_plane_atomic_async_update
5538 };
5539 
5540 /*
5541  * TODO: These are currently initialized to RGB formats only.
5542  * For future use cases we should either initialize them dynamically based on
5543  * plane capabilities, or initialize this array to all formats, so the
5544  * internal DRM check will succeed, and let DC implement the proper check.
5545  */
5546 static const uint32_t rgb_formats[] = {
5547 	DRM_FORMAT_XRGB8888,
5548 	DRM_FORMAT_ARGB8888,
5549 	DRM_FORMAT_RGBA8888,
5550 	DRM_FORMAT_XRGB2101010,
5551 	DRM_FORMAT_XBGR2101010,
5552 	DRM_FORMAT_ARGB2101010,
5553 	DRM_FORMAT_ABGR2101010,
5554 	DRM_FORMAT_XBGR8888,
5555 	DRM_FORMAT_ABGR8888,
5556 	DRM_FORMAT_RGB565,
5557 };
5558 
5559 static const uint32_t overlay_formats[] = {
5560 	DRM_FORMAT_XRGB8888,
5561 	DRM_FORMAT_ARGB8888,
5562 	DRM_FORMAT_RGBA8888,
5563 	DRM_FORMAT_XBGR8888,
5564 	DRM_FORMAT_ABGR8888,
5565 	DRM_FORMAT_RGB565
5566 };
5567 
5568 static const u32 cursor_formats[] = {
5569 	DRM_FORMAT_ARGB8888
5570 };
5571 
5572 static int get_plane_formats(const struct drm_plane *plane,
5573 			     const struct dc_plane_cap *plane_cap,
5574 			     uint32_t *formats, int max_formats)
5575 {
5576 	int i, num_formats = 0;
5577 
5578 	/*
5579 	 * TODO: Query support for each group of formats directly from
5580 	 * DC plane caps. This will require adding more formats to the
5581 	 * caps list.
5582 	 */
5583 
5584 	switch (plane->type) {
5585 	case DRM_PLANE_TYPE_PRIMARY:
5586 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5587 			if (num_formats >= max_formats)
5588 				break;
5589 
5590 			formats[num_formats++] = rgb_formats[i];
5591 		}
5592 
5593 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5594 			formats[num_formats++] = DRM_FORMAT_NV12;
5595 		if (plane_cap && plane_cap->pixel_format_support.p010)
5596 			formats[num_formats++] = DRM_FORMAT_P010;
5597 		break;
5598 
5599 	case DRM_PLANE_TYPE_OVERLAY:
5600 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5601 			if (num_formats >= max_formats)
5602 				break;
5603 
5604 			formats[num_formats++] = overlay_formats[i];
5605 		}
5606 		break;
5607 
5608 	case DRM_PLANE_TYPE_CURSOR:
5609 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5610 			if (num_formats >= max_formats)
5611 				break;
5612 
5613 			formats[num_formats++] = cursor_formats[i];
5614 		}
5615 		break;
5616 	}
5617 
5618 	return num_formats;
5619 }
5620 
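/*
 * Register a DRM plane and attach the properties the hardware can
 * honour: per-pixel alpha and blend modes on overlays that support
 * them, and YCbCr encoding/range properties on primaries that can scan
 * out NV12 or P010.
 */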
5621 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5622 				struct drm_plane *plane,
5623 				unsigned long possible_crtcs,
5624 				const struct dc_plane_cap *plane_cap)
5625 {
5626 	uint32_t formats[32];
5627 	int num_formats;
5628 	int res = -EPERM;
5629 
5630 	num_formats = get_plane_formats(plane, plane_cap, formats,
5631 					ARRAY_SIZE(formats));
5632 
5633 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5634 				       &dm_plane_funcs, formats, num_formats,
5635 				       NULL, plane->type, NULL);
5636 	if (res)
5637 		return res;
5638 
5639 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5640 	    plane_cap && plane_cap->per_pixel_alpha) {
5641 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5642 					  BIT(DRM_MODE_BLEND_PREMULTI);
5643 
5644 		drm_plane_create_alpha_property(plane);
5645 		drm_plane_create_blend_mode_property(plane, blend_caps);
5646 	}
5647 
5648 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5649 	    plane_cap &&
5650 	    (plane_cap->pixel_format_support.nv12 ||
5651 	     plane_cap->pixel_format_support.p010)) {
5652 		/* This only affects YUV formats. */
5653 		drm_plane_create_color_properties(
5654 			plane,
5655 			BIT(DRM_COLOR_YCBCR_BT601) |
5656 			BIT(DRM_COLOR_YCBCR_BT709) |
5657 			BIT(DRM_COLOR_YCBCR_BT2020),
5658 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5659 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5660 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5661 	}
5662 
5663 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5664 
5665 	/* Create (reset) the plane state */
5666 	if (plane->funcs->reset)
5667 		plane->funcs->reset(plane);
5668 
5669 	return 0;
5670 }
5671 
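/*
 * Create an amdgpu_crtc from the given primary plane plus a dedicated
 * cursor plane, hook up the helper funcs, and advertise the DC cursor
 * size limits and the color management LUT sizes.
 */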
5672 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5673 			       struct drm_plane *plane,
5674 			       uint32_t crtc_index)
5675 {
5676 	struct amdgpu_crtc *acrtc = NULL;
5677 	struct drm_plane *cursor_plane;
5678 
5679 	int res = -ENOMEM;
5680 
5681 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5682 	if (!cursor_plane)
5683 		goto fail;
5684 
5685 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5686 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
5687 
5688 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5689 	if (!acrtc)
5690 		goto fail;
5691 
5692 	res = drm_crtc_init_with_planes(
5693 			dm->ddev,
5694 			&acrtc->base,
5695 			plane,
5696 			cursor_plane,
5697 			&amdgpu_dm_crtc_funcs, NULL);
5698 
5699 	if (res)
5700 		goto fail;
5701 
5702 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5703 
5704 	/* Create (reset) the CRTC state */
5705 	if (acrtc->base.funcs->reset)
5706 		acrtc->base.funcs->reset(&acrtc->base);
5707 
5708 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5709 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5710 
5711 	acrtc->crtc_id = crtc_index;
5712 	acrtc->base.enabled = false;
5713 	acrtc->otg_inst = -1;
5714 
5715 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5716 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5717 				   true, MAX_COLOR_LUT_ENTRIES);
5718 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5719 
5720 	return 0;
5721 
5722 fail:
5723 	kfree(acrtc);
5724 	kfree(cursor_plane);
5725 	return res;
5726 }
5727 
5728 
5729 static int to_drm_connector_type(enum amd_signal_type st)
5730 {
5731 	switch (st) {
5732 	case SIGNAL_TYPE_HDMI_TYPE_A:
5733 		return DRM_MODE_CONNECTOR_HDMIA;
5734 	case SIGNAL_TYPE_EDP:
5735 		return DRM_MODE_CONNECTOR_eDP;
5736 	case SIGNAL_TYPE_LVDS:
5737 		return DRM_MODE_CONNECTOR_LVDS;
5738 	case SIGNAL_TYPE_RGB:
5739 		return DRM_MODE_CONNECTOR_VGA;
5740 	case SIGNAL_TYPE_DISPLAY_PORT:
5741 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5742 		return DRM_MODE_CONNECTOR_DisplayPort;
5743 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5744 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5745 		return DRM_MODE_CONNECTOR_DVID;
5746 	case SIGNAL_TYPE_VIRTUAL:
5747 		return DRM_MODE_CONNECTOR_VIRTUAL;
5748 
5749 	default:
5750 		return DRM_MODE_CONNECTOR_Unknown;
5751 	}
5752 }
5753 
5754 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5755 {
5756 	struct drm_encoder *encoder;
5757 
5758 	/* There is only one encoder per connector */
5759 	drm_connector_for_each_possible_encoder(connector, encoder)
5760 		return encoder;
5761 
5762 	return NULL;
5763 }
5764 
5765 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5766 {
5767 	struct drm_encoder *encoder;
5768 	struct amdgpu_encoder *amdgpu_encoder;
5769 
5770 	encoder = amdgpu_dm_connector_to_encoder(connector);
5771 
5772 	if (encoder == NULL)
5773 		return;
5774 
5775 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5776 
5777 	amdgpu_encoder->native_mode.clock = 0;
5778 
5779 	if (!list_empty(&connector->probed_modes)) {
5780 		struct drm_display_mode *preferred_mode = NULL;
5781 
5782 		list_for_each_entry(preferred_mode,
5783 				    &connector->probed_modes,
5784 				    head) {
5785 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5786 				amdgpu_encoder->native_mode = *preferred_mode;
5787 
5788 			break;
5789 		}
5790 
5791 	}
5792 }
5793 
5794 static struct drm_display_mode *
5795 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5796 			     char *name,
5797 			     int hdisplay, int vdisplay)
5798 {
5799 	struct drm_device *dev = encoder->dev;
5800 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5801 	struct drm_display_mode *mode = NULL;
5802 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5803 
5804 	mode = drm_mode_duplicate(dev, native_mode);
5805 
5806 	if (mode == NULL)
5807 		return NULL;
5808 
5809 	mode->hdisplay = hdisplay;
5810 	mode->vdisplay = vdisplay;
5811 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5812 #ifdef __linux__
5813 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5814 #else
5815 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5816 #endif
5817 
5818 	return mode;
5819 
5820 }
5821 
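/*
 * Besides the EDID modes, expose a fixed set of common resolutions that
 * fit inside the panel's native mode so they can be scaled up by the
 * display hardware. The native size itself, anything larger, and modes
 * already probed are skipped.
 */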
5822 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5823 						 struct drm_connector *connector)
5824 {
5825 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5826 	struct drm_display_mode *mode = NULL;
5827 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5828 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5829 				to_amdgpu_dm_connector(connector);
5830 	int i;
5831 	int n;
5832 	struct mode_size {
5833 		char name[DRM_DISPLAY_MODE_LEN];
5834 		int w;
5835 		int h;
5836 	} common_modes[] = {
5837 		{  "640x480",  640,  480},
5838 		{  "800x600",  800,  600},
5839 		{ "1024x768", 1024,  768},
5840 		{ "1280x720", 1280,  720},
5841 		{ "1280x800", 1280,  800},
5842 		{"1280x1024", 1280, 1024},
5843 		{ "1440x900", 1440,  900},
5844 		{"1680x1050", 1680, 1050},
5845 		{"1600x1200", 1600, 1200},
5846 		{"1920x1080", 1920, 1080},
5847 		{"1920x1200", 1920, 1200}
5848 	};
5849 
5850 	n = ARRAY_SIZE(common_modes);
5851 
5852 	for (i = 0; i < n; i++) {
5853 		struct drm_display_mode *curmode = NULL;
5854 		bool mode_existed = false;
5855 
5856 		if (common_modes[i].w > native_mode->hdisplay ||
5857 		    common_modes[i].h > native_mode->vdisplay ||
5858 		   (common_modes[i].w == native_mode->hdisplay &&
5859 		    common_modes[i].h == native_mode->vdisplay))
5860 			continue;
5861 
5862 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5863 			if (common_modes[i].w == curmode->hdisplay &&
5864 			    common_modes[i].h == curmode->vdisplay) {
5865 				mode_existed = true;
5866 				break;
5867 			}
5868 		}
5869 
5870 		if (mode_existed)
5871 			continue;
5872 
5873 		mode = amdgpu_dm_create_common_mode(encoder,
5874 				common_modes[i].name, common_modes[i].w,
5875 				common_modes[i].h);
5876 		drm_mode_probed_add(connector, mode);
5877 		amdgpu_dm_connector->num_modes++;
5878 	}
5879 }
5880 
5881 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5882 					      struct edid *edid)
5883 {
5884 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5885 			to_amdgpu_dm_connector(connector);
5886 
5887 	if (edid) {
5888 		/* empty probed_modes */
5889 		INIT_LIST_HEAD(&connector->probed_modes);
5890 		amdgpu_dm_connector->num_modes =
5891 				drm_add_edid_modes(connector, edid);
5892 
5893 		/* Sort the probed modes before calling
5894 		 * amdgpu_dm_get_native_mode(), since an EDID can contain
5895 		 * more than one preferred mode. Modes later in the probed
5896 		 * list can have a higher preferred resolution; for example,
5897 		 * a 3840x2160 preferred timing in the base EDID and a
5898 		 * 4096x2160 preferred resolution in a later DID extension
5899 		 * block.
5900 		 */
5901 		drm_mode_sort(&connector->probed_modes);
5902 		amdgpu_dm_get_native_mode(connector);
5903 	} else {
5904 		amdgpu_dm_connector->num_modes = 0;
5905 	}
5906 }
5907 
5908 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5909 {
5910 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5911 			to_amdgpu_dm_connector(connector);
5912 	struct drm_encoder *encoder;
5913 	struct edid *edid = amdgpu_dm_connector->edid;
5914 
5915 	encoder = amdgpu_dm_connector_to_encoder(connector);
5916 
5917 	if (!edid || !drm_edid_is_valid(edid)) {
5918 		amdgpu_dm_connector->num_modes =
5919 				drm_add_modes_noedid(connector, 640, 480);
5920 	} else {
5921 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5922 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5923 	}
5924 	amdgpu_dm_fbc_init(connector);
5925 
5926 	return amdgpu_dm_connector->num_modes;
5927 }
5928 
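/*
 * Common connector initialization: allocate default connector state via
 * the reset hook, set HPD polling per connector type, and attach the
 * scaling, underscan, max bpc, HDR metadata, VRR and (when an HDCP
 * workqueue exists) content protection properties.
 */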
5929 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5930 				     struct amdgpu_dm_connector *aconnector,
5931 				     int connector_type,
5932 				     struct dc_link *link,
5933 				     int link_index)
5934 {
5935 	struct amdgpu_device *adev = dm->ddev->dev_private;
5936 
5937 	/*
5938 	 * Some of the properties below require access to state, like bpc.
5939 	 * Allocate some default initial connector state with our reset helper.
5940 	 */
5941 	if (aconnector->base.funcs->reset)
5942 		aconnector->base.funcs->reset(&aconnector->base);
5943 
5944 	aconnector->connector_id = link_index;
5945 	aconnector->dc_link = link;
5946 	aconnector->base.interlace_allowed = false;
5947 	aconnector->base.doublescan_allowed = false;
5948 	aconnector->base.stereo_allowed = false;
5949 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5950 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5951 	aconnector->audio_inst = -1;
5952 	rw_init(&aconnector->hpd_lock, "dmhpd");
5953 
5954 	/*
5955 	 * Configure HPD hot-plug support. connector->polled defaults to 0,
5956 	 * which means HPD hot plug is not supported.
5957 	 */
5958 	switch (connector_type) {
5959 	case DRM_MODE_CONNECTOR_HDMIA:
5960 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5961 		aconnector->base.ycbcr_420_allowed =
5962 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5963 		break;
5964 	case DRM_MODE_CONNECTOR_DisplayPort:
5965 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5966 		aconnector->base.ycbcr_420_allowed =
5967 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
5968 		break;
5969 	case DRM_MODE_CONNECTOR_DVID:
5970 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5971 		break;
5972 	default:
5973 		break;
5974 	}
5975 
5976 	drm_object_attach_property(&aconnector->base.base,
5977 				dm->ddev->mode_config.scaling_mode_property,
5978 				DRM_MODE_SCALE_NONE);
5979 
5980 	drm_object_attach_property(&aconnector->base.base,
5981 				adev->mode_info.underscan_property,
5982 				UNDERSCAN_OFF);
5983 	drm_object_attach_property(&aconnector->base.base,
5984 				adev->mode_info.underscan_hborder_property,
5985 				0);
5986 	drm_object_attach_property(&aconnector->base.base,
5987 				adev->mode_info.underscan_vborder_property,
5988 				0);
5989 
5990 	if (!aconnector->mst_port)
5991 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5992 
5993 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
5994 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5995 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5996 
5997 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5998 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5999 		drm_object_attach_property(&aconnector->base.base,
6000 				adev->mode_info.abm_level_property, 0);
6001 	}
6002 
6003 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6004 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6005 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6006 		drm_object_attach_property(
6007 			&aconnector->base.base,
6008 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6009 
6010 		if (!aconnector->mst_port)
6011 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6012 
6013 #ifdef CONFIG_DRM_AMD_DC_HDCP
6014 		if (adev->dm.hdcp_workqueue)
6015 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6016 #endif
6017 	}
6018 }
6019 
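/*
 * Translate an array of Linux i2c_msg into a single DC i2c_command and
 * submit it over the DDC channel recorded by create_i2c() below. Per
 * i2c_algorithm convention the message count is returned on success,
 * -EIO otherwise.
 */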
6020 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6021 			      struct i2c_msg *msgs, int num)
6022 {
6023 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6024 	struct ddc_service *ddc_service = i2c->ddc_service;
6025 	struct i2c_command cmd;
6026 	int i;
6027 	int result = -EIO;
6028 
6029 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6030 
6031 	if (!cmd.payloads)
6032 		return result;
6033 
6034 	cmd.number_of_payloads = num;
6035 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6036 	cmd.speed = 100;
6037 
6038 	for (i = 0; i < num; i++) {
6039 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6040 		cmd.payloads[i].address = msgs[i].addr;
6041 		cmd.payloads[i].length = msgs[i].len;
6042 		cmd.payloads[i].data = msgs[i].buf;
6043 	}
6044 
6045 	if (dc_submit_i2c(
6046 			ddc_service->ctx->dc,
6047 			ddc_service->ddc_pin->hw_info.ddc_channel,
6048 			&cmd))
6049 		result = num;
6050 
6051 	kfree(cmd.payloads);
6052 	return result;
6053 }
6054 
6055 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6056 {
6057 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6058 }
6059 
6060 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6061 	.master_xfer = amdgpu_dm_i2c_xfer,
6062 	.functionality = amdgpu_dm_i2c_func,
6063 };
6064 
6065 static struct amdgpu_i2c_adapter *
6066 create_i2c(struct ddc_service *ddc_service,
6067 	   int link_index,
6068 	   int *res)
6069 {
6070 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6071 	struct amdgpu_i2c_adapter *i2c;
6072 
6073 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6074 	if (!i2c)
6075 		return NULL;
6076 #ifdef notyet
6077 	i2c->base.owner = THIS_MODULE;
6078 	i2c->base.class = I2C_CLASS_DDC;
6079 	i2c->base.dev.parent = &adev->pdev->dev;
6080 #endif
6081 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6082 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6083 	i2c_set_adapdata(&i2c->base, i2c);
6084 	i2c->ddc_service = ddc_service;
6085 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6086 
6087 	return i2c;
6088 }
6089 
6090 
6091 /*
6092  * Note: this function assumes that dc_link_detect() was called for the
6093  * dc_link which will be represented by this aconnector.
6094  */
6095 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6096 				    struct amdgpu_dm_connector *aconnector,
6097 				    uint32_t link_index,
6098 				    struct amdgpu_encoder *aencoder)
6099 {
6100 	int res = 0;
6101 	int connector_type;
6102 	struct dc *dc = dm->dc;
6103 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6104 	struct amdgpu_i2c_adapter *i2c;
6105 
6106 	link->priv = aconnector;
6107 
6108 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6109 
6110 	i2c = create_i2c(link->ddc, link->link_index, &res);
6111 	if (!i2c) {
6112 		DRM_ERROR("Failed to create i2c adapter data\n");
6113 		return -ENOMEM;
6114 	}
6115 
6116 	aconnector->i2c = i2c;
6117 	res = i2c_add_adapter(&i2c->base);
6118 
6119 	if (res) {
6120 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6121 		goto out_free;
6122 	}
6123 
6124 	connector_type = to_drm_connector_type(link->connector_signal);
6125 
6126 	res = drm_connector_init_with_ddc(
6127 			dm->ddev,
6128 			&aconnector->base,
6129 			&amdgpu_dm_connector_funcs,
6130 			connector_type,
6131 			&i2c->base);
6132 
6133 	if (res) {
6134 		DRM_ERROR("connector_init failed\n");
6135 		aconnector->connector_id = -1;
6136 		goto out_free;
6137 	}
6138 
6139 	drm_connector_helper_add(
6140 			&aconnector->base,
6141 			&amdgpu_dm_connector_helper_funcs);
6142 
6143 	amdgpu_dm_connector_init_helper(
6144 		dm,
6145 		aconnector,
6146 		connector_type,
6147 		link,
6148 		link_index);
6149 
6150 	drm_connector_attach_encoder(
6151 		&aconnector->base, &aencoder->base);
6152 
6153 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6154 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6155 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6156 
6157 out_free:
6158 	if (res) {
6159 		kfree(i2c);
6160 		aconnector->i2c = NULL;
6161 	}
6162 	return res;
6163 }
6164 
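/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * e.g. four CRTCs yield 0xf. Counts above six are clamped to 0x3f.
 */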
6165 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6166 {
6167 	switch (adev->mode_info.num_crtc) {
6168 	case 1:
6169 		return 0x1;
6170 	case 2:
6171 		return 0x3;
6172 	case 3:
6173 		return 0x7;
6174 	case 4:
6175 		return 0xf;
6176 	case 5:
6177 		return 0x1f;
6178 	case 6:
6179 	default:
6180 		return 0x3f;
6181 	}
6182 }
6183 
6184 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6185 				  struct amdgpu_encoder *aencoder,
6186 				  uint32_t link_index)
6187 {
6188 	struct amdgpu_device *adev = dev->dev_private;
6189 
6190 	int res = drm_encoder_init(dev,
6191 				   &aencoder->base,
6192 				   &amdgpu_dm_encoder_funcs,
6193 				   DRM_MODE_ENCODER_TMDS,
6194 				   NULL);
6195 
6196 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6197 
6198 	if (!res)
6199 		aencoder->encoder_id = link_index;
6200 	else
6201 		aencoder->encoder_id = -1;
6202 
6203 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6204 
6205 	return res;
6206 }
6207 
6208 static void manage_dm_interrupts(struct amdgpu_device *adev,
6209 				 struct amdgpu_crtc *acrtc,
6210 				 bool enable)
6211 {
6212 	/*
6213 	 * This is not a correct translation, but it works as long as the
6214 	 * VBLANK constant is the same as the PFLIP constant.
6215 	 */
6216 	int irq_type =
6217 		amdgpu_display_crtc_idx_to_irq_type(
6218 			adev,
6219 			acrtc->crtc_id);
6220 
6221 	if (enable) {
6222 		drm_crtc_vblank_on(&acrtc->base);
6223 		amdgpu_irq_get(
6224 			adev,
6225 			&adev->pageflip_irq,
6226 			irq_type);
6227 	} else {
6228 
6229 		amdgpu_irq_put(
6230 			adev,
6231 			&adev->pageflip_irq,
6232 			irq_type);
6233 		drm_crtc_vblank_off(&acrtc->base);
6234 	}
6235 }
6236 
6237 static bool
6238 is_scaling_state_different(const struct dm_connector_state *dm_state,
6239 			   const struct dm_connector_state *old_dm_state)
6240 {
6241 	if (dm_state->scaling != old_dm_state->scaling)
6242 		return true;
6243 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6244 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6245 			return true;
6246 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6247 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6248 			return true;
6249 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6250 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6251 		return true;
6252 	return false;
6253 }
6254 
6255 #ifdef CONFIG_DRM_AMD_DC_HDCP
6256 static bool is_content_protection_different(struct drm_connector_state *state,
6257 					    const struct drm_connector_state *old_state,
6258 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6259 {
6260 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6261 
6262 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6263 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6264 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6265 		return true;
6266 	}
6267 
6268 	/* CP is being re-enabled; ignore this transition. */
6269 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6270 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6271 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6272 		return false;
6273 	}
6274 
6275 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6276 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6277 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6278 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6279 
6280 	/* Check if something is connected and enabled; otherwise we would start
6281 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6282 	 */
6283 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6284 	    aconnector->dc_sink != NULL)
6285 		return true;
6286 
6287 	if (old_state->content_protection == state->content_protection)
6288 		return false;
6289 
6290 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6291 		return true;
6292 
6293 	return false;
6294 }
6295 
6296 #endif
6297 static void remove_stream(struct amdgpu_device *adev,
6298 			  struct amdgpu_crtc *acrtc,
6299 			  struct dc_stream_state *stream)
6300 {
6301 	/* this is the update mode case */
6302 
6303 	acrtc->otg_inst = -1;
6304 	acrtc->enabled = false;
6305 }
6306 
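/*
 * Derive the DC cursor position from the plane state. Negative CRTC
 * coordinates are clamped to zero and folded into the hotspot so the
 * cursor can hang off the top-left edge; a cursor that is entirely off
 * screen is simply left disabled.
 */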
6307 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6308 			       struct dc_cursor_position *position)
6309 {
6310 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6311 	int x, y;
6312 	int xorigin = 0, yorigin = 0;
6313 
6314 	position->enable = false;
6315 	position->x = 0;
6316 	position->y = 0;
6317 
6318 	if (!crtc || !plane->state->fb)
6319 		return 0;
6320 
6321 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6322 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6323 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6324 			  __func__,
6325 			  plane->state->crtc_w,
6326 			  plane->state->crtc_h);
6327 		return -EINVAL;
6328 	}
6329 
6330 	x = plane->state->crtc_x;
6331 	y = plane->state->crtc_y;
6332 
6333 	if (x <= -amdgpu_crtc->max_cursor_width ||
6334 	    y <= -amdgpu_crtc->max_cursor_height)
6335 		return 0;
6336 
6337 	if (x < 0) {
6338 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6339 		x = 0;
6340 	}
6341 	if (y < 0) {
6342 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6343 		y = 0;
6344 	}
6345 	position->enable = true;
6346 	position->translate_by_source = true;
6347 	position->x = x;
6348 	position->y = y;
6349 	position->x_hotspot = xorigin;
6350 	position->y_hotspot = yorigin;
6351 
6352 	return 0;
6353 }
6354 
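/*
 * Program the hardware cursor from the plane state: resolve the clamped
 * position, then set the cursor attributes (address, size,
 * pre-multiplied ARGB) and position on the DC stream under the dc_lock.
 * A disabled position turns the cursor off.
 */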
6355 static void handle_cursor_update(struct drm_plane *plane,
6356 				 struct drm_plane_state *old_plane_state)
6357 {
6358 	struct amdgpu_device *adev = plane->dev->dev_private;
6359 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6360 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6361 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6362 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6363 	uint64_t address = afb ? afb->address : 0;
6364 	struct dc_cursor_position position;
6365 	struct dc_cursor_attributes attributes;
6366 	int ret;
6367 
6368 	if (!plane->state->fb && !old_plane_state->fb)
6369 		return;
6370 
6371 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6372 			 __func__,
6373 			 amdgpu_crtc->crtc_id,
6374 			 plane->state->crtc_w,
6375 			 plane->state->crtc_h);
6376 
6377 	ret = get_cursor_position(plane, crtc, &position);
6378 	if (ret)
6379 		return;
6380 
6381 	if (!position.enable) {
6382 		/* turn off cursor */
6383 		if (crtc_state && crtc_state->stream) {
6384 			mutex_lock(&adev->dm.dc_lock);
6385 			dc_stream_set_cursor_position(crtc_state->stream,
6386 						      &position);
6387 			mutex_unlock(&adev->dm.dc_lock);
6388 		}
6389 		return;
6390 	}
6391 
6392 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6393 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6394 
6395 	memset(&attributes, 0, sizeof(attributes));
6396 	attributes.address.high_part = upper_32_bits(address);
6397 	attributes.address.low_part  = lower_32_bits(address);
6398 	attributes.width             = plane->state->crtc_w;
6399 	attributes.height            = plane->state->crtc_h;
6400 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6401 	attributes.rotation_angle    = 0;
6402 	attributes.attribute_flags.value = 0;
6403 
6404 	attributes.pitch = attributes.width;
6405 
6406 	if (crtc_state->stream) {
6407 		mutex_lock(&adev->dm.dc_lock);
6408 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6409 							 &attributes))
6410 			DRM_ERROR("DC failed to set cursor attributes\n");
6411 
6412 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6413 						   &position))
6414 			DRM_ERROR("DC failed to set cursor position\n");
6415 		mutex_unlock(&adev->dm.dc_lock);
6416 	}
6417 }
6418 
6419 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6420 {
6421 
6422 	assert_spin_locked(&acrtc->base.dev->event_lock);
6423 	WARN_ON(acrtc->event);
6424 
6425 	acrtc->event = acrtc->base.state->event;
6426 
6427 	/* Set the flip status */
6428 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6429 
6430 	/* Mark this event as consumed */
6431 	acrtc->base.state->event = NULL;
6432 
6433 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6434 						 acrtc->crtc_id);
6435 }
6436 
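/*
 * Recompute the VRR parameters and the FreeSync infopacket for a stream
 * around a page flip, under the event lock. On pre-AI families the
 * vmin/vmax adjustment is also reprogrammed here before the frame ends.
 * Change flags are accumulated so the commit path knows whether the
 * timing or the infopacket needs to be resent.
 */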
6437 static void update_freesync_state_on_stream(
6438 	struct amdgpu_display_manager *dm,
6439 	struct dm_crtc_state *new_crtc_state,
6440 	struct dc_stream_state *new_stream,
6441 	struct dc_plane_state *surface,
6442 	u32 flip_timestamp_in_us)
6443 {
6444 	struct mod_vrr_params vrr_params;
6445 	struct dc_info_packet vrr_infopacket = {0};
6446 	struct amdgpu_device *adev = dm->adev;
6447 	unsigned long flags;
6448 
6449 	if (!new_stream)
6450 		return;
6451 
6452 	/*
6453 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6454 	 * For now it's sufficient to just guard against these conditions.
6455 	 */
6456 
6457 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6458 		return;
6459 
6460 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6461 	vrr_params = new_crtc_state->vrr_params;
6462 
6463 	if (surface) {
6464 		mod_freesync_handle_preflip(
6465 			dm->freesync_module,
6466 			surface,
6467 			new_stream,
6468 			flip_timestamp_in_us,
6469 			&vrr_params);
6470 
6471 		if (adev->family < AMDGPU_FAMILY_AI &&
6472 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6473 			mod_freesync_handle_v_update(dm->freesync_module,
6474 						     new_stream, &vrr_params);
6475 
6476 			/* Need to call this before the frame ends. */
6477 			dc_stream_adjust_vmin_vmax(dm->dc,
6478 						   new_crtc_state->stream,
6479 						   &vrr_params.adjust);
6480 		}
6481 	}
6482 
6483 	mod_freesync_build_vrr_infopacket(
6484 		dm->freesync_module,
6485 		new_stream,
6486 		&vrr_params,
6487 		PACKET_TYPE_VRR,
6488 		TRANSFER_FUNC_UNKNOWN,
6489 		&vrr_infopacket);
6490 
6491 	new_crtc_state->freesync_timing_changed |=
6492 		(memcmp(&new_crtc_state->vrr_params.adjust,
6493 			&vrr_params.adjust,
6494 			sizeof(vrr_params.adjust)) != 0);
6495 
6496 	new_crtc_state->freesync_vrr_info_changed |=
6497 		(memcmp(&new_crtc_state->vrr_infopacket,
6498 			&vrr_infopacket,
6499 			sizeof(vrr_infopacket)) != 0);
6500 
6501 	new_crtc_state->vrr_params = vrr_params;
6502 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6503 
6504 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6505 	new_stream->vrr_infopacket = vrr_infopacket;
6506 
6507 	if (new_crtc_state->freesync_vrr_info_changed)
6508 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6509 			      new_crtc_state->base.crtc->base.id,
6510 			      (int)new_crtc_state->base.vrr_enabled,
6511 			      (int)vrr_params.state);
6512 
6513 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6514 }
6515 
6516 static void pre_update_freesync_state_on_stream(
6517 	struct amdgpu_display_manager *dm,
6518 	struct dm_crtc_state *new_crtc_state)
6519 {
6520 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6521 	struct mod_vrr_params vrr_params;
6522 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6523 	struct amdgpu_device *adev = dm->adev;
6524 	unsigned long flags;
6525 
6526 	if (!new_stream)
6527 		return;
6528 
6529 	/*
6530 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6531 	 * For now it's sufficient to just guard against these conditions.
6532 	 */
6533 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6534 		return;
6535 
6536 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6537 	vrr_params = new_crtc_state->vrr_params;
6538 
6539 	if (new_crtc_state->vrr_supported &&
6540 	    config.min_refresh_in_uhz &&
6541 	    config.max_refresh_in_uhz) {
6542 		config.state = new_crtc_state->base.vrr_enabled ?
6543 			VRR_STATE_ACTIVE_VARIABLE :
6544 			VRR_STATE_INACTIVE;
6545 	} else {
6546 		config.state = VRR_STATE_UNSUPPORTED;
6547 	}
6548 
6549 	mod_freesync_build_vrr_params(dm->freesync_module,
6550 				      new_stream,
6551 				      &config, &vrr_params);
6552 
6553 	new_crtc_state->freesync_timing_changed |=
6554 		(memcmp(&new_crtc_state->vrr_params.adjust,
6555 			&vrr_params.adjust,
6556 			sizeof(vrr_params.adjust)) != 0);
6557 
6558 	new_crtc_state->vrr_params = vrr_params;
6559 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6560 }
6561 
6562 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6563 					    struct dm_crtc_state *new_state)
6564 {
6565 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6566 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6567 
6568 	if (!old_vrr_active && new_vrr_active) {
6569 		/* Transition VRR inactive -> active:
6570 		 * While VRR is active, we must not disable vblank irq, as a
6571 		 * reenable after disable would compute bogus vblank/pflip
6572 		 * timestamps, as it would likely happen inside the display front porch.
6573 		 *
6574 		 * We also need vupdate irq for the actual core vblank handling
6575 		 * at end of vblank.
6576 		 */
6577 		dm_set_vupdate_irq(new_state->base.crtc, true);
6578 		drm_crtc_vblank_get(new_state->base.crtc);
6579 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6580 				 __func__, new_state->base.crtc->base.id);
6581 	} else if (old_vrr_active && !new_vrr_active) {
6582 		/* Transition VRR active -> inactive:
6583 		 * Allow vblank irq disable again for fixed refresh rate.
6584 		 */
6585 		dm_set_vupdate_irq(new_state->base.crtc, false);
6586 		drm_crtc_vblank_put(new_state->base.crtc);
6587 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6588 				 __func__, new_state->base.crtc->base.id);
6589 	}
6590 }
6591 
6592 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6593 {
6594 	struct drm_plane *plane;
6595 	struct drm_plane_state *old_plane_state, *new_plane_state;
6596 	int i;
6597 
6598 	/*
6599 	 * TODO: Make this per-stream so we don't issue redundant updates for
6600 	 * commits with multiple streams.
6601 	 */
6602 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6603 				       new_plane_state, i)
6604 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6605 			handle_cursor_update(plane, old_plane_state);
6606 }
6607 
6608 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
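/*
 * Program all non-cursor planes of one CRTC as a single DC update
 * bundle. For each flipped plane, wait (bounded to 5 s) on the BO's
 * fences, snapshot the tiling flags, and fill in the flip address.
 * Flips are throttled against the previous vblank count (fixed refresh)
 * or the last completed flip (VRR) before the bundle is committed under
 * dm->dc_lock, where PSR is also set up or disabled as needed.
 */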
6609 				    struct dc_state *dc_state,
6610 				    struct drm_device *dev,
6611 				    struct amdgpu_display_manager *dm,
6612 				    struct drm_crtc *pcrtc,
6613 				    bool wait_for_vblank)
6614 {
6615 	uint32_t i;
6616 	uint64_t timestamp_ns;
6617 	struct drm_plane *plane;
6618 	struct drm_plane_state *old_plane_state, *new_plane_state;
6619 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6620 	struct drm_crtc_state *new_pcrtc_state =
6621 			drm_atomic_get_new_crtc_state(state, pcrtc);
6622 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6623 	struct dm_crtc_state *dm_old_crtc_state =
6624 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6625 	int planes_count = 0, vpos, hpos;
6626 	long r;
6627 	unsigned long flags;
6628 	struct amdgpu_bo *abo;
6629 	uint64_t tiling_flags;
6630 	uint32_t target_vblank, last_flip_vblank;
6631 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6632 	bool pflip_present = false;
6633 	struct {
6634 		struct dc_surface_update surface_updates[MAX_SURFACES];
6635 		struct dc_plane_info plane_infos[MAX_SURFACES];
6636 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6637 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6638 		struct dc_stream_update stream_update;
6639 	} *bundle;
6640 
6641 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6642 
6643 	if (!bundle) {
6644 		dm_error("Failed to allocate update bundle\n");
6645 		goto cleanup;
6646 	}
6647 
6648 	/*
6649 	 * Disable the cursor first if we're disabling all the planes.
6650 	 * It'll remain on the screen after the planes are re-enabled
6651 	 * if we don't.
6652 	 */
6653 	if (acrtc_state->active_planes == 0)
6654 		amdgpu_dm_commit_cursors(state);
6655 
6656 	/* update planes when needed */
6657 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6658 		struct drm_crtc *crtc = new_plane_state->crtc;
6659 		struct drm_crtc_state *new_crtc_state;
6660 		struct drm_framebuffer *fb = new_plane_state->fb;
6661 		bool plane_needs_flip;
6662 		struct dc_plane_state *dc_plane;
6663 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6664 
6665 		/* Cursor plane is handled after stream updates */
6666 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6667 			continue;
6668 
6669 		if (!fb || !crtc || pcrtc != crtc)
6670 			continue;
6671 
6672 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6673 		if (!new_crtc_state->active)
6674 			continue;
6675 
6676 		dc_plane = dm_new_plane_state->dc_state;
6677 
6678 		bundle->surface_updates[planes_count].surface = dc_plane;
6679 		if (new_pcrtc_state->color_mgmt_changed) {
6680 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6681 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6682 		}
6683 
6684 		fill_dc_scaling_info(new_plane_state,
6685 				     &bundle->scaling_infos[planes_count]);
6686 
6687 		bundle->surface_updates[planes_count].scaling_info =
6688 			&bundle->scaling_infos[planes_count];
6689 
6690 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6691 
6692 		pflip_present = pflip_present || plane_needs_flip;
6693 
6694 		if (!plane_needs_flip) {
6695 			planes_count += 1;
6696 			continue;
6697 		}
6698 
6699 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6700 
6701 		/*
6702 		 * Wait for all fences on this FB. Use a limited wait to avoid
6703 		 * deadlocking during GPU reset, when the fence will not signal
6704 		 * but we still hold the reservation lock for the BO.
6705 		 */
6706 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6707 							false,
6708 							msecs_to_jiffies(5000));
6709 		if (unlikely(r <= 0))
6710 			DRM_ERROR("Waiting for fences timed out!");
6711 
6712 		/*
6713 		 * TODO: This might fail and hence is better not used; wait
6714 		 * explicitly on the fences instead. In general this should
6715 		 * be called from a blocking commit, as per the framework
6716 		 * helpers.
6717 		 */
6718 		r = amdgpu_bo_reserve(abo, true);
6719 		if (unlikely(r != 0))
6720 			DRM_ERROR("failed to reserve buffer before flip\n");
6721 
6722 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6723 
6724 		amdgpu_bo_unreserve(abo);
6725 
6726 		fill_dc_plane_info_and_addr(
6727 			dm->adev, new_plane_state, tiling_flags,
6728 			&bundle->plane_infos[planes_count],
6729 			&bundle->flip_addrs[planes_count].address,
6730 			false);
6731 
6732 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6733 				 new_plane_state->plane->index,
6734 				 bundle->plane_infos[planes_count].dcc.enable);
6735 
6736 		bundle->surface_updates[planes_count].plane_info =
6737 			&bundle->plane_infos[planes_count];
6738 
6739 		/*
6740 		 * Only allow immediate flips for fast updates that don't
6741 		 * change FB pitch, DCC state, rotation or mirroring.
6742 		 */
6743 		bundle->flip_addrs[planes_count].flip_immediate =
6744 			crtc->state->async_flip &&
6745 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6746 
6747 		timestamp_ns = ktime_get_ns();
6748 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6749 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6750 		bundle->surface_updates[planes_count].surface = dc_plane;
6751 
6752 		if (!bundle->surface_updates[planes_count].surface) {
6753 			DRM_ERROR("No surface for CRTC: id=%d\n",
6754 					acrtc_attach->crtc_id);
6755 			continue;
6756 		}
6757 
6758 		if (plane == pcrtc->primary)
6759 			update_freesync_state_on_stream(
6760 				dm,
6761 				acrtc_state,
6762 				acrtc_state->stream,
6763 				dc_plane,
6764 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6765 
6766 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6767 				 __func__,
6768 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6769 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6770 
6771 		planes_count += 1;
6772 
6773 	}
6774 
6775 	if (pflip_present) {
6776 		if (!vrr_active) {
6777 			/* Use old throttling in non-vrr fixed refresh rate mode
6778 			 * to keep flip scheduling based on target vblank counts
6779 			 * working in a backwards compatible way, e.g., for
6780 			 * clients using the GLX_OML_sync_control extension or
6781 			 * DRI3/Present extension with defined target_msc.
6782 			 */
6783 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6784 		}
6785 		else {
6786 			/* For variable refresh rate mode only:
6787 			 * Get vblank of last completed flip to avoid > 1 vrr
6788 			 * flips per video frame by use of throttling, but allow
6789 			 * flip programming anywhere in the possibly large
6790 			 * variable vrr vblank interval for fine-grained flip
6791 			 * timing control and more opportunity to avoid stutter
6792 			 * on late submission of flips.
6793 			 */
6794 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6795 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6796 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6797 		}
6798 
6799 		target_vblank = last_flip_vblank + wait_for_vblank;
6800 
6801 		/*
6802 		 * Wait until we're out of the vertical blank period before the one
6803 		 * targeted by the flip
6804 		 */
6805 		while ((acrtc_attach->enabled &&
6806 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6807 							    0, &vpos, &hpos, NULL,
6808 							    NULL, &pcrtc->hwmode)
6809 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6810 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6811 			(int)(target_vblank -
6812 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6813 			usleep_range(1000, 1100);
6814 		}
6815 
6816 		if (acrtc_attach->base.state->event) {
6817 			drm_crtc_vblank_get(pcrtc);
6818 
6819 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6820 
6821 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6822 			prepare_flip_isr(acrtc_attach);
6823 
6824 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6825 		}
6826 
6827 		if (acrtc_state->stream) {
6828 			if (acrtc_state->freesync_vrr_info_changed)
6829 				bundle->stream_update.vrr_infopacket =
6830 					&acrtc_state->stream->vrr_infopacket;
6831 		}
6832 	}
6833 
6834 	/* Update the planes if changed or disable if we don't have any. */
6835 	if ((planes_count || acrtc_state->active_planes == 0) &&
6836 		acrtc_state->stream) {
6837 		bundle->stream_update.stream = acrtc_state->stream;
6838 		if (new_pcrtc_state->mode_changed) {
6839 			bundle->stream_update.src = acrtc_state->stream->src;
6840 			bundle->stream_update.dst = acrtc_state->stream->dst;
6841 		}
6842 
6843 		if (new_pcrtc_state->color_mgmt_changed) {
6844 			/*
6845 			 * TODO: This isn't fully correct since we've actually
6846 			 * already modified the stream in place.
6847 			 */
6848 			bundle->stream_update.gamut_remap =
6849 				&acrtc_state->stream->gamut_remap_matrix;
6850 			bundle->stream_update.output_csc_transform =
6851 				&acrtc_state->stream->csc_color_matrix;
6852 			bundle->stream_update.out_transfer_func =
6853 				acrtc_state->stream->out_transfer_func;
6854 		}
6855 
6856 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6857 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6858 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6859 
6860 		/*
6861 		 * If FreeSync state on the stream has changed then we need to
6862 		 * re-adjust the min/max bounds now that DC doesn't handle this
6863 		 * as part of commit.
6864 		 */
6865 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6866 		    amdgpu_dm_vrr_active(acrtc_state)) {
6867 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6868 			dc_stream_adjust_vmin_vmax(
6869 				dm->dc, acrtc_state->stream,
6870 				&acrtc_state->vrr_params.adjust);
6871 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6872 		}
6873 		mutex_lock(&dm->dc_lock);
6874 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6875 				acrtc_state->stream->link->psr_allow_active)
6876 			amdgpu_dm_psr_disable(acrtc_state->stream);
6877 
6878 		dc_commit_updates_for_stream(dm->dc,
6879 						     bundle->surface_updates,
6880 						     planes_count,
6881 						     acrtc_state->stream,
6882 						     &bundle->stream_update,
6883 						     dc_state);
6884 
6885 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6886 						acrtc_state->stream->psr_version &&
6887 						!acrtc_state->stream->link->psr_feature_enabled)
6888 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6889 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6890 						acrtc_state->stream->link->psr_feature_enabled &&
6891 						!acrtc_state->stream->link->psr_allow_active) {
6892 			amdgpu_dm_psr_enable(acrtc_state->stream);
6893 		}
6894 
6895 		mutex_unlock(&dm->dc_lock);
6896 	}
6897 
6898 	/*
6899 	 * Update cursor state *after* programming all the planes.
6900 	 * This avoids redundant programming in the case where we're
6901 	 * disabling a single plane, since those pipes are being disabled.
6902 	 */
6903 	if (acrtc_state->active_planes)
6904 		amdgpu_dm_commit_cursors(state);
6905 
6906 cleanup:
6907 	kfree(bundle);
6908 }
6909 
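/*
 * Notify the audio client (presumably the HDA driver bound through the
 * DRM audio component) of ELD changes: removals first, for connectors
 * whose CRTC changed or needs a modeset, then additions for the newly
 * routed streams.
 */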
6910 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6911 				   struct drm_atomic_state *state)
6912 {
6913 	struct amdgpu_device *adev = dev->dev_private;
6914 	struct amdgpu_dm_connector *aconnector;
6915 	struct drm_connector *connector;
6916 	struct drm_connector_state *old_con_state, *new_con_state;
6917 	struct drm_crtc_state *new_crtc_state;
6918 	struct dm_crtc_state *new_dm_crtc_state;
6919 	const struct dc_stream_status *status;
6920 	int i, inst;
6921 
6922 	/* Notify audio device removals. */
6923 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6924 		if (old_con_state->crtc != new_con_state->crtc) {
6925 			/* CRTC changes require notification. */
6926 			goto notify;
6927 		}
6928 
6929 		if (!new_con_state->crtc)
6930 			continue;
6931 
6932 		new_crtc_state = drm_atomic_get_new_crtc_state(
6933 			state, new_con_state->crtc);
6934 
6935 		if (!new_crtc_state)
6936 			continue;
6937 
6938 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6939 			continue;
6940 
6941 	notify:
6942 		aconnector = to_amdgpu_dm_connector(connector);
6943 
6944 		mutex_lock(&adev->dm.audio_lock);
6945 		inst = aconnector->audio_inst;
6946 		aconnector->audio_inst = -1;
6947 		mutex_unlock(&adev->dm.audio_lock);
6948 
6949 		amdgpu_dm_audio_eld_notify(adev, inst);
6950 	}
6951 
6952 	/* Notify audio device additions. */
6953 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6954 		if (!new_con_state->crtc)
6955 			continue;
6956 
6957 		new_crtc_state = drm_atomic_get_new_crtc_state(
6958 			state, new_con_state->crtc);
6959 
6960 		if (!new_crtc_state)
6961 			continue;
6962 
6963 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6964 			continue;
6965 
6966 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6967 		if (!new_dm_crtc_state->stream)
6968 			continue;
6969 
6970 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6971 		if (!status)
6972 			continue;
6973 
6974 		aconnector = to_amdgpu_dm_connector(connector);
6975 
6976 		mutex_lock(&adev->dm.audio_lock);
6977 		inst = status->audio_inst;
6978 		aconnector->audio_inst = inst;
6979 		mutex_unlock(&adev->dm.audio_lock);
6980 
6981 		amdgpu_dm_audio_eld_notify(adev, inst);
6982 	}
6983 }
6984 
6985 /*
6986  * Enable interrupts on CRTCs that are newly active, have undergone
6987  * a modeset, or have active planes again.
6988  *
6989  * Done in two passes, based on the for_modeset flag:
6990  * Pass 1: For CRTCs going through modeset
6991  * Pass 2: For CRTCs going from 0 to n active planes
6992  *
6993  * Interrupts can only be enabled after the planes are programmed,
6994  * so this requires a two-pass approach since we don't want to
6995  * just defer the interrupts until after commit planes every time.
6996  */
6997 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6998 					     struct drm_atomic_state *state,
6999 					     bool for_modeset)
7000 {
7001 	struct amdgpu_device *adev = dev->dev_private;
7002 	struct drm_crtc *crtc;
7003 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7004 	int i;
7005 #ifdef CONFIG_DEBUG_FS
7006 	enum amdgpu_dm_pipe_crc_source source;
7007 #endif
7008 
7009 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7010 				      new_crtc_state, i) {
7011 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7012 		struct dm_crtc_state *dm_new_crtc_state =
7013 			to_dm_crtc_state(new_crtc_state);
7014 		struct dm_crtc_state *dm_old_crtc_state =
7015 			to_dm_crtc_state(old_crtc_state);
7016 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
7017 		bool run_pass;
7018 
7019 		run_pass = (for_modeset && modeset) ||
7020 			   (!for_modeset && !modeset &&
7021 			    !dm_old_crtc_state->interrupts_enabled);
7022 
7023 		if (!run_pass)
7024 			continue;
7025 
7026 		if (!dm_new_crtc_state->interrupts_enabled)
7027 			continue;
7028 
7029 		manage_dm_interrupts(adev, acrtc, true);
7030 
7031 #ifdef CONFIG_DEBUG_FS
7032 		/* The stream has changed so CRC capture needs to be re-enabled. */
7033 		source = dm_new_crtc_state->crc_src;
7034 		if (amdgpu_dm_is_valid_crc_source(source)) {
7035 			amdgpu_dm_crtc_configure_crc_source(
7036 				crtc, dm_new_crtc_state,
7037 				dm_new_crtc_state->crc_src);
7038 		}
7039 #endif
7040 	}
7041 }
7042 
7043 /*
7044  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7045  * @crtc_state: the DRM CRTC state
7046  * @stream_state: the DC stream state.
7047  *
7048  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7049  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7050  */
7051 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7052 						struct dc_stream_state *stream_state)
7053 {
7054 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7055 }
7056 
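/*
 * Top-level atomic commit entry point for the DM. Interrupts are torn
 * down up front for CRTCs being reconfigured (see the comment in the
 * body), then the rest of the commit is delegated to
 * drm_atomic_helper_commit(), which runs amdgpu_dm_atomic_commit_tail()
 * through the helper framework.
 */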
7057 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7058 				   struct drm_atomic_state *state,
7059 				   bool nonblock)
7060 {
7061 	struct drm_crtc *crtc;
7062 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7063 	struct amdgpu_device *adev = dev->dev_private;
7064 	int i;
7065 
7066 	/*
7067 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7068 	 * a modeset, being disabled, or have no active planes.
7069 	 *
7070 	 * It's done in atomic commit rather than commit tail for now since
7071 	 * some of these interrupt handlers access the current CRTC state and
7072 	 * potentially the stream pointer itself.
7073 	 *
7074 	 * Since the atomic state is swapped within atomic commit and not within
7075 	 * commit tail, this would lead to the new state (that hasn't been
7076 	 * committed yet) being accessed from within the handlers.
7077 	 *
7078 	 * TODO: Fix this so we can do this in commit tail and not have to block
7079 	 * in atomic check.
7080 	 */
7081 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7082 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7083 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7084 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7085 
7086 		if (dm_old_crtc_state->interrupts_enabled &&
7087 		    (!dm_new_crtc_state->interrupts_enabled ||
7088 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7089 			manage_dm_interrupts(adev, acrtc, false);
7090 	}
7091 	/*
7092 	 * Add a check here for SoCs that support a hardware cursor plane, to
7093 	 * unset legacy_cursor_update.
7094 	 */
7095 
7096 	return drm_atomic_helper_commit(dev, state, nonblock);
7097 
7098 	/* TODO: Handle EINTR, re-enable IRQ */
7099 }
7100 
7101 /**
7102  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7103  * @state: The atomic state to commit
7104  *
7105  * This will tell DC to commit the constructed DC state from atomic_check,
7106  * programming the hardware. Any failures here implies a hardware failure, since
7107  * programming the hardware. Any failure here implies a hardware failure, since
7108  */
7109 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7110 {
7111 	struct drm_device *dev = state->dev;
7112 	struct amdgpu_device *adev = dev->dev_private;
7113 	struct amdgpu_display_manager *dm = &adev->dm;
7114 	struct dm_atomic_state *dm_state;
7115 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7116 	uint32_t i, j;
7117 	struct drm_crtc *crtc;
7118 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7119 	unsigned long flags;
7120 	bool wait_for_vblank = true;
7121 	struct drm_connector *connector;
7122 	struct drm_connector_state *old_con_state, *new_con_state;
7123 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7124 	int crtc_disable_count = 0;
7125 
7126 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7127 
7128 	dm_state = dm_atomic_get_new_state(state);
7129 	if (dm_state && dm_state->context) {
7130 		dc_state = dm_state->context;
7131 	} else {
7132 		/* No state changes, retain current state. */
7133 		dc_state_temp = dc_create_state(dm->dc);
7134 		ASSERT(dc_state_temp);
7135 		dc_state = dc_state_temp;
7136 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7137 	}
7138 
7139 	/* update changed items */
7140 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7141 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7142 
7143 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7144 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7145 
7146 		DRM_DEBUG_DRIVER(
7147 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7148 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7149 			"connectors_changed:%d\n",
7150 			acrtc->crtc_id,
7151 			new_crtc_state->enable,
7152 			new_crtc_state->active,
7153 			new_crtc_state->planes_changed,
7154 			new_crtc_state->mode_changed,
7155 			new_crtc_state->active_changed,
7156 			new_crtc_state->connectors_changed);
7157 
7158 		/* Copy all transient state flags into dc state */
7159 		if (dm_new_crtc_state->stream) {
7160 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7161 							    dm_new_crtc_state->stream);
7162 		}
7163 
7164 		/* handles headless hotplug case, updating new_state and
7165 		 * aconnector as needed
7166 		 */
7167 
7168 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7169 
7170 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7171 
7172 			if (!dm_new_crtc_state->stream) {
7173 				/*
7174 				 * This could happen because of issues with
7175 				 * userspace notification delivery: userspace
7176 				 * tries to set a mode on a display that is in
7177 				 * fact disconnected, so the aconnector's
7178 				 * dc_sink is NULL. We expect a reset mode
7179 				 * request to come soon.
7180 				 *
7181 				 * This can also happen when an unplug occurs
7182 				 * during the resume sequence.
7183 				 *
7184 				 * In either case, we want to pretend we still
7185 				 * have a sink to keep the pipe running, so that
7186 				 * the hw state stays consistent with the sw state.
7187 				 */
7188 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7189 						__func__, acrtc->base.base.id);
7190 				continue;
7191 			}
7192 
7193 			if (dm_old_crtc_state->stream)
7194 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7195 
7196 			pm_runtime_get_noresume(dev->dev);
7197 
7198 			acrtc->enabled = true;
7199 			acrtc->hw_mode = new_crtc_state->mode;
7200 			crtc->hwmode = new_crtc_state->mode;
7201 		} else if (modereset_required(new_crtc_state)) {
7202 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7203 			/* i.e. reset mode */
7204 			if (dm_old_crtc_state->stream) {
7205 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7206 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7207 
7208 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7209 			}
7210 		}
7211 	} /* for_each_crtc_in_state() */
7212 
7213 	if (dc_state) {
7214 		dm_enable_per_frame_crtc_master_sync(dc_state);
7215 		mutex_lock(&dm->dc_lock);
7216 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7217 		mutex_unlock(&dm->dc_lock);
7218 	}
7219 
7220 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7221 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7222 
7223 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7224 
7225 		if (dm_new_crtc_state->stream != NULL) {
7226 			const struct dc_stream_status *status =
7227 					dc_stream_get_status(dm_new_crtc_state->stream);
7228 
7229 			if (!status)
7230 				status = dc_stream_get_status_from_state(dc_state,
7231 									 dm_new_crtc_state->stream);
7232 
7233 			if (!status)
7234 				DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
7235 			else
7236 				acrtc->otg_inst = status->primary_otg_inst;
7237 		}
7238 	}
7239 #ifdef CONFIG_DRM_AMD_DC_HDCP
7240 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7241 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7242 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7243 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7244 
7245 		new_crtc_state = NULL;
7246 
7247 		if (acrtc)
7248 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7249 
7250 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7251 
7252 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7253 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7254 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7255 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7256 			continue;
7257 		}
7258 
7259 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7260 			hdcp_update_display(
7261 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7262 				new_con_state->hdcp_content_type,
7263 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7264 													 : false);
7265 	}
7266 #endif
7267 
7268 	/* Handle connector state changes */
7269 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7270 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7271 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7272 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7273 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7274 		struct dc_stream_update stream_update;
7275 		struct dc_info_packet hdr_packet;
7276 		struct dc_stream_status *status = NULL;
7277 		bool abm_changed, hdr_changed, scaling_changed;
7278 
7279 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7280 		memset(&stream_update, 0, sizeof(stream_update));
7281 
7282 		if (acrtc) {
7283 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7284 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7285 		}
7286 
7287 		/* Skip any modesets/resets */
7288 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7289 			continue;
7290 
7291 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7292 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7293 
7294 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7295 							     dm_old_con_state);
7296 
7297 		abm_changed = dm_new_crtc_state->abm_level !=
7298 			      dm_old_crtc_state->abm_level;
7299 
7300 		hdr_changed =
7301 			is_hdr_metadata_different(old_con_state, new_con_state);
7302 
7303 		if (!scaling_changed && !abm_changed && !hdr_changed)
7304 			continue;
7305 
7306 		stream_update.stream = dm_new_crtc_state->stream;
7307 		if (scaling_changed) {
7308 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7309 					dm_new_con_state, dm_new_crtc_state->stream);
7310 
7311 			stream_update.src = dm_new_crtc_state->stream->src;
7312 			stream_update.dst = dm_new_crtc_state->stream->dst;
7313 		}
7314 
7315 		if (abm_changed) {
7316 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7317 
7318 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7319 		}
7320 
7321 		if (hdr_changed) {
7322 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7323 			stream_update.hdr_static_metadata = &hdr_packet;
7324 		}
7325 
7326 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7327 		WARN_ON(!status);
7328 		WARN_ON(!status->plane_count);
7329 
7330 		/*
7331 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7332 		 * Here we create an empty update on each plane.
7333 		 * To fix this, DC should permit updating only stream properties.
7334 		 */
7335 		for (j = 0; j < status->plane_count; j++)
7336 			dummy_updates[j].surface = status->plane_states[0];
7337 
7338 
7339 		mutex_lock(&dm->dc_lock);
7340 		dc_commit_updates_for_stream(dm->dc,
7341 						     dummy_updates,
7342 						     status->plane_count,
7343 						     dm_new_crtc_state->stream,
7344 						     &stream_update,
7345 						     dc_state);
7346 		mutex_unlock(&dm->dc_lock);
7347 	}
7348 
7349 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7350 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7351 				      new_crtc_state, i) {
7352 		if (old_crtc_state->active && !new_crtc_state->active)
7353 			crtc_disable_count++;
7354 
7355 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7356 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7357 
7358 		/* Update freesync active state. */
7359 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7360 
7361 		/* Handle vrr on->off / off->on transitions */
7362 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7363 						dm_new_crtc_state);
7364 	}
7365 
7366 	/* Enable interrupts for CRTCs going through a modeset. */
7367 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7368 
7369 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7370 		if (new_crtc_state->async_flip)
7371 			wait_for_vblank = false;
7372 
7373 	/* update planes when needed per crtc*/
7374 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7375 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7376 
7377 		if (dm_new_crtc_state->stream)
7378 			amdgpu_dm_commit_planes(state, dc_state, dev,
7379 						dm, crtc, wait_for_vblank);
7380 	}
7381 
7382 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7383 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7384 
7385 	/* Update audio instances for each connector. */
7386 	amdgpu_dm_commit_audio(dev, state);
7387 
7388 	/*
7389 	 * Send a vblank event for all events not handled in flip and
7390 	 * mark the consumed event for drm_atomic_helper_commit_hw_done.
7391 	 */
7392 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7393 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7394 
7395 		if (new_crtc_state->event)
7396 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7397 
7398 		new_crtc_state->event = NULL;
7399 	}
7400 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7401 
7402 	/* Signal HW programming completion */
7403 	drm_atomic_helper_commit_hw_done(state);
7404 
7405 	if (wait_for_vblank)
7406 		drm_atomic_helper_wait_for_flip_done(dev, state);
7407 
7408 	drm_atomic_helper_cleanup_planes(dev, state);
7409 
7410 	/*
7411 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7412 	 * so we can put the GPU into runtime suspend if we're not driving any
7413 	 * displays anymore
7414 	 */
7415 	for (i = 0; i < crtc_disable_count; i++)
7416 		pm_runtime_put_autosuspend(dev->dev);
7417 	pm_runtime_mark_last_busy(dev->dev);
7418 
7419 	if (dc_state_temp)
7420 		dc_release_state(dc_state_temp);
7421 }
7422 
7423 
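/*
 * Construct a minimal atomic state that re-commits the connector's
 * current CRTC and primary plane with mode_changed forced on, so the
 * display is fully reprogrammed without waiting for a userspace commit.
 */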
7424 static int dm_force_atomic_commit(struct drm_connector *connector)
7425 {
7426 	int ret = 0;
7427 	struct drm_device *ddev = connector->dev;
7428 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7429 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7430 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7431 	struct drm_connector_state *conn_state;
7432 	struct drm_crtc_state *crtc_state;
7433 	struct drm_plane_state *plane_state;
7434 
7435 	if (!state)
7436 		return -ENOMEM;
7437 
7438 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7439 
7440 	/* Construct an atomic state to restore previous display settings */
7441 
7442 	/*
7443 	 * Attach connectors to drm_atomic_state
7444 	 */
7445 	conn_state = drm_atomic_get_connector_state(state, connector);
7446 
7447 	ret = PTR_ERR_OR_ZERO(conn_state);
7448 	if (ret)
7449 		goto err;
7450 
7451 	/* Attach crtc to drm_atomic_state */
7452 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7453 
7454 	ret = PTR_ERR_OR_ZERO(crtc_state);
7455 	if (ret)
7456 		goto err;
7457 
7458 	/* force a restore */
7459 	crtc_state->mode_changed = true;
7460 
7461 	/* Attach plane to drm_atomic_state */
7462 	plane_state = drm_atomic_get_plane_state(state, plane);
7463 
7464 	ret = PTR_ERR_OR_ZERO(plane_state);
7465 	if (ret)
7466 		goto err;
7467 
7468 
7469 	/* Call commit internally with the state we just constructed */
7470 	ret = drm_atomic_commit(state);
7471 	if (!ret)
7472 		return 0;
7473 
7474 err:
7475 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7476 	drm_atomic_state_put(state);
7477 
7478 	return ret;
7479 }
7480 
7481 /*
7482  * This function handles all cases when set mode does not come upon hotplug.
7483  * This includes when a display is unplugged then plugged back into the
7484  * same port and when running without usermode desktop manager support.
7485  */
7486 void dm_restore_drm_connector_state(struct drm_device *dev,
7487 				    struct drm_connector *connector)
7488 {
7489 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7490 	struct amdgpu_crtc *disconnected_acrtc;
7491 	struct dm_crtc_state *acrtc_state;
7492 
7493 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7494 		return;
7495 
7496 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7497 	if (!disconnected_acrtc)
7498 		return;
7499 
7500 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7501 	if (!acrtc_state->stream)
7502 		return;
7503 
7504 	/*
7505 	 * If the previous sink is not released and different from the current,
7506 	 * we deduce that we are in a state where we cannot rely on a usermode
7507 	 * call to turn on the display, so we do it here.
7508 	 */
7509 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7510 		dm_force_atomic_commit(&aconnector->base);
7511 }
7512 
7513 /*
7514  * Grabs all modesetting locks to serialize against any blocking commits,
7515  * and waits for completion of all non-blocking commits.
7516  */
7517 static int do_acquire_global_lock(struct drm_device *dev,
7518 				 struct drm_atomic_state *state)
7519 {
7520 	struct drm_crtc *crtc;
7521 	struct drm_crtc_commit *commit;
7522 	long ret;
7523 
7524 	/*
7525 	 * Adding all modeset locks to acquire_ctx will
7526 	 * ensure that when the framework releases it, the
7527 	 * extra locks we are taking here will get released too.
7528 	 */
7529 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7530 	if (ret)
7531 		return ret;
7532 
7533 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7534 		spin_lock(&crtc->commit_lock);
7535 		commit = list_first_entry_or_null(&crtc->commit_list,
7536 				struct drm_crtc_commit, commit_entry);
7537 		if (commit)
7538 			drm_crtc_commit_get(commit);
7539 		spin_unlock(&crtc->commit_lock);
7540 
7541 		if (!commit)
7542 			continue;
7543 
7544 		/*
7545 		 * Make sure all pending HW programming has completed and
7546 		 * all page flips are done.
7547 		 */
7548 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7549 
7550 		if (ret > 0)
7551 			ret = wait_for_completion_interruptible_timeout(
7552 					&commit->flip_done, 10*HZ);
7553 
7554 		if (ret == 0)
7555 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7556 				  "timed out\n", crtc->base.id, crtc->name);
7557 
7558 		drm_crtc_commit_put(commit);
7559 	}
7560 
7561 	return ret < 0 ? ret : 0;
7562 }
7563 
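/*
 * Derive the FreeSync/VRR configuration for a CRTC from the connector
 * state: VRR is only marked supported when the connector reports
 * FreeSync capability and the mode's nominal refresh rate lies within
 * the monitor's [min_vfreq, max_vfreq] range.
 */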
7564 static void get_freesync_config_for_crtc(
7565 	struct dm_crtc_state *new_crtc_state,
7566 	struct dm_connector_state *new_con_state)
7567 {
7568 	struct mod_freesync_config config = {0};
7569 	struct amdgpu_dm_connector *aconnector =
7570 			to_amdgpu_dm_connector(new_con_state->base.connector);
7571 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7572 	int vrefresh = drm_mode_vrefresh(mode);
7573 
7574 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7575 					vrefresh >= aconnector->min_vfreq &&
7576 					vrefresh <= aconnector->max_vfreq;
7577 
7578 	if (new_crtc_state->vrr_supported) {
7579 		new_crtc_state->stream->ignore_msa_timing_param = true;
7580 		config.state = new_crtc_state->base.vrr_enabled ?
7581 				VRR_STATE_ACTIVE_VARIABLE :
7582 				VRR_STATE_INACTIVE;
7583 		config.min_refresh_in_uhz =
7584 				aconnector->min_vfreq * 1000000;
7585 		config.max_refresh_in_uhz =
7586 				aconnector->max_vfreq * 1000000;
7587 		config.vsif_supported = true;
7588 		config.btr = true;
7589 	}
7590 
7591 	new_crtc_state->freesync_config = config;
7592 }
7593 
7594 static void reset_freesync_config_for_crtc(
7595 	struct dm_crtc_state *new_crtc_state)
7596 {
7597 	new_crtc_state->vrr_supported = false;
7598 
7599 	memset(&new_crtc_state->vrr_params, 0,
7600 	       sizeof(new_crtc_state->vrr_params));
7601 	memset(&new_crtc_state->vrr_infopacket, 0,
7602 	       sizeof(new_crtc_state->vrr_infopacket));
7603 }
7604 
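/*
 * Validate and apply one CRTC's changes to the DC state under
 * construction. Called twice from atomic check: first with
 * enable == false to remove streams for disabled/reset CRTCs, then with
 * enable == true to add streams for enabled ones. Sets
 * *lock_and_validation_needed whenever a stream is added or removed.
 */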
7605 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7606 				struct drm_atomic_state *state,
7607 				struct drm_crtc *crtc,
7608 				struct drm_crtc_state *old_crtc_state,
7609 				struct drm_crtc_state *new_crtc_state,
7610 				bool enable,
7611 				bool *lock_and_validation_needed)
7612 {
7613 	struct dm_atomic_state *dm_state = NULL;
7614 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7615 	struct dc_stream_state *new_stream;
7616 	int ret = 0;
7617 
7618 	/*
7619 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set.
7620 	 * Update changed items.
7621 	 */
7622 	struct amdgpu_crtc *acrtc = NULL;
7623 	struct amdgpu_dm_connector *aconnector = NULL;
7624 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7625 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7626 
7627 	new_stream = NULL;
7628 
7629 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7630 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7631 	acrtc = to_amdgpu_crtc(crtc);
7632 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7633 
7634 	/* TODO This hack should go away */
7635 	if (aconnector && enable) {
7636 		/* Make sure fake sink is created in plug-in scenario */
7637 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7638 							    &aconnector->base);
7639 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7640 							    &aconnector->base);
7641 
7642 		if (IS_ERR(drm_new_conn_state)) {
7643 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7644 			goto fail;
7645 		}
7646 
7647 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7648 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7649 
7650 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7651 			goto skip_modeset;
7652 
7653 		new_stream = create_validate_stream_for_sink(aconnector,
7654 							     &new_crtc_state->mode,
7655 							     dm_new_conn_state,
7656 							     dm_old_crtc_state->stream);
7657 
7658 		/*
7659 		 * We can have no stream on ACTION_SET if a display
7660 		 * was disconnected during S3; in this case it is not an
7661 		 * error, the OS will be updated after detection and
7662 		 * will do the right thing on the next atomic commit.
7663 		 */
7664 
7665 		if (!new_stream) {
7666 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7667 					__func__, acrtc->base.base.id);
7668 			ret = -ENOMEM;
7669 			goto fail;
7670 		}
7671 
7672 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7673 
7674 		ret = fill_hdr_info_packet(drm_new_conn_state,
7675 					   &new_stream->hdr_static_metadata);
7676 		if (ret)
7677 			goto fail;
7678 
7679 		/*
7680 		 * If we already removed the old stream from the context
7681 		 * (and set the new stream to NULL) then we can't reuse
7682 		 * the old stream even if the stream and scaling are unchanged.
7683 		 * We would hit the BUG_ON and end up with a black screen.
7684 		 *
7685 		 * TODO: Refactor this function to allow this check to work
7686 		 * in all conditions.
7687 		 */
7688 		if (dm_new_crtc_state->stream &&
7689 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7690 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7691 			new_crtc_state->mode_changed = false;
7692 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
7693 					 new_crtc_state->mode_changed);
7694 		}
7695 	}
7696 
7697 	/* mode_changed flag may get updated above, need to check again */
7698 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7699 		goto skip_modeset;
7700 
7701 	DRM_DEBUG_DRIVER(
7702 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7703 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7704 		"connectors_changed:%d\n",
7705 		acrtc->crtc_id,
7706 		new_crtc_state->enable,
7707 		new_crtc_state->active,
7708 		new_crtc_state->planes_changed,
7709 		new_crtc_state->mode_changed,
7710 		new_crtc_state->active_changed,
7711 		new_crtc_state->connectors_changed);
7712 
7713 	/* Remove stream for any changed/disabled CRTC */
7714 	if (!enable) {
7715 
7716 		if (!dm_old_crtc_state->stream)
7717 			goto skip_modeset;
7718 
7719 		ret = dm_atomic_get_state(state, &dm_state);
7720 		if (ret)
7721 			goto fail;
7722 
7723 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7724 				crtc->base.id);
7725 
7726 		/* i.e. reset mode */
7727 		if (dc_remove_stream_from_ctx(
7728 				dm->dc,
7729 				dm_state->context,
7730 				dm_old_crtc_state->stream) != DC_OK) {
7731 			ret = -EINVAL;
7732 			goto fail;
7733 		}
7734 
7735 		dc_stream_release(dm_old_crtc_state->stream);
7736 		dm_new_crtc_state->stream = NULL;
7737 
7738 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7739 
7740 		*lock_and_validation_needed = true;
7741 
7742 	} else {/* Add stream for any updated/enabled CRTC */
7743 		/*
7744 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
7745 		 * added MST connectors are not found in the existing crtc_state in
7746 		 * chained mode. TODO: need to dig out the root cause of that.
7747 		 */
7748 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7749 			goto skip_modeset;
7750 
7751 		if (modereset_required(new_crtc_state))
7752 			goto skip_modeset;
7753 
7754 		if (modeset_required(new_crtc_state, new_stream,
7755 				     dm_old_crtc_state->stream)) {
7756 
7757 			WARN_ON(dm_new_crtc_state->stream);
7758 
7759 			ret = dm_atomic_get_state(state, &dm_state);
7760 			if (ret)
7761 				goto fail;
7762 
7763 			dm_new_crtc_state->stream = new_stream;
7764 
7765 			dc_stream_retain(new_stream);
7766 
7767 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7768 						crtc->base.id);
7769 
7770 			if (dc_add_stream_to_ctx(
7771 					dm->dc,
7772 					dm_state->context,
7773 					dm_new_crtc_state->stream) != DC_OK) {
7774 				ret = -EINVAL;
7775 				goto fail;
7776 			}
7777 
7778 			*lock_and_validation_needed = true;
7779 		}
7780 	}
7781 
7782 skip_modeset:
7783 	/* Release extra reference */
7784 	if (new_stream)
7785 		dc_stream_release(new_stream);
7786 
7787 	/*
7788 	 * We want to do dc stream updates that do not require a
7789 	 * full modeset below.
7790 	 */
7791 	if (!(enable && aconnector && new_crtc_state->enable &&
7792 	      new_crtc_state->active))
7793 		return 0;
7794 	/*
7795 	 * Given the above conditions, the dc state cannot be NULL because:
7796 	 * 1. We're in the process of enabling CRTCs (the stream has just been
7797 	 *    added to the dc context, or is already in the context),
7798 	 * 2. It has a valid connector attached, and
7799 	 * 3. It is currently active and enabled.
7800 	 * => The dc stream state currently exists.
7801 	 */
7802 	BUG_ON(dm_new_crtc_state->stream == NULL);
7803 
7804 	/* Scaling or underscan settings */
7805 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7806 		update_stream_scaling_settings(
7807 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7808 
7809 	/* ABM settings */
7810 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7811 
7812 	/*
7813 	 * Color management settings. We also update color properties
7814 	 * when a modeset is needed, to ensure it gets reprogrammed.
7815 	 */
7816 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7817 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7818 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7819 		if (ret)
7820 			goto fail;
7821 	}
7822 
7823 	/* Update Freesync settings. */
7824 	get_freesync_config_for_crtc(dm_new_crtc_state,
7825 				     dm_new_conn_state);
7826 
7827 	return ret;
7828 
7829 fail:
7830 	if (new_stream)
7831 		dc_stream_release(new_stream);
7832 	return ret;
7833 }
7834 
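/*
 * Decide whether a plane change requires removing and recreating the
 * DC plane states (a "reset") instead of a fast update. Deliberately
 * conservative; see the TODOs below for checks that could be relaxed.
 */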
7835 static bool should_reset_plane(struct drm_atomic_state *state,
7836 			       struct drm_plane *plane,
7837 			       struct drm_plane_state *old_plane_state,
7838 			       struct drm_plane_state *new_plane_state)
7839 {
7840 	struct drm_plane *other;
7841 	struct drm_plane_state *old_other_state, *new_other_state;
7842 	struct drm_crtc_state *new_crtc_state;
7843 	int i;
7844 
7845 	/*
7846 	 * TODO: Remove this hack once the checks below are sufficient
7847 	 * to determine when we need to reset all the planes on
7848 	 * the stream.
7849 	 */
7850 	if (state->allow_modeset)
7851 		return true;
7852 
7853 	/* Exit early if we know that we're adding or removing the plane. */
7854 	if (old_plane_state->crtc != new_plane_state->crtc)
7855 		return true;
7856 
7857 	/* old crtc == new_crtc == NULL, plane not in context. */
7858 	if (!new_plane_state->crtc)
7859 		return false;
7860 
7861 	new_crtc_state =
7862 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7863 
7864 	if (!new_crtc_state)
7865 		return true;
7866 
7867 	/* CRTC Degamma changes currently require us to recreate planes. */
7868 	if (new_crtc_state->color_mgmt_changed)
7869 		return true;
7870 
7871 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7872 		return true;
7873 
7874 	/*
7875 	 * If there are any new primary or overlay planes being added or
7876 	 * removed then the z-order can potentially change. To ensure
7877 	 * correct z-order and pipe acquisition the current DC architecture
7878 	 * requires us to remove and recreate all existing planes.
7879 	 *
7880 	 * TODO: Come up with a more elegant solution for this.
7881 	 */
7882 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7883 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7884 			continue;
7885 
7886 		if (old_other_state->crtc != new_plane_state->crtc &&
7887 		    new_other_state->crtc != new_plane_state->crtc)
7888 			continue;
7889 
7890 		if (old_other_state->crtc != new_other_state->crtc)
7891 			return true;
7892 
7893 		/* TODO: Remove this once we can handle fast format changes. */
7894 		if (old_other_state->fb && new_other_state->fb &&
7895 		    old_other_state->fb->format != new_other_state->fb->format)
7896 			return true;
7897 	}
7898 
7899 	return false;
7900 }
7901 
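/*
 * Plane counterpart of dm_update_crtc_state(): releases the DC plane
 * state when a plane is removed or reset, and creates, fills and
 * attaches a new one to the stream in the DC context when a plane is
 * added back.
 */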
7902 static int dm_update_plane_state(struct dc *dc,
7903 				 struct drm_atomic_state *state,
7904 				 struct drm_plane *plane,
7905 				 struct drm_plane_state *old_plane_state,
7906 				 struct drm_plane_state *new_plane_state,
7907 				 bool enable,
7908 				 bool *lock_and_validation_needed)
7909 {
7910 
7911 	struct dm_atomic_state *dm_state = NULL;
7912 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7913 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7914 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7915 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7916 	struct amdgpu_crtc *new_acrtc;
7917 	bool needs_reset;
7918 	int ret = 0;
7919 
7920 
7921 	new_plane_crtc = new_plane_state->crtc;
7922 	old_plane_crtc = old_plane_state->crtc;
7923 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7924 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7925 
7926 	/* TODO: Implement a better atomic check for the cursor plane */
7927 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7928 		if (!enable || !new_plane_crtc ||
7929 			drm_atomic_plane_disabling(plane->state, new_plane_state))
7930 			return 0;
7931 
7932 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7933 
7934 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7935 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7936 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7937 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
7938 			return -EINVAL;
7939 		}
7940 
7941 		return 0;
7942 	}
7943 
7944 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7945 					 new_plane_state);
7946 
7947 	/* Remove any changed/removed planes */
7948 	if (!enable) {
7949 		if (!needs_reset)
7950 			return 0;
7951 
7952 		if (!old_plane_crtc)
7953 			return 0;
7954 
7955 		old_crtc_state = drm_atomic_get_old_crtc_state(
7956 				state, old_plane_crtc);
7957 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7958 
7959 		if (!dm_old_crtc_state->stream)
7960 			return 0;
7961 
7962 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7963 				plane->base.id, old_plane_crtc->base.id);
7964 
7965 		ret = dm_atomic_get_state(state, &dm_state);
7966 		if (ret)
7967 			return ret;
7968 
7969 		if (!dc_remove_plane_from_context(
7970 				dc,
7971 				dm_old_crtc_state->stream,
7972 				dm_old_plane_state->dc_state,
7973 				dm_state->context)) {
7974 
7975 			ret = -EINVAL;
7976 			return ret;
7977 		}
7978 
7979 
7980 		dc_plane_state_release(dm_old_plane_state->dc_state);
7981 		dm_new_plane_state->dc_state = NULL;
7982 
7983 		*lock_and_validation_needed = true;
7984 
7985 	} else { /* Add new planes */
7986 		struct dc_plane_state *dc_new_plane_state;
7987 
7988 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7989 			return 0;
7990 
7991 		if (!new_plane_crtc)
7992 			return 0;
7993 
7994 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7995 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7996 
7997 		if (!dm_new_crtc_state->stream)
7998 			return 0;
7999 
8000 		if (!needs_reset)
8001 			return 0;
8002 
8003 		WARN_ON(dm_new_plane_state->dc_state);
8004 
8005 		dc_new_plane_state = dc_create_plane_state(dc);
8006 		if (!dc_new_plane_state)
8007 			return -ENOMEM;
8008 
8009 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8010 				plane->base.id, new_plane_crtc->base.id);
8011 
8012 		ret = fill_dc_plane_attributes(
8013 			new_plane_crtc->dev->dev_private,
8014 			dc_new_plane_state,
8015 			new_plane_state,
8016 			new_crtc_state);
8017 		if (ret) {
8018 			dc_plane_state_release(dc_new_plane_state);
8019 			return ret;
8020 		}
8021 
8022 		ret = dm_atomic_get_state(state, &dm_state);
8023 		if (ret) {
8024 			dc_plane_state_release(dc_new_plane_state);
8025 			return ret;
8026 		}
8027 
8028 		/*
8029 		 * Any atomic check errors that occur after this will
8030 		 * not need a release. The plane state will be attached
8031 		 * to the stream, and therefore part of the atomic
8032 		 * state. It'll be released when the atomic state is
8033 		 * cleaned.
8034 		 */
8035 		if (!dc_add_plane_to_context(
8036 				dc,
8037 				dm_new_crtc_state->stream,
8038 				dc_new_plane_state,
8039 				dm_state->context)) {
8040 
8041 			dc_plane_state_release(dc_new_plane_state);
8042 			return -EINVAL;
8043 		}
8044 
8045 		dm_new_plane_state->dc_state = dc_new_plane_state;
8046 
8047 		/* Tell DC to do a full surface update every time there
8048 		 * is a plane change. Inefficient, but works for now.
8049 		 */
8050 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8051 
8052 		*lock_and_validation_needed = true;
8053 	}
8054 
8055 
8056 	return ret;
8057 }
8058 
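/*
 * Ask DC how invasive the pending commit is. Per-stream bundles of
 * surface updates are built from the atomic state and passed to
 * dc_check_update_surfaces_for_stream(); anything above UPDATE_TYPE_MED
 * is promoted to UPDATE_TYPE_FULL.
 */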
8059 static int
8060 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8061 				    struct drm_atomic_state *state,
8062 				    enum surface_update_type *out_type)
8063 {
8064 	struct dc *dc = dm->dc;
8065 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8066 	int i, j, num_plane, ret = 0;
8067 	struct drm_plane_state *old_plane_state, *new_plane_state;
8068 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8069 	struct drm_crtc *new_plane_crtc;
8070 	struct drm_plane *plane;
8071 
8072 	struct drm_crtc *crtc;
8073 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8074 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8075 	struct dc_stream_status *status = NULL;
8076 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8077 	struct surface_info_bundle {
8078 		struct dc_surface_update surface_updates[MAX_SURFACES];
8079 		struct dc_plane_info plane_infos[MAX_SURFACES];
8080 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8081 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8082 		struct dc_stream_update stream_update;
8083 	} *bundle;
8084 
8085 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8086 
8087 	if (!bundle) {
8088 		DRM_ERROR("Failed to allocate update bundle\n");
8089 		/* Set type to FULL to avoid crashing in DC */
8090 		update_type = UPDATE_TYPE_FULL;
8091 		goto cleanup;
8092 	}
8093 
8094 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8095 
8096 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8097 
8098 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8099 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8100 		num_plane = 0;
8101 
8102 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8103 			update_type = UPDATE_TYPE_FULL;
8104 			goto cleanup;
8105 		}
8106 
8107 		if (!new_dm_crtc_state->stream)
8108 			continue;
8109 
8110 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8111 			const struct amdgpu_framebuffer *amdgpu_fb =
8112 				to_amdgpu_framebuffer(new_plane_state->fb);
8113 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8114 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8115 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8116 			uint64_t tiling_flags;
8117 
8118 			new_plane_crtc = new_plane_state->crtc;
8119 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8120 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8121 
8122 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8123 				continue;
8124 
8125 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8126 				update_type = UPDATE_TYPE_FULL;
8127 				goto cleanup;
8128 			}
8129 
8130 			if (crtc != new_plane_crtc)
8131 				continue;
8132 
8133 			bundle->surface_updates[num_plane].surface =
8134 					new_dm_plane_state->dc_state;
8135 
8136 			if (new_crtc_state->mode_changed) {
8137 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8138 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8139 			}
8140 
8141 			if (new_crtc_state->color_mgmt_changed) {
8142 				bundle->surface_updates[num_plane].gamma =
8143 						new_dm_plane_state->dc_state->gamma_correction;
8144 				bundle->surface_updates[num_plane].in_transfer_func =
8145 						new_dm_plane_state->dc_state->in_transfer_func;
8146 				bundle->stream_update.gamut_remap =
8147 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8148 				bundle->stream_update.output_csc_transform =
8149 						&new_dm_crtc_state->stream->csc_color_matrix;
8150 				bundle->stream_update.out_transfer_func =
8151 						new_dm_crtc_state->stream->out_transfer_func;
8152 			}
8153 
8154 			ret = fill_dc_scaling_info(new_plane_state,
8155 						   scaling_info);
8156 			if (ret)
8157 				goto cleanup;
8158 
8159 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8160 
8161 			if (amdgpu_fb) {
8162 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8163 				if (ret)
8164 					goto cleanup;
8165 
8166 				ret = fill_dc_plane_info_and_addr(
8167 					dm->adev, new_plane_state, tiling_flags,
8168 					plane_info,
8169 					&flip_addr->address,
8170 					false);
8171 				if (ret)
8172 					goto cleanup;
8173 
8174 				bundle->surface_updates[num_plane].plane_info = plane_info;
8175 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8176 			}
8177 
8178 			num_plane++;
8179 		}
8180 
8181 		if (num_plane == 0)
8182 			continue;
8183 
8184 		ret = dm_atomic_get_state(state, &dm_state);
8185 		if (ret)
8186 			goto cleanup;
8187 
8188 		old_dm_state = dm_atomic_get_old_state(state);
8189 		if (!old_dm_state) {
8190 			ret = -EINVAL;
8191 			goto cleanup;
8192 		}
8193 
8194 		status = dc_stream_get_status_from_state(old_dm_state->context,
8195 							 new_dm_crtc_state->stream);
8196 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8197 		/*
8198 		 * TODO: DC modifies the surface during this call so we need
8199 		 * to lock here - find a way to do this without locking.
8200 		 */
8201 		mutex_lock(&dm->dc_lock);
8202 		update_type = dc_check_update_surfaces_for_stream(
8203 				dc, bundle->surface_updates, num_plane,
8204 				&bundle->stream_update, status);
8205 		mutex_unlock(&dm->dc_lock);
8206 
8207 		if (update_type > UPDATE_TYPE_MED) {
8208 			update_type = UPDATE_TYPE_FULL;
8209 			goto cleanup;
8210 		}
8211 	}
8212 
8213 cleanup:
8214 	kfree(bundle);
8215 
8216 	*out_type = update_type;
8217 	return ret;
8218 }
8219 
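/*
 * For a CRTC driven by an MST connector, pull every other CRTC sharing
 * the same MST topology into the atomic state, since reconfiguring DSC
 * can affect the other streams on that link.
 */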
8220 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8221 {
8222 	struct drm_connector *connector;
8223 	struct drm_connector_state *conn_state;
8224 	struct amdgpu_dm_connector *aconnector = NULL;
8225 	int i;
8226 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8227 		if (conn_state->crtc != crtc)
8228 			continue;
8229 
8230 		aconnector = to_amdgpu_dm_connector(connector);
8231 		if (!aconnector->port || !aconnector->mst_port)
8232 			aconnector = NULL;
8233 		else
8234 			break;
8235 	}
8236 
8237 	if (!aconnector)
8238 		return 0;
8239 
8240 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8241 }
8242 
8243 /**
8244  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8245  * @dev: The DRM device
8246  * @state: The atomic state to commit
8247  *
8248  * Validate that the given atomic state is programmable by DC into hardware.
8249  * This involves constructing a &struct dc_state reflecting the new hardware
8250  * state we wish to commit, then querying DC to see if it is programmable. It's
8251  * important not to modify the existing DC state. Otherwise, atomic_check
8252  * may unexpectedly commit hardware changes.
8253  *
8254  * When validating the DC state, it's important that the right locks are
8255  * acquired. For full updates case which removes/adds/updates streams on one
8256  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8257  * that any such full update commit will wait for completion of any outstanding
8258  * flip using DRMs synchronization events. See
8259  * dm_determine_update_type_for_commit()
8260  *
8261  * Note that DM adds the affected connectors for all CRTCs in state, when that
8262  * might not seem necessary. This is because DC stream creation requires the
8263  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8264  * be possible but non-trivial - a possible TODO item.
8265  *
8266  * Return: -Error code if validation failed.
8267  */
8268 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8269 				  struct drm_atomic_state *state)
8270 {
8271 	struct amdgpu_device *adev = dev->dev_private;
8272 	struct dm_atomic_state *dm_state = NULL;
8273 	struct dc *dc = adev->dm.dc;
8274 	struct drm_connector *connector;
8275 	struct drm_connector_state *old_con_state, *new_con_state;
8276 	struct drm_crtc *crtc;
8277 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8278 	struct drm_plane *plane;
8279 	struct drm_plane_state *old_plane_state, *new_plane_state;
8280 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8281 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8282 
8283 	int ret, i;
8284 
8285 	/*
8286 	 * This bool will be set to true for any modeset/reset
8287 	 * or plane update that implies a non-fast surface update.
8288 	 */
8289 	bool lock_and_validation_needed = false;
8290 
8291 	ret = drm_atomic_helper_check_modeset(dev, state);
8292 	if (ret)
8293 		goto fail;
8294 
8295 	if (adev->asic_type >= CHIP_NAVI10) {
8296 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8297 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8298 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8299 				if (ret)
8300 					goto fail;
8301 			}
8302 		}
8303 	}
8304 
8305 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8306 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8307 		    !new_crtc_state->color_mgmt_changed &&
8308 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8309 			continue;
8310 
8311 		if (!new_crtc_state->enable)
8312 			continue;
8313 
8314 		ret = drm_atomic_add_affected_connectors(state, crtc);
8315 		if (ret)
8316 			goto fail;
8317 
8318 		ret = drm_atomic_add_affected_planes(state, crtc);
8319 		if (ret)
8320 			goto fail;
8321 	}
8322 
8323 	/*
8324 	 * Add all primary and overlay planes on the CRTC to the state
8325 	 * whenever a plane is enabled to maintain correct z-ordering
8326 	 * and to enable fast surface updates.
8327 	 */
8328 	drm_for_each_crtc(crtc, dev) {
8329 		bool modified = false;
8330 
8331 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8332 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8333 				continue;
8334 
8335 			if (new_plane_state->crtc == crtc ||
8336 			    old_plane_state->crtc == crtc) {
8337 				modified = true;
8338 				break;
8339 			}
8340 		}
8341 
8342 		if (!modified)
8343 			continue;
8344 
8345 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8346 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8347 				continue;
8348 
8349 			new_plane_state =
8350 				drm_atomic_get_plane_state(state, plane);
8351 
8352 			if (IS_ERR(new_plane_state)) {
8353 				ret = PTR_ERR(new_plane_state);
8354 				goto fail;
8355 			}
8356 		}
8357 	}
8358 
8359 	/* Remove existing planes if they are modified */
8360 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8361 		ret = dm_update_plane_state(dc, state, plane,
8362 					    old_plane_state,
8363 					    new_plane_state,
8364 					    false,
8365 					    &lock_and_validation_needed);
8366 		if (ret)
8367 			goto fail;
8368 	}
8369 
8370 	/* Disable all crtcs which require disable */
8371 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8372 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8373 					   old_crtc_state,
8374 					   new_crtc_state,
8375 					   false,
8376 					   &lock_and_validation_needed);
8377 		if (ret)
8378 			goto fail;
8379 	}
8380 
8381 	/* Enable all crtcs which require enable */
8382 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8383 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8384 					   old_crtc_state,
8385 					   new_crtc_state,
8386 					   true,
8387 					   &lock_and_validation_needed);
8388 		if (ret)
8389 			goto fail;
8390 	}
8391 
8392 	/* Add new/modified planes */
8393 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8394 		ret = dm_update_plane_state(dc, state, plane,
8395 					    old_plane_state,
8396 					    new_plane_state,
8397 					    true,
8398 					    &lock_and_validation_needed);
8399 		if (ret)
8400 			goto fail;
8401 	}
8402 
8403 	/* Run this here since we want to validate the streams we created */
8404 	ret = drm_atomic_helper_check_planes(dev, state);
8405 	if (ret)
8406 		goto fail;
8407 
8408 	if (state->legacy_cursor_update) {
8409 		/*
8410 		 * This is a fast cursor update coming from the plane update
8411 		 * helper, check if it can be done asynchronously for better
8412 		 * performance.
8413 		 */
8414 		state->async_update =
8415 			!drm_atomic_helper_async_check(dev, state);
8416 
8417 		/*
8418 		 * Skip the remaining global validation if this is an async
8419 		 * update. Cursor updates can be done without affecting
8420 		 * state or bandwidth calcs and this avoids the performance
8421 		 * penalty of locking the private state object and
8422 		 * allocating a new dc_state.
8423 		 */
8424 		if (state->async_update)
8425 			return 0;
8426 	}
8427 
8428 	/* Check scaling and underscan changes */
8429 	/* TODO: Removed scaling changes validation due to inability to commit
8430 	 * a new stream into the context w/o causing a full reset. Need to
8431 	 * decide how to handle.
8432 	 */
8433 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8434 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8435 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8436 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8437 
8438 		/* Skip any modesets/resets */
8439 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8440 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8441 			continue;
8442 
8443 		/* Skip anything that is not a scaling or underscan change */
8444 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8445 			continue;
8446 
8447 		overall_update_type = UPDATE_TYPE_FULL;
8448 		lock_and_validation_needed = true;
8449 	}
8450 
8451 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8452 	if (ret)
8453 		goto fail;
8454 
8455 	if (overall_update_type < update_type)
8456 		overall_update_type = update_type;
8457 
8458 	/*
8459 	 * lock_and_validation_needed was an old way to determine if we need to set
8460 	 * the global lock. Leaving it in to check if we broke any corner cases:
8461 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8462 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8463 	 */
8464 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8465 		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8466 
8467 	if (overall_update_type > UPDATE_TYPE_FAST) {
8468 		ret = dm_atomic_get_state(state, &dm_state);
8469 		if (ret)
8470 			goto fail;
8471 
8472 		ret = do_acquire_global_lock(dev, state);
8473 		if (ret)
8474 			goto fail;
8475 
8476 		/* Check connector changes */
8477 		for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8478 			struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8479 			struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8480 
8481 			/* Skip connectors that are disabled or part of modeset already. */
8482 			if (!old_con_state->crtc && !new_con_state->crtc)
8483 				continue;
8484 
8485 			if (!new_con_state->crtc)
8486 				continue;
8487 
8488 			new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8489 			if (IS_ERR(new_crtc_state)) {
8490 				ret = PTR_ERR(new_crtc_state);
8491 				goto fail;
8492 			}
8493 
8494 			if (dm_old_con_state->abm_level !=
8495 			    dm_new_con_state->abm_level)
8496 				new_crtc_state->connectors_changed = true;
8497 		}
8498 
8499 #if defined(CONFIG_DRM_AMD_DC_DCN)
8500 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
8501 			goto fail;
		}
8502 
8503 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8504 		if (ret)
8505 			goto fail;
8506 #endif
8507 
8508 		/*
8509 		 * Perform validation of MST topology in the state:
8510 		 * We need to perform MST atomic check before calling
8511 		 * dc_validate_global_state(), or there is a chance
8512 		 * to get stuck in an infinite loop and hang eventually.
8513 		 */
8514 		ret = drm_dp_mst_atomic_check(state);
8515 		if (ret)
8516 			goto fail;
8517 
8518 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8519 			ret = -EINVAL;
8520 			goto fail;
8521 		}
8522 	} else {
8523 		/*
8524 		 * The commit is a fast update. Fast updates shouldn't change
8525 		 * the DC context, affect global validation, and can have their
8526 		 * commit work done in parallel with other commits not touching
8527 		 * the same resource. If we have a new DC context as part of
8528 		 * the DM atomic state from validation we need to free it and
8529 		 * retain the existing one instead.
8530 		 *
8531 		 * Furthermore, since the DM atomic state only contains the DC
8532 		 * context and can safely be annulled, we can free the state
8533 		 * and clear the associated private object now to free
8534 		 * some memory and avoid a possible use-after-free later.
8535 		 */
8536 
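		/*
		 * Only a single DM private object (the DC context) is ever
		 * added to the atomic state, so the scan can stop at the
		 * first match; hence the break below.
		 */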
8537 		for (i = 0; i < state->num_private_objs; i++) {
8538 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8539 
8540 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8541 				int j = state->num_private_objs-1;
8542 
8543 				dm_atomic_destroy_state(obj,
8544 						state->private_objs[i].state);
8545 
8546 				/* If i is not at the end of the array then the
8547 				 * last element needs to be moved to where i was
8548 				 * before the array can safely be truncated.
8549 				 */
8550 				if (i != j)
8551 					state->private_objs[i] =
8552 						state->private_objs[j];
8553 
8554 				state->private_objs[j].ptr = NULL;
8555 				state->private_objs[j].state = NULL;
8556 				state->private_objs[j].old_state = NULL;
8557 				state->private_objs[j].new_state = NULL;
8558 
8559 				state->num_private_objs = j;
8560 				break;
8561 			}
8562 		}
8563 	}
8564 
8565 	/* Store the overall update type for use later in atomic check. */
8566 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8567 		struct dm_crtc_state *dm_new_crtc_state =
8568 			to_dm_crtc_state(new_crtc_state);
8569 
8570 		dm_new_crtc_state->update_type = (int)overall_update_type;
8571 	}
8572 
8573 	/* Must be success */
8574 	/* Must be success at this point (ret == 0) */
8575 	return ret;
8576 
8577 fail:
8578 	if (ret == -EDEADLK)
8579 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8580 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8581 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8582 	else
8583 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8584 
8585 	return ret;
8586 }
8587 
8588 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8589 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8590 {
8591 	uint8_t dpcd_data;
8592 	bool capable = false;
8593 
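	/*
	 * Per the DP spec, the MSA_TIMING_PAR_IGNORED capability is reported
	 * in the DOWN_STREAM_PORT_COUNT DPCD register. A sink that can ignore
	 * the MSA timing parameters is able to follow a varying refresh rate,
	 * which is the prerequisite for FreeSync over DP probed here.
	 */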
8594 	if (amdgpu_dm_connector->dc_link &&
8595 		dm_helpers_dp_read_dpcd(
8596 				NULL,
8597 				amdgpu_dm_connector->dc_link,
8598 				DP_DOWN_STREAM_PORT_COUNT,
8599 				&dpcd_data,
8600 				sizeof(dpcd_data))) {
8601 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8602 	}
8603 
8604 	return capable;
8605 }

8606 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8607 					struct edid *edid)
8608 {
8609 	int i;
8610 	bool edid_check_required;
8611 	struct detailed_timing *timing;
8612 	struct detailed_non_pixel *data;
8613 	struct detailed_data_monitor_range *range;
8614 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8615 			to_amdgpu_dm_connector(connector);
8616 	struct dm_connector_state *dm_con_state = NULL;
8617 
8618 	struct drm_device *dev = connector->dev;
8619 	struct amdgpu_device *adev = dev->dev_private;
8620 	bool freesync_capable = false;
8621 
8622 	if (!connector->state) {
8623 		DRM_ERROR("%s - Connector has no state\n", __func__);
8624 		goto update;
8625 	}
8626 
8627 	if (!edid) {
8628 		dm_con_state = to_dm_connector_state(connector->state);
8629 
8630 		amdgpu_dm_connector->min_vfreq = 0;
8631 		amdgpu_dm_connector->max_vfreq = 0;
8632 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8633 
8634 		goto update;
8635 	}
8636 
8637 	dm_con_state = to_dm_connector_state(connector->state);
8638 
8639 	edid_check_required = false;
8640 	if (!amdgpu_dm_connector->dc_sink) {
8641 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8642 		goto update;
8643 	}
8644 	if (!adev->dm.freesync_module)
8645 		goto update;
8646 	/*
8647 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP sinks.
8648 	 */
8649 	if (edid) {
8650 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8651 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8652 			edid_check_required = is_dp_capable_without_timing_msa(
8653 						adev->dm.dc,
8654 						amdgpu_dm_connector);
8655 		}
8656 	}
8657 	if (edid_check_required && (edid->version > 1 ||
8658 	   (edid->version == 1 && edid->revision > 1))) {
8659 		for (i = 0; i < 4; i++) {
8660 
8661 			timing	= &edid->detailed_timings[i];
8662 			data	= &timing->data.other_data;
8663 			range	= &data->data.range;
8664 			/*
8665 			 * Only monitor range descriptors carry the continuous-frequency limits.
8666 			 */
8667 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8668 				continue;
8669 			/*
8670 			 * Handle range-limits descriptors with flags == 1 only,
8671 			 * i.e. those that provide no additional timing
8672 			 * information. Default GTF, secondary GTF curve and
8673 			 * CVT are not supported.
8674 			 */
8675 			if (range->flags != 1)
8676 				continue;
8677 
8678 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8679 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8680 			amdgpu_dm_connector->pixel_clock_mhz =
8681 				range->pixel_clock_mhz * 10;
8682 			break;
8683 		}
8684 
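		/*
		 * Only advertise FreeSync when the monitor exposes a usable
		 * refresh-rate window, i.e. more than 10 Hz between its
		 * minimum and maximum vertical frequencies.
		 */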
8685 		if (amdgpu_dm_connector->max_vfreq -
8686 		    amdgpu_dm_connector->min_vfreq > 10) {
8687 
8688 			freesync_capable = true;
8689 		}
8690 	}
8691 
8692 update:
8693 	if (dm_con_state)
8694 		dm_con_state->freesync_capable = freesync_capable;
8695 
8696 	if (connector->vrr_capable_property)
8697 		drm_connector_set_vrr_capable_property(connector,
8698 						       freesync_capable);
8699 }
8700 
8701 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8702 {
8703 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8704 
8705 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8706 		return;
8707 	if (link->type == dc_connection_none)
8708 		return;
8709 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
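	/*
	 * The sink advertises PSR capability in its DPCD: a non-zero
	 * DP_PSR_SUPPORT byte means some PSR version is supported.
	 */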
8710 					dpcd_data, sizeof(dpcd_data))) {
8711 		link->psr_feature_enabled = dpcd_data[0] ? true : false;
8712 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8713 	}
8714 }
8715 
8716 /*
8717  * amdgpu_dm_link_setup_psr() - configure the PSR link
8718  * @stream: stream state
8719  *
8720  * Return: true on success
8721  */
8722 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8723 {
8724 	struct dc_link *link = NULL;
8725 	struct psr_config psr_config = {0};
8726 	struct psr_context psr_context = {0};
8727 	struct dc *dc = NULL;
8728 	bool ret = false;
8729 
8730 	if (stream == NULL)
8731 		return false;
8732 
8733 	link = stream->link;
8734 	dc = link->ctx->dc;
8735 
8736 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8737 
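	/* A psr_version of 0 means the DMCU firmware provides no PSR support. */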
8738 	if (psr_config.psr_version > 0) {
8739 		psr_config.psr_exit_link_training_required = 0x1;
8740 		psr_config.psr_frame_capture_indication_req = 0;
8741 		psr_config.psr_rfb_setup_time = 0x37;
8742 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8743 		psr_config.allow_smu_optimizations = 0x0;
8744 
8745 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8746 
8747 	}
8748 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8749 
8750 	return ret;
8751 }
8752 
8753 /*
8754  * amdgpu_dm_psr_enable() - enable PSR firmware
8755  * @stream: stream state
8756  *
8757  * Return: true on success
8758  */
8759 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8760 {
8761 	struct dc_link *link = stream->link;
8762 	unsigned int vsync_rate_hz = 0;
8763 	struct dc_static_screen_params params = {0};
8764 	/* Calculate the number of static frames needed before generating an
8765 	 * interrupt to enter PSR; start from a failsafe default of two
8766 	 * static frames.
8767 	 */
8768 	unsigned int num_frames_static = 2;
8769 
8770 	DRM_DEBUG_DRIVER("Enabling PSR...\n");
8771 
8772 	vsync_rate_hz = div64_u64(div64_u64((
8773 			stream->timing.pix_clk_100hz * 100),
8774 			stream->timing.v_total),
8775 			stream->timing.h_total);
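	/*
	 * vsync_rate_hz = pixel clock / (h_total * v_total); pix_clk_100hz is
	 * in units of 100 Hz, hence the multiplication by 100 above.
	 */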
8776 
8777 	/*
8778 	 * Round up: calculate the number of frames such that at least 30 ms
8779 	 * of static screen time has passed.
8780 	 */
8781 	if (vsync_rate_hz != 0) {
8782 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8783 		num_frames_static = (30000 / frame_time_microsec) + 1;
8784 	}
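	/*
	 * E.g. for a 60 Hz panel: 1000000 / 60 = 16666 us per frame, and
	 * 30000 / 16666 + 1 = 2 frames of static screen time.
	 */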
8785 
8786 	params.triggers.cursor_update = true;
8787 	params.triggers.overlay_update = true;
8788 	params.triggers.surface_update = true;
8789 	params.num_frames = num_frames_static;
8790 
8791 	dc_stream_set_static_screen_params(link->ctx->dc,
8792 					   &stream, 1,
8793 					   &params);
8794 
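	/*
	 * Allow PSR to become active; the final argument selects whether to
	 * block until the state change takes effect (false here = no wait).
	 */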
8795 	return dc_link_set_psr_allow_active(link, true, false);
8796 }
8797 
8798 /*
8799  * amdgpu_dm_psr_disable() - disable psr f/w
8800  * amdgpu_dm_psr_disable() - disable PSR firmware
8801  * @stream: stream state
8802  * Return: true if success
8803  * Return: true on success
8804 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8805 {
8807 	DRM_DEBUG_DRIVER("Disabling PSR...\n");
8808 
8809 	return dc_link_set_psr_allow_active(stream->link, false, true);
8810 }
8811