xref: /openbsd-src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision c020cf82e0cc147236f01a8dca7052034cf9d30d)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * Initializes drm_device display-related structures, based on the
126  * information provided by DAL. The DRM structures are: drm_crtc,
127  * drm_connector, drm_encoder and drm_mode_config.
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int crtc - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 	if (crtc >= adev->mode_info.num_crtc)
186 		return 0;
187 	else {
188 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190 				acrtc->base.state);
191 
193 		if (acrtc_state->stream == NULL) {
194 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 				  crtc);
196 			return 0;
197 		}
198 
199 		return dc_stream_get_vblank_counter(acrtc_state->stream);
200 	}
201 }
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
215 		if (acrtc_state->stream == NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
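		/*
		 * Pack the values into the legacy register layout:
		 * position = vertical position | (horizontal position << 16),
		 * vbl = vblank start | (vblank end << 16).
		 */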
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
233 	}
234 
235 	return 0;
236 }
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
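/*
 * Return true if the CRTC's FreeSync/VRR state is an active one, either
 * variable or fixed refresh rate.
 */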
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: used for determining the CRTC instance
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
310 	/* The IRQ could fire while we are still in the initial bring-up stage */
311 	/* TODO: work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
319 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321 						 amdgpu_crtc->pflip_status,
322 						 AMDGPU_FLIP_SUBMITTED,
323 						 amdgpu_crtc->crtc_id,
324 						 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
333 	WARN_ON(!e);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
381 	/* Keep track of the vblank of this flip for flip throttling. We use the
382 	 * cooked hw counter, as it is incremented at the start of the vblank in
383 	 * which the pageflip completed, so last_flip_vblank is the forbidden
384 	 * count for queueing new pageflips if vsync + VRR is enabled.
385 	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
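/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode the core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch. It also performs below-the-range
 * (BTR) processing for pre-DCE12 ASICs.
 */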
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
414 		/* In VRR mode, core vblank handling is done here, after the end
415 		 * of the front-porch, as vblank timestamping only gives valid
416 		 * results once the front-porch has passed. This also delivers
417 		 * any page-flip completion events that were queued to us
418 		 * because a pageflip happened inside the front-porch.
419 		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: used for determining the CRTC instance
445  *
446  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 	if (!acrtc)
459 		return;
460 
461 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
462 
463 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
464 			 amdgpu_dm_vrr_active(acrtc_state),
465 			 acrtc_state->active_planes);
466 
467 	/*
468 	 * Core vblank handling at the start of the front-porch is only
469 	 * possible in non-VRR mode, as only then does vblank timestamping
470 	 * give valid results while inside the front-porch. Otherwise defer
471 	 * it to dm_vupdate_high_irq after the end of the front-porch.
472 	 */
473 	if (!amdgpu_dm_vrr_active(acrtc_state))
474 		drm_crtc_handle_vblank(&acrtc->base);
475 
476 	/*
477 	 * The following must happen at the start of vblank, for CRC
478 	 * computation and below-the-range (BTR) support in VRR mode.
479 	 */
480 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
481 
482 	/* BTR updates need to happen before VUPDATE on Vega and above. */
483 	if (adev->family < AMDGPU_FAMILY_AI)
484 		return;
485 
486 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
487 
488 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
489 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
490 		mod_freesync_handle_v_update(adev->dm.freesync_module,
491 					     acrtc_state->stream,
492 					     &acrtc_state->vrr_params);
493 
494 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
495 					   &acrtc_state->vrr_params.adjust);
496 	}
497 
498 	/*
499 	 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
500 	 * In that case, pageflip completion interrupts won't fire and pageflip
501 	 * completion events won't get delivered. Prevent this by sending
502 	 * pending pageflip events from here if a flip is still pending.
503 	 *
504 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
505 	 * avoid race conditions between flip programming and completion,
506 	 * which could cause too early flip completion events.
507 	 */
508 	if (adev->family >= AMDGPU_FAMILY_RV &&
509 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
510 	    acrtc_state->active_planes == 0) {
511 		if (acrtc->event) {
512 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
513 			acrtc->event = NULL;
514 			drm_crtc_vblank_put(&acrtc->base);
515 		}
516 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
517 	}
518 
519 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
520 }
521 
522 static int dm_set_clockgating_state(void *handle,
523 		  enum amd_clockgating_state state)
524 {
525 	return 0;
526 }
527 
528 static int dm_set_powergating_state(void *handle,
529 		  enum amd_powergating_state state)
530 {
531 	return 0;
532 }
533 
534 /* Prototypes of private functions */
535 static int dm_early_init(void* handle);
536 
537 /* Allocate memory for FBC compressed data */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
539 {
540 	struct drm_device *dev = connector->dev;
541 	struct amdgpu_device *adev = dev->dev_private;
542 	struct dm_comressor_info *compressor = &adev->dm.compressor;
543 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544 	struct drm_display_mode *mode;
545 	unsigned long max_size = 0;
546 
547 	if (adev->dm.dc->fbc_compressor == NULL)
548 		return;
549 
550 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
551 		return;
552 
553 	if (compressor->bo_ptr)
554 		return;
555 
557 	list_for_each_entry(mode, &connector->modes, head) {
558 		if (max_size < mode->htotal * mode->vtotal)
559 			max_size = mode->htotal * mode->vtotal;
560 	}
561 
562 	if (max_size) {
563 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
564 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
565 			    &compressor->gpu_addr, &compressor->cpu_addr);
566 
567 		if (r)
568 			DRM_ERROR("DM: Failed to initialize FBC\n");
569 		else {
570 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
571 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
572 		}
574 	}
576 }
577 
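/*
 * Audio component callback: find the connector whose audio pin matches the
 * given port and copy its ELD (EDID-Like Data) into the caller's buffer.
 */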
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579 					  int pipe, bool *enabled,
580 					  unsigned char *buf, int max_bytes)
581 {
582 	struct drm_device *dev = dev_get_drvdata(kdev);
583 	struct amdgpu_device *adev = dev->dev_private;
584 	struct drm_connector *connector;
585 	struct drm_connector_list_iter conn_iter;
586 	struct amdgpu_dm_connector *aconnector;
587 	int ret = 0;
588 
589 	*enabled = false;
590 
591 	mutex_lock(&adev->dm.audio_lock);
592 
593 	drm_connector_list_iter_begin(dev, &conn_iter);
594 	drm_for_each_connector_iter(connector, &conn_iter) {
595 		aconnector = to_amdgpu_dm_connector(connector);
596 		if (aconnector->audio_inst != port)
597 			continue;
598 
599 		*enabled = true;
600 		ret = drm_eld_size(connector->eld);
601 		memcpy(buf, connector->eld, min(max_bytes, ret));
602 
603 		break;
604 	}
605 	drm_connector_list_iter_end(&conn_iter);
606 
607 	mutex_unlock(&adev->dm.audio_lock);
608 
609 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
610 
611 	return ret;
612 }
613 
614 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
615 	.get_eld = amdgpu_dm_audio_component_get_eld,
616 };
617 
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619 				       struct device *hda_kdev, void *data)
620 {
621 	struct drm_device *dev = dev_get_drvdata(kdev);
622 	struct amdgpu_device *adev = dev->dev_private;
623 	struct drm_audio_component *acomp = data;
624 
625 	acomp->ops = &amdgpu_dm_audio_component_ops;
626 	acomp->dev = kdev;
627 	adev->dm.audio_component = acomp;
628 
629 	return 0;
630 }
631 
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633 					  struct device *hda_kdev, void *data)
634 {
635 	struct drm_device *dev = dev_get_drvdata(kdev);
636 	struct amdgpu_device *adev = dev->dev_private;
637 	struct drm_audio_component *acomp = data;
638 
639 	acomp->ops = NULL;
640 	acomp->dev = NULL;
641 	adev->dm.audio_component = NULL;
642 }
643 
644 #ifdef notyet
645 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
646 	.bind	= amdgpu_dm_audio_component_bind,
647 	.unbind	= amdgpu_dm_audio_component_unbind,
648 };
649 #endif
650 
651 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
652 {
653 	int i, ret;
654 
655 	if (!amdgpu_audio)
656 		return 0;
657 
658 	adev->mode_info.audio.enabled = true;
659 
660 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
661 
662 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
663 		adev->mode_info.audio.pin[i].channels = -1;
664 		adev->mode_info.audio.pin[i].rate = -1;
665 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
666 		adev->mode_info.audio.pin[i].status_bits = 0;
667 		adev->mode_info.audio.pin[i].category_code = 0;
668 		adev->mode_info.audio.pin[i].connected = false;
669 		adev->mode_info.audio.pin[i].id =
670 			adev->dm.dc->res_pool->audios[i]->inst;
671 		adev->mode_info.audio.pin[i].offset = 0;
672 	}
673 
674 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
675 	if (ret < 0)
676 		return ret;
677 
678 	adev->dm.audio_registered = true;
679 
680 	return 0;
681 }
682 
683 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
684 {
685 	if (!amdgpu_audio)
686 		return;
687 
688 	if (!adev->mode_info.audio.enabled)
689 		return;
690 
691 	if (adev->dm.audio_registered) {
692 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
693 		adev->dm.audio_registered = false;
694 	}
695 
696 	/* TODO: Disable audio? */
697 
698 	adev->mode_info.audio.enabled = false;
699 }
700 
701 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
702 {
703 	struct drm_audio_component *acomp = adev->dm.audio_component;
704 
705 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
706 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
707 
708 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
709 						 pin, -1);
710 	}
711 }
712 
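/*
 * dm_dmub_hw_init() - Initialize the DMUB hardware, if the ASIC supports it
 *
 * Copies the DMUB firmware and VBIOS into their framebuffer windows, clears
 * the mailbox, tracebuffer and firmware-state windows, then hands the window
 * layout to the DMUB service and waits for the firmware to auto-load. Also
 * creates the DC DMUB server and initializes DMCU/ABM when present.
 */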
713 static int dm_dmub_hw_init(struct amdgpu_device *adev)
714 {
715 	const struct dmcub_firmware_header_v1_0 *hdr;
716 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
717 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
718 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
719 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
720 	struct abm *abm = adev->dm.dc->res_pool->abm;
721 	struct dmub_srv_hw_params hw_params;
722 	enum dmub_status status;
723 	const unsigned char *fw_inst_const, *fw_bss_data;
724 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
725 	bool has_hw_support;
726 
727 	if (!dmub_srv)
728 		/* DMUB isn't supported on the ASIC. */
729 		return 0;
730 
731 	if (!fb_info) {
732 		DRM_ERROR("No framebuffer info for DMUB service.\n");
733 		return -EINVAL;
734 	}
735 
736 	if (!dmub_fw) {
737 		/* Firmware required for DMUB support. */
738 		DRM_ERROR("No firmware provided for DMUB.\n");
739 		return -EINVAL;
740 	}
741 
742 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
743 	if (status != DMUB_STATUS_OK) {
744 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
745 		return -EINVAL;
746 	}
747 
748 	if (!has_hw_support) {
749 		DRM_INFO("DMUB unsupported on ASIC\n");
750 		return 0;
751 	}
752 
753 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
754 
755 	fw_inst_const = dmub_fw->data +
756 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
757 			PSP_HEADER_BYTES;
758 
759 	fw_bss_data = dmub_fw->data +
760 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 		      le32_to_cpu(hdr->inst_const_bytes);
762 
763 	/* Copy firmware and bios info into FB memory. */
764 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
765 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
766 
767 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
768 
769 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
770 	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
771 	 * DMUB firmware into CW0; otherwise, the firmware backdoor load is
772 	 * done here in dm_dmub_hw_init.
773 	 */
774 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
775 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
776 				fw_inst_const_size);
777 	}
778 
779 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
780 	       fw_bss_data_size);
781 
782 	/* Copy firmware bios info into FB memory. */
783 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
784 	       adev->bios_size);
785 
786 	/* Reset regions that need to be reset. */
787 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
788 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
789 
790 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
791 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
792 
793 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
794 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
795 
796 	/* Initialize hardware. */
797 	memset(&hw_params, 0, sizeof(hw_params));
798 	hw_params.fb_base = adev->gmc.fb_start;
799 	hw_params.fb_offset = adev->gmc.aper_base;
800 
801 	/* backdoor load firmware and trigger dmub running */
802 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
803 		hw_params.load_inst_const = true;
804 
805 	if (dmcu)
806 		hw_params.psp_version = dmcu->psp_version;
807 
808 	for (i = 0; i < fb_info->num_fb; ++i)
809 		hw_params.fb[i] = &fb_info->fb[i];
810 
811 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
812 	if (status != DMUB_STATUS_OK) {
813 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
814 		return -EINVAL;
815 	}
816 
817 	/* Wait for firmware load to finish. */
818 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
819 	if (status != DMUB_STATUS_OK)
820 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
821 
822 	/* Init DMCU and ABM if available. */
823 	if (dmcu && abm) {
824 		dmcu->funcs->dmcu_init(dmcu);
825 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
826 	}
827 
828 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
829 	if (!adev->dm.dc->ctx->dmub_srv) {
830 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
831 		return -ENOMEM;
832 	}
833 
834 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
835 		 adev->dm.dmcub_fw_version);
836 
837 	return 0;
838 }
839 
840 static int amdgpu_dm_init(struct amdgpu_device *adev)
841 {
842 	struct dc_init_data init_data;
843 #ifdef CONFIG_DRM_AMD_DC_HDCP
844 	struct dc_callback_init init_params;
845 #endif
846 	int r;
847 
848 	adev->dm.ddev = adev->ddev;
849 	adev->dm.adev = adev;
850 
851 	/* Zero all the fields */
852 	memset(&init_data, 0, sizeof(init_data));
853 #ifdef CONFIG_DRM_AMD_DC_HDCP
854 	memset(&init_params, 0, sizeof(init_params));
855 #endif
856 
857 	rw_init(&adev->dm.dc_lock, "dmdc");
858 	rw_init(&adev->dm.audio_lock, "dmaud");
859 
860 	if (amdgpu_dm_irq_init(adev)) {
861 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
862 		goto error;
863 	}
864 
865 	init_data.asic_id.chip_family = adev->family;
866 
867 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
868 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
869 
870 	init_data.asic_id.vram_width = adev->gmc.vram_width;
871 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
872 	init_data.asic_id.atombios_base_address =
873 		adev->mode_info.atom_context->bios;
874 
875 	init_data.driver = adev;
876 
877 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
878 
879 	if (!adev->dm.cgs_device) {
880 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
881 		goto error;
882 	}
883 
884 	init_data.cgs_device = adev->dm.cgs_device;
885 
886 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
887 
888 	switch (adev->asic_type) {
889 	case CHIP_CARRIZO:
890 	case CHIP_STONEY:
891 	case CHIP_RAVEN:
892 	case CHIP_RENOIR:
893 		init_data.flags.gpu_vm_support = true;
894 		break;
895 	default:
896 		break;
897 	}
898 
899 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
900 		init_data.flags.fbc_support = true;
901 
902 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
903 		init_data.flags.multi_mon_pp_mclk_switch = true;
904 
905 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
906 		init_data.flags.disable_fractional_pwm = true;
907 
908 	init_data.flags.power_down_display_on_boot = true;
909 
910 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
911 
912 	/* Display Core create. */
913 	adev->dm.dc = dc_create(&init_data);
914 
915 	if (adev->dm.dc) {
916 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
917 	} else {
918 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
919 		goto error;
920 	}
921 
922 	r = dm_dmub_hw_init(adev);
923 	if (r) {
924 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
925 		goto error;
926 	}
927 
928 	dc_hardware_init(adev->dm.dc);
929 
930 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
931 	if (!adev->dm.freesync_module) {
932 		DRM_ERROR(
933 		"amdgpu: failed to initialize freesync_module.\n");
934 	} else
935 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
936 				adev->dm.freesync_module);
937 
938 	amdgpu_dm_init_color_mod();
939 
940 #ifdef CONFIG_DRM_AMD_DC_HDCP
941 	if (adev->asic_type >= CHIP_RAVEN) {
942 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
943 
944 		if (!adev->dm.hdcp_workqueue)
945 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
946 		else
947 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
948 
949 		dc_init_callbacks(adev->dm.dc, &init_params);
950 	}
951 #endif
952 	if (amdgpu_dm_initialize_drm_device(adev)) {
953 		DRM_ERROR(
954 		"amdgpu: failed to initialize sw for display support.\n");
955 		goto error;
956 	}
957 
958 	/* Update the actual used number of crtc */
959 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
960 
961 	/* TODO: Add_display_info? */
962 
963 	/* TODO use dynamic cursor width */
964 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
965 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
966 
967 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
968 		DRM_ERROR(
969 		"amdgpu: failed to initialize vblank support.\n");
970 		goto error;
971 	}
972 
973 	DRM_DEBUG_DRIVER("KMS initialized.\n");
974 
975 	return 0;
976 error:
977 	amdgpu_dm_fini(adev);
978 
979 	return -EINVAL;
980 }
981 
982 static void amdgpu_dm_fini(struct amdgpu_device *adev)
983 {
984 	amdgpu_dm_audio_fini(adev);
985 
986 	amdgpu_dm_destroy_drm_device(&adev->dm);
987 
988 #ifdef CONFIG_DRM_AMD_DC_HDCP
989 	if (adev->dm.hdcp_workqueue) {
990 		hdcp_destroy(adev->dm.hdcp_workqueue);
991 		adev->dm.hdcp_workqueue = NULL;
992 	}
993 
994 	if (adev->dm.dc)
995 		dc_deinit_callbacks(adev->dm.dc);
996 #endif
997 	if (adev->dm.dc->ctx->dmub_srv) {
998 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
999 		adev->dm.dc->ctx->dmub_srv = NULL;
1000 	}
1001 
1002 	if (adev->dm.dmub_bo)
1003 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1004 				      &adev->dm.dmub_bo_gpu_addr,
1005 				      &adev->dm.dmub_bo_cpu_addr);
1006 
1007 	/* DC Destroy TODO: Replace destroy DAL */
1008 	if (adev->dm.dc)
1009 		dc_destroy(&adev->dm.dc);
1010 	/*
1011 	 * TODO: pageflip, vblank interrupt
1012 	 *
1013 	 * amdgpu_dm_irq_fini(adev);
1014 	 */
1015 
1016 	if (adev->dm.cgs_device) {
1017 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1018 		adev->dm.cgs_device = NULL;
1019 	}
1020 	if (adev->dm.freesync_module) {
1021 		mod_freesync_destroy(adev->dm.freesync_module);
1022 		adev->dm.freesync_module = NULL;
1023 	}
1024 
1025 	mutex_destroy(&adev->dm.audio_lock);
1026 	mutex_destroy(&adev->dm.dc_lock);
1027 
1028 	return;
1029 }
1030 
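/*
 * load_dmcu_fw() - Request and validate the DMCU firmware for ASICs that
 * need it and register it for PSP loading. The DMCU firmware is optional,
 * so a missing file is not treated as an error.
 */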
1031 static int load_dmcu_fw(struct amdgpu_device *adev)
1032 {
1033 	const char *fw_name_dmcu = NULL;
1034 	int r;
1035 	const struct dmcu_firmware_header_v1_0 *hdr;
1036 
1037 	switch (adev->asic_type) {
1038 	case CHIP_BONAIRE:
1039 	case CHIP_HAWAII:
1040 	case CHIP_KAVERI:
1041 	case CHIP_KABINI:
1042 	case CHIP_MULLINS:
1043 	case CHIP_TONGA:
1044 	case CHIP_FIJI:
1045 	case CHIP_CARRIZO:
1046 	case CHIP_STONEY:
1047 	case CHIP_POLARIS11:
1048 	case CHIP_POLARIS10:
1049 	case CHIP_POLARIS12:
1050 	case CHIP_VEGAM:
1051 	case CHIP_VEGA10:
1052 	case CHIP_VEGA12:
1053 	case CHIP_VEGA20:
1054 	case CHIP_NAVI10:
1055 	case CHIP_NAVI14:
1056 	case CHIP_RENOIR:
1057 		return 0;
1058 	case CHIP_NAVI12:
1059 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1060 		break;
1061 	case CHIP_RAVEN:
1062 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1063 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1064 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1065 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1066 		else
1067 			return 0;
1068 		break;
1069 	default:
1070 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1071 		return -EINVAL;
1072 	}
1073 
1074 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1076 		return 0;
1077 	}
1078 
1079 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1080 	if (r == -ENOENT) {
1081 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1082 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1083 		adev->dm.fw_dmcu = NULL;
1084 		return 0;
1085 	}
1086 	if (r) {
1087 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1088 			fw_name_dmcu);
1089 		return r;
1090 	}
1091 
1092 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1093 	if (r) {
1094 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1095 			fw_name_dmcu);
1096 		release_firmware(adev->dm.fw_dmcu);
1097 		adev->dm.fw_dmcu = NULL;
1098 		return r;
1099 	}
1100 
1101 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1102 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1103 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1104 	adev->firmware.fw_size +=
1105 		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1106 
1107 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1108 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1109 	adev->firmware.fw_size +=
1110 		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1111 
1112 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1113 
1114 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1115 
1116 	return 0;
1117 }
1118 
1119 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1120 {
1121 	struct amdgpu_device *adev = ctx;
1122 
1123 	return dm_read_reg(adev->dm.dc->ctx, address);
1124 }
1125 
1126 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1127 				     uint32_t value)
1128 {
1129 	struct amdgpu_device *adev = ctx;
1130 
1131 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1132 }
1133 
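/*
 * dm_dmub_sw_init() - Software-side DMUB setup
 *
 * Loads and validates the DMUB firmware (treated as optional: failures are
 * logged and init continues without DMUB), creates the DMUB service,
 * computes the region layout and backs it with a framebuffer allocation.
 */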
1134 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1135 {
1136 	struct dmub_srv_create_params create_params;
1137 	struct dmub_srv_region_params region_params;
1138 	struct dmub_srv_region_info region_info;
1139 	struct dmub_srv_fb_params fb_params;
1140 	struct dmub_srv_fb_info *fb_info;
1141 	struct dmub_srv *dmub_srv;
1142 	const struct dmcub_firmware_header_v1_0 *hdr;
1143 	const char *fw_name_dmub;
1144 	enum dmub_asic dmub_asic;
1145 	enum dmub_status status;
1146 	int r;
1147 
1148 	switch (adev->asic_type) {
1149 	case CHIP_RENOIR:
1150 		dmub_asic = DMUB_ASIC_DCN21;
1151 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1152 		break;
1153 
1154 	default:
1155 		/* ASIC doesn't support DMUB. */
1156 		return 0;
1157 	}
1158 
1159 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1160 	if (r) {
1161 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1162 		return 0;
1163 	}
1164 
1165 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1166 	if (r) {
1167 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1168 		return 0;
1169 	}
1170 
1171 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1172 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1173 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1174 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1175 			AMDGPU_UCODE_ID_DMCUB;
1176 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1177 			adev->dm.dmub_fw;
1178 		adev->firmware.fw_size +=
1179 			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1180 
1181 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1182 			 adev->dm.dmcub_fw_version);
1183 	}
1184 
1186 
1187 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1188 	dmub_srv = adev->dm.dmub_srv;
1189 
1190 	if (!dmub_srv) {
1191 		DRM_ERROR("Failed to allocate DMUB service!\n");
1192 		return -ENOMEM;
1193 	}
1194 
1195 	memset(&create_params, 0, sizeof(create_params));
1196 	create_params.user_ctx = adev;
1197 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1198 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1199 	create_params.asic = dmub_asic;
1200 
1201 	/* Create the DMUB service. */
1202 	status = dmub_srv_create(dmub_srv, &create_params);
1203 	if (status != DMUB_STATUS_OK) {
1204 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1205 		return -EINVAL;
1206 	}
1207 
1208 	/* Calculate the size of all the regions for the DMUB service. */
1209 	memset(&region_params, 0, sizeof(region_params));
1210 
1211 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1212 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1213 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1214 	region_params.vbios_size = adev->bios_size;
1215 	region_params.fw_bss_data =
1216 		adev->dm.dmub_fw->data +
1217 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1218 		le32_to_cpu(hdr->inst_const_bytes);
1219 
1220 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1221 					   &region_info);
1222 
1223 	if (status != DMUB_STATUS_OK) {
1224 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1225 		return -EINVAL;
1226 	}
1227 
1228 	/*
1229 	 * Allocate a framebuffer based on the total size of all the regions.
1230 	 * TODO: Move this into GART.
1231 	 */
1232 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1233 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1234 				    &adev->dm.dmub_bo_gpu_addr,
1235 				    &adev->dm.dmub_bo_cpu_addr);
1236 	if (r)
1237 		return r;
1238 
1239 	/* Rebase the regions on the framebuffer address. */
1240 	memset(&fb_params, 0, sizeof(fb_params));
1241 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1242 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1243 	fb_params.region_info = &region_info;
1244 
1245 	adev->dm.dmub_fb_info =
1246 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1247 	fb_info = adev->dm.dmub_fb_info;
1248 
1249 	if (!fb_info) {
1250 		DRM_ERROR(
1251 			"Failed to allocate framebuffer info for DMUB service!\n");
1252 		return -ENOMEM;
1253 	}
1254 
1255 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1256 	if (status != DMUB_STATUS_OK) {
1257 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1258 		return -EINVAL;
1259 	}
1260 
1261 	return 0;
1262 }
1263 
1264 static int dm_sw_init(void *handle)
1265 {
1266 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1267 	int r;
1268 
1269 	r = dm_dmub_sw_init(adev);
1270 	if (r)
1271 		return r;
1272 
1273 	return load_dmcu_fw(adev);
1274 }
1275 
1276 static int dm_sw_fini(void *handle)
1277 {
1278 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1279 
1280 	kfree(adev->dm.dmub_fb_info);
1281 	adev->dm.dmub_fb_info = NULL;
1282 
1283 	if (adev->dm.dmub_srv) {
1284 		dmub_srv_destroy(adev->dm.dmub_srv);
1285 		adev->dm.dmub_srv = NULL;
1286 	}
1287 
1288 	if (adev->dm.dmub_fw) {
1289 		release_firmware(adev->dm.dmub_fw);
1290 		adev->dm.dmub_fw = NULL;
1291 	}
1292 
1293 	if (adev->dm.fw_dmcu) {
1294 		release_firmware(adev->dm.fw_dmcu);
1295 		adev->dm.fw_dmcu = NULL;
1296 	}
1297 
1298 	return 0;
1299 }
1300 
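/*
 * Walk all connectors and start MST topology management on every link
 * that was detected as an MST branch device.
 */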
1301 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1302 {
1303 	struct amdgpu_dm_connector *aconnector;
1304 	struct drm_connector *connector;
1305 	struct drm_connector_list_iter iter;
1306 	int ret = 0;
1307 
1308 	drm_connector_list_iter_begin(dev, &iter);
1309 	drm_for_each_connector_iter(connector, &iter) {
1310 		aconnector = to_amdgpu_dm_connector(connector);
1311 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1312 		    aconnector->mst_mgr.aux) {
1313 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1314 					 aconnector,
1315 					 aconnector->base.base.id);
1316 
1317 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1318 			if (ret < 0) {
1319 				DRM_ERROR("DM_MST: Failed to start MST\n");
1320 				aconnector->dc_link->type =
1321 					dc_connection_single;
1322 				break;
1323 			}
1324 		}
1325 	}
1326 	drm_connector_list_iter_end(&iter);
1327 
1328 	return ret;
1329 }
1330 
1331 static int dm_late_init(void *handle)
1332 {
1333 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1334 
1335 	struct dmcu_iram_parameters params;
1336 	unsigned int linear_lut[16];
1337 	int i;
1338 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1339 	bool ret = false;
1340 
1341 	for (i = 0; i < 16; i++)
1342 		linear_lut[i] = 0xFFFF * i / 15;
1343 
1344 	params.set = 0;
1345 	params.backlight_ramping_start = 0xCCCC;
1346 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1347 	params.backlight_lut_array_size = 16;
1348 	params.backlight_lut_array = linear_lut;
1349 
1350 	/* Min backlight level after ABM reduction. Don't allow it below 1%:
1351 	 * 0xFFFF * 0.01 = 0x28F.
1352 	 */
1353 	params.min_abm_backlight = 0x28F;
1354 
1355 	/* todo will enable for navi10 */
1356 	if (adev->asic_type <= CHIP_RAVEN) {
1357 		ret = dmcu_load_iram(dmcu, params);
1358 
1359 		if (!ret)
1360 			return -EINVAL;
1361 	}
1362 
1363 	return detect_mst_link_for_all_connectors(adev->ddev);
1364 }
1365 
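/*
 * s3_handle_mst() - Suspend or resume the MST topology managers of all
 * root MST connectors. If a topology fails to resume, MST is turned off
 * on that connector and a hotplug event is generated so userspace can
 * re-probe.
 */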
1366 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1367 {
1368 	struct amdgpu_dm_connector *aconnector;
1369 	struct drm_connector *connector;
1370 	struct drm_connector_list_iter iter;
1371 	struct drm_dp_mst_topology_mgr *mgr;
1372 	int ret;
1373 	bool need_hotplug = false;
1374 
1375 	drm_connector_list_iter_begin(dev, &iter);
1376 	drm_for_each_connector_iter(connector, &iter) {
1377 		aconnector = to_amdgpu_dm_connector(connector);
1378 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1379 		    aconnector->mst_port)
1380 			continue;
1381 
1382 		mgr = &aconnector->mst_mgr;
1383 
1384 		if (suspend) {
1385 			drm_dp_mst_topology_mgr_suspend(mgr);
1386 		} else {
1387 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1388 			if (ret < 0) {
1389 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1390 				need_hotplug = true;
1391 			}
1392 		}
1393 	}
1394 	drm_connector_list_iter_end(&iter);
1395 
1396 	if (need_hotplug)
1397 		drm_kms_helper_hotplug_event(dev);
1398 }
1399 
1400 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1401 {
1402 	struct smu_context *smu = &adev->smu;
1403 	int ret = 0;
1404 
1405 	if (!is_support_sw_smu(adev))
1406 		return 0;
1407 
1408 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1409 	 * depends on the Windows driver's dc implementation.
1410 	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
1411 	 * should be passed to the smu during boot up and on resume from s3.
1412 	 * Boot up: dc calculates the dcn watermark clock settings within
1413 	 * dc_create / dcn20_resource_construct,
1414 	 * then calls the pplib functions below to pass the settings to the smu:
1415 	 * smu_set_watermarks_for_clock_ranges
1416 	 * smu_set_watermarks_table
1417 	 * navi10_set_watermarks_table
1418 	 * smu_write_watermarks_table
1419 	 *
1420 	 * For Renoir, the clock settings of the dcn watermarks are also fixed
1421 	 * values. dc has implemented a different flow for the Windows driver:
1422 	 * dc_hardware_init / dc_set_power_state
1423 	 * dcn10_init_hw
1424 	 * notify_wm_ranges
1425 	 * set_wm_ranges
1426 	 * -- Linux
1427 	 * smu_set_watermarks_for_clock_ranges
1428 	 * renoir_set_watermarks_table
1429 	 * smu_write_watermarks_table
1430 	 *
1431 	 * For Linux,
1432 	 * dc_hardware_init -> amdgpu_dm_init
1433 	 * dc_set_power_state --> dm_resume
1434 	 *
1435 	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1436 	 */
1438 	switch (adev->asic_type) {
1439 	case CHIP_NAVI10:
1440 	case CHIP_NAVI14:
1441 	case CHIP_NAVI12:
1442 		break;
1443 	default:
1444 		return 0;
1445 	}
1446 
1447 	mutex_lock(&smu->mutex);
1448 
1449 	/* pass data to smu controller */
1450 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1451 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1452 		ret = smu_write_watermarks_table(smu);
1453 
1454 		if (ret) {
1455 			mutex_unlock(&smu->mutex);
1456 			DRM_ERROR("Failed to update WMTABLE!\n");
1457 			return ret;
1458 		}
1459 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1460 	}
1461 
1462 	mutex_unlock(&smu->mutex);
1463 
1464 	return 0;
1465 }
1466 
1467 /**
1468  * dm_hw_init() - Initialize DC device
1469  * @handle: The base driver device containing the amdgpu_dm device.
1470  *
1471  * Initialize the &struct amdgpu_display_manager device. This involves calling
1472  * the initializers of each DM component, then populating the struct with them.
1473  *
1474  * Although the function implies hardware initialization, both hardware and
1475  * software are initialized here. Splitting them out to their relevant init
1476  * hooks is a future TODO item.
1477  *
1478  * Some notable things that are initialized here:
1479  *
1480  * - Display Core, both software and hardware
1481  * - DC modules that we need (freesync and color management)
1482  * - DRM software states
1483  * - Interrupt sources and handlers
1484  * - Vblank support
1485  * - Debug FS entries, if enabled
1486  */
1487 static int dm_hw_init(void *handle)
1488 {
1489 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1490 	/* Create DAL display manager */
1491 	amdgpu_dm_init(adev);
1492 	amdgpu_dm_hpd_init(adev);
1493 
1494 	return 0;
1495 }
1496 
1497 /**
1498  * dm_hw_fini() - Teardown DC device
1499  * @handle: The base driver device containing the amdgpu_dm device.
1500  *
1501  * Teardown components within &struct amdgpu_display_manager that require
1502  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1503  * were loaded. Also flush IRQ workqueues and disable them.
1504  */
1505 static int dm_hw_fini(void *handle)
1506 {
1507 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508 
1509 	amdgpu_dm_hpd_fini(adev);
1510 
1511 	amdgpu_dm_irq_fini(adev);
1512 	amdgpu_dm_fini(adev);
1513 	return 0;
1514 }
1515 
1516 static int dm_suspend(void *handle)
1517 {
1518 	struct amdgpu_device *adev = handle;
1519 	struct amdgpu_display_manager *dm = &adev->dm;
1520 	int ret = 0;
1521 
1522 	WARN_ON(adev->dm.cached_state);
1523 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1524 
1525 	s3_handle_mst(adev->ddev, true);
1526 
1527 	amdgpu_dm_irq_suspend(adev);
1528 
1530 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1531 
1532 	return ret;
1533 }
1534 
1535 static struct amdgpu_dm_connector *
1536 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1537 					     struct drm_crtc *crtc)
1538 {
1539 	uint32_t i;
1540 	struct drm_connector_state *new_con_state;
1541 	struct drm_connector *connector;
1542 	struct drm_crtc *crtc_from_state;
1543 
1544 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1545 		crtc_from_state = new_con_state->crtc;
1546 
1547 		if (crtc_from_state == crtc)
1548 			return to_amdgpu_dm_connector(connector);
1549 	}
1550 
1551 	return NULL;
1552 }
1553 
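/*
 * emulated_link_detect() - Emulate link detection for a forced connector
 * that has no physical sink: create a local sink matching the connector
 * signal type and attempt to read the EDID from it.
 */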
1554 static void emulated_link_detect(struct dc_link *link)
1555 {
1556 	struct dc_sink_init_data sink_init_data = { 0 };
1557 	struct display_sink_capability sink_caps = { 0 };
1558 	enum dc_edid_status edid_status;
1559 	struct dc_context *dc_ctx = link->ctx;
1560 	struct dc_sink *sink = NULL;
1561 	struct dc_sink *prev_sink = NULL;
1562 
1563 	link->type = dc_connection_none;
1564 	prev_sink = link->local_sink;
1565 
1566 	if (prev_sink != NULL)
1567 		dc_sink_retain(prev_sink);
1568 
1569 	switch (link->connector_signal) {
1570 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1571 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1572 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1573 		break;
1574 	}
1575 
1576 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1577 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1578 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1579 		break;
1580 	}
1581 
1582 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1583 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1584 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1585 		break;
1586 	}
1587 
1588 	case SIGNAL_TYPE_LVDS: {
1589 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1590 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1591 		break;
1592 	}
1593 
1594 	case SIGNAL_TYPE_EDP: {
1595 		sink_caps.transaction_type =
1596 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1597 		sink_caps.signal = SIGNAL_TYPE_EDP;
1598 		break;
1599 	}
1600 
1601 	case SIGNAL_TYPE_DISPLAY_PORT: {
1602 		sink_caps.transaction_type =
1603 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1604 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1605 		break;
1606 	}
1607 
1608 	default:
1609 		DC_ERROR("Invalid connector type! signal:%d\n",
1610 			link->connector_signal);
1611 		return;
1612 	}
1613 
1614 	sink_init_data.link = link;
1615 	sink_init_data.sink_signal = sink_caps.signal;
1616 
1617 	sink = dc_sink_create(&sink_init_data);
1618 	if (!sink) {
1619 		DC_ERROR("Failed to create sink!\n");
1620 		return;
1621 	}
1622 
1623 	/* dc_sink_create returns a new reference */
1624 	link->local_sink = sink;
1625 
1626 	edid_status = dm_helpers_read_local_edid(
1627 			link->ctx,
1628 			link,
1629 			sink);
1630 
1631 	if (edid_status != EDID_OK)
1632 		DC_ERROR("Failed to read EDID\n");
1634 }
1635 
1636 static int dm_resume(void *handle)
1637 {
1638 	struct amdgpu_device *adev = handle;
1639 	struct drm_device *ddev = adev->ddev;
1640 	struct amdgpu_display_manager *dm = &adev->dm;
1641 	struct amdgpu_dm_connector *aconnector;
1642 	struct drm_connector *connector;
1643 	struct drm_connector_list_iter iter;
1644 	struct drm_crtc *crtc;
1645 	struct drm_crtc_state *new_crtc_state;
1646 	struct dm_crtc_state *dm_new_crtc_state;
1647 	struct drm_plane *plane;
1648 	struct drm_plane_state *new_plane_state;
1649 	struct dm_plane_state *dm_new_plane_state;
1650 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1651 	enum dc_connection_type new_connection_type = dc_connection_none;
1652 	int i, r;
1653 
1654 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1655 	dc_release_state(dm_state->context);
1656 	dm_state->context = dc_create_state(dm->dc);
1657 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1658 	dc_resource_state_construct(dm->dc, dm_state->context);
1659 
1660 	/* Before powering on DC we need to re-initialize DMUB. */
1661 	r = dm_dmub_hw_init(adev);
1662 	if (r)
1663 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1664 
1665 	/* power on hardware */
1666 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1667 
1668 	/* program HPD filter */
1669 	dc_resume(dm->dc);
1670 
1671 	/*
1672 	 * Enable the HPD Rx IRQ early; this must be done before the mode set,
1673 	 * as short-pulse interrupts are used for MST.
1674 	 */
1675 	amdgpu_dm_irq_resume_early(adev);
1676 
1677 	/* On resume we need to rewrite the MSTM control bits to enable MST */
1678 	s3_handle_mst(ddev, false);
1679 
1680 	/* Do detection */
1681 	drm_connector_list_iter_begin(ddev, &iter);
1682 	drm_for_each_connector_iter(connector, &iter) {
1683 		aconnector = to_amdgpu_dm_connector(connector);
1684 
1685 		/*
1686 		 * This is the case when traversing through already created
1687 		 * MST connectors; they should be skipped.
1688 		 */
1689 		if (aconnector->mst_port)
1690 			continue;
1691 
1692 		mutex_lock(&aconnector->hpd_lock);
1693 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1694 			DRM_ERROR("KMS: Failed to detect connector\n");
1695 
1696 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1697 			emulated_link_detect(aconnector->dc_link);
1698 		else
1699 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1700 
1701 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1702 			aconnector->fake_enable = false;
1703 
1704 		if (aconnector->dc_sink)
1705 			dc_sink_release(aconnector->dc_sink);
1706 		aconnector->dc_sink = NULL;
1707 		amdgpu_dm_update_connector_after_detect(aconnector);
1708 		mutex_unlock(&aconnector->hpd_lock);
1709 	}
1710 	drm_connector_list_iter_end(&iter);
1711 
1712 	/* Force mode set in atomic commit */
1713 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1714 		new_crtc_state->active_changed = true;
1715 
1716 	/*
1717 	 * atomic_check is expected to create the dc states. We need to release
1718 	 * them here, since they were duplicated as part of the suspend
1719 	 * procedure.
1720 	 */
1721 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1722 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1723 		if (dm_new_crtc_state->stream) {
1724 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1725 			dc_stream_release(dm_new_crtc_state->stream);
1726 			dm_new_crtc_state->stream = NULL;
1727 		}
1728 	}
1729 
1730 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1731 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1732 		if (dm_new_plane_state->dc_state) {
1733 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1734 			dc_plane_state_release(dm_new_plane_state->dc_state);
1735 			dm_new_plane_state->dc_state = NULL;
1736 		}
1737 	}
1738 
1739 	drm_atomic_helper_resume(ddev, dm->cached_state);
1740 
1741 	dm->cached_state = NULL;
1742 
1743 	amdgpu_dm_irq_resume_late(adev);
1744 
1745 	amdgpu_dm_smu_write_watermarks_table(adev);
1746 
1747 	return 0;
1748 }
1749 
1750 /**
1751  * DOC: DM Lifecycle
1752  *
1753  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1754  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1755  * the base driver's device list to be initialized and torn down accordingly.
1756  *
1757  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1758  */
1759 
1760 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1761 	.name = "dm",
1762 	.early_init = dm_early_init,
1763 	.late_init = dm_late_init,
1764 	.sw_init = dm_sw_init,
1765 	.sw_fini = dm_sw_fini,
1766 	.hw_init = dm_hw_init,
1767 	.hw_fini = dm_hw_fini,
1768 	.suspend = dm_suspend,
1769 	.resume = dm_resume,
1770 	.is_idle = dm_is_idle,
1771 	.wait_for_idle = dm_wait_for_idle,
1772 	.check_soft_reset = dm_check_soft_reset,
1773 	.soft_reset = dm_soft_reset,
1774 	.set_clockgating_state = dm_set_clockgating_state,
1775 	.set_powergating_state = dm_set_powergating_state,
1776 };
1777 
1778 const struct amdgpu_ip_block_version dm_ip_block =
1779 {
1780 	.type = AMD_IP_BLOCK_TYPE_DCE,
1781 	.major = 1,
1782 	.minor = 0,
1783 	.rev = 0,
1784 	.funcs = &amdgpu_dm_funcs,
1785 };
1786 
1788 /**
1789  * DOC: atomic
1790  *
1791  * *WIP*
1792  */
1793 
1794 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1795 	.fb_create = amdgpu_display_user_framebuffer_create,
1796 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1797 	.atomic_check = amdgpu_dm_atomic_check,
1798 	.atomic_commit = amdgpu_dm_atomic_commit,
1799 };
1800 
1801 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1802 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1803 };
1804 
1805 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1806 {
1807 	u32 max_cll, min_cll, max, min, q, r;
1808 	struct amdgpu_dm_backlight_caps *caps;
1809 	struct amdgpu_display_manager *dm;
1810 	struct drm_connector *conn_base;
1811 	struct amdgpu_device *adev;
1812 	static const u8 pre_computed_values[] = {
1813 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1814 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1815 
1816 	if (!aconnector || !aconnector->dc_link)
1817 		return;
1818 
1819 	conn_base = &aconnector->base;
1820 	adev = conn_base->dev->dev_private;
1821 	dm = &adev->dm;
1822 	caps = &dm->backlight_caps;
1823 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1824 	caps->aux_support = false;
1825 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1826 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1827 
1828 	if (caps->ext_caps->bits.oled == 1 ||
1829 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1830 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1831 		caps->aux_support = true;
1832 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50 * 2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would need floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * CV can be written as CV = 32*q + r. Substituting CV in the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute the values of 50*2**(r/32). The values were
	 * generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and can be verified against pre_computed_values above.
	 */
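	/*
	 * Worked example (hypothetical sink): max_cll = 65 gives q = 2 and
	 * r = 1, so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204.
	 */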
1848 	q = max_cll >> 5;
1849 	r = max_cll % 32;
1850 	max = (1 << q) * pre_computed_values[r];
1851 
1852 	// min luminance: maxLum * (CV/255)^2 / 100
1853 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1854 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
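	/*
	 * Note: with integer arithmetic q is either 0 or 1 here, and
	 * DIV_ROUND_CLOSEST(q * q, 100) rounds to 0 in both cases, so the
	 * computed minimum effectively collapses to 0.
	 */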
1855 
1856 	caps->aux_max_input_signal = max;
1857 	caps->aux_min_input_signal = min;
1858 }
1859 
1860 void amdgpu_dm_update_connector_after_detect(
1861 		struct amdgpu_dm_connector *aconnector)
1862 {
1863 	struct drm_connector *connector = &aconnector->base;
1864 	struct drm_device *dev = connector->dev;
1865 	struct dc_sink *sink;
1866 
1867 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
1869 		return;
1870 
1872 	sink = aconnector->dc_link->local_sink;
1873 	if (sink)
1874 		dc_sink_retain(sink);
1875 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either
	 * the fake or the physical sink, depending on the link status.
	 * Skip if this was already done during boot.
	 */
1881 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1882 			&& aconnector->dc_em_sink) {
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink)
		 * to fake a stream, because connector->sink is set to NULL
		 * on resume.
		 */
1888 		mutex_lock(&dev->mode_config.mutex);
1889 
1890 		if (sink) {
1891 			if (aconnector->dc_sink) {
1892 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump the
				 * refcount of the sink: the link no longer
				 * points to it after a disconnect, so the
				 * next crtc-to-connector reshuffle by the UMD
				 * would otherwise trigger an unwanted
				 * dc_sink release.
				 */
1899 				dc_sink_release(aconnector->dc_sink);
1900 			}
1901 			aconnector->dc_sink = sink;
1902 			dc_sink_retain(aconnector->dc_sink);
1903 			amdgpu_dm_update_freesync_caps(connector,
1904 					aconnector->edid);
1905 		} else {
1906 			amdgpu_dm_update_freesync_caps(connector, NULL);
1907 			if (!aconnector->dc_sink) {
1908 				aconnector->dc_sink = aconnector->dc_em_sink;
1909 				dc_sink_retain(aconnector->dc_sink);
1910 			}
1911 		}
1912 
1913 		mutex_unlock(&dev->mode_config.mutex);
1914 
1915 		if (sink)
1916 			dc_sink_release(sink);
1917 		return;
1918 	}
1919 
	/*
	 * TODO: temporary guard while looking for a proper fix.
	 * If this sink is an MST sink, we should not do anything.
	 */
1924 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1925 		dc_sink_release(sink);
1926 		return;
1927 	}
1928 
1929 	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
		 * Do nothing.
		 */
1934 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1935 				aconnector->connector_id);
1936 		if (sink)
1937 			dc_sink_release(sink);
1938 		return;
1939 	}
1940 
1941 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1942 		aconnector->connector_id, aconnector->dc_sink, sink);
1943 
1944 	mutex_lock(&dev->mode_config.mutex);
1945 
1946 	/*
1947 	 * 1. Update status of the drm connector
1948 	 * 2. Send an event and let userspace tell us what to do
1949 	 */
1950 	if (sink) {
1951 		/*
1952 		 * TODO: check if we still need the S3 mode update workaround.
1953 		 * If yes, put it here.
1954 		 */
1955 		if (aconnector->dc_sink)
1956 			amdgpu_dm_update_freesync_caps(connector, NULL);
1957 
1958 		aconnector->dc_sink = sink;
1959 		dc_sink_retain(aconnector->dc_sink);
1960 		if (sink->dc_edid.length == 0) {
1961 			aconnector->edid = NULL;
1962 			if (aconnector->dc_link->aux_mode) {
1963 				drm_dp_cec_unset_edid(
1964 					&aconnector->dm_dp_aux.aux);
1965 			}
1966 		} else {
1967 			aconnector->edid =
1968 				(struct edid *)sink->dc_edid.raw_edid;
1969 
1970 			drm_connector_update_edid_property(connector,
1971 							   aconnector->edid);
1972 
1973 			if (aconnector->dc_link->aux_mode)
1974 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1975 						    aconnector->edid);
1976 		}
1977 
1978 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1979 		update_connector_ext_caps(aconnector);
1980 	} else {
1981 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1982 		amdgpu_dm_update_freesync_caps(connector, NULL);
1983 		drm_connector_update_edid_property(connector, NULL);
1984 		aconnector->num_modes = 0;
1985 		dc_sink_release(aconnector->dc_sink);
1986 		aconnector->dc_sink = NULL;
1987 		aconnector->edid = NULL;
1988 #ifdef CONFIG_DRM_AMD_DC_HDCP
1989 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1990 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1991 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1992 #endif
1993 	}
1994 
1995 	mutex_unlock(&dev->mode_config.mutex);
1996 
1997 	if (sink)
1998 		dc_sink_release(sink);
1999 }
2000 
2001 static void handle_hpd_irq(void *param)
2002 {
2003 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2004 	struct drm_connector *connector = &aconnector->base;
2005 	struct drm_device *dev = connector->dev;
2006 	enum dc_connection_type new_connection_type = dc_connection_none;
2007 #ifdef CONFIG_DRM_AMD_DC_HDCP
2008 	struct amdgpu_device *adev = dev->dev_private;
2009 #endif
2010 
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or to notify the OS, since (in the MST case) MST does this
	 * in its own context.
	 */
2015 	mutex_lock(&aconnector->hpd_lock);
2016 
2017 #ifdef CONFIG_DRM_AMD_DC_HDCP
2018 	if (adev->dm.hdcp_workqueue)
2019 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2020 #endif
2021 	if (aconnector->fake_enable)
2022 		aconnector->fake_enable = false;
2023 
2024 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2025 		DRM_ERROR("KMS: Failed to detect connector\n");
2026 
2027 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2028 		emulated_link_detect(aconnector->dc_link);
2029 
2031 		drm_modeset_lock_all(dev);
2032 		dm_restore_drm_connector_state(dev, connector);
2033 		drm_modeset_unlock_all(dev);
2034 
2035 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2036 			drm_kms_helper_hotplug_event(dev);
2037 
2038 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2039 		amdgpu_dm_update_connector_after_detect(aconnector);
2040 
2042 		drm_modeset_lock_all(dev);
2043 		dm_restore_drm_connector_state(dev, connector);
2044 		drm_modeset_unlock_all(dev);
2045 
2046 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2047 			drm_kms_helper_hotplug_event(dev);
2048 	}
2049 	mutex_unlock(&aconnector->hpd_lock);
2051 }
2052 
2053 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2054 {
2055 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2056 	uint8_t dret;
2057 	bool new_irq_handled = false;
2058 	int dpcd_addr;
2059 	int dpcd_bytes_to_read;
2060 
2061 	const int max_process_count = 30;
2062 	int process_count = 0;
2063 
2064 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2065 
2066 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2067 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2068 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2069 		dpcd_addr = DP_SINK_COUNT;
2070 	} else {
2071 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2072 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2073 		dpcd_addr = DP_SINK_COUNT_ESI;
2074 	}
2075 
2076 	dret = drm_dp_dpcd_read(
2077 		&aconnector->dm_dp_aux.aux,
2078 		dpcd_addr,
2079 		esi,
2080 		dpcd_bytes_to_read);
2081 
2082 	while (dret == dpcd_bytes_to_read &&
2083 		process_count < max_process_count) {
2084 		uint8_t retry;
2085 		dret = 0;
2086 
2087 		process_count++;
2088 
2089 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2090 		/* handle HPD short pulse irq */
2091 		if (aconnector->mst_mgr.mst_state)
2092 			drm_dp_mst_hpd_irq(
2093 				&aconnector->mst_mgr,
2094 				esi,
2095 				&new_irq_handled);
2096 
2097 		if (new_irq_handled) {
			/*
			 * ACK at DPCD to notify the downstream sink: write
			 * back everything after the sink-count byte, hence
			 * dpcd_addr + 1 and one byte fewer than was read.
			 */
2099 			const int ack_dpcd_bytes_to_write =
2100 				dpcd_bytes_to_read - 1;
2101 
2102 			for (retry = 0; retry < 3; retry++) {
2103 				uint8_t wret;
2104 
2105 				wret = drm_dp_dpcd_write(
2106 					&aconnector->dm_dp_aux.aux,
2107 					dpcd_addr + 1,
2108 					&esi[1],
2109 					ack_dpcd_bytes_to_write);
2110 				if (wret == ack_dpcd_bytes_to_write)
2111 					break;
2112 			}
2113 
			/* check if there is a new IRQ to be handled */
2115 			dret = drm_dp_dpcd_read(
2116 				&aconnector->dm_dp_aux.aux,
2117 				dpcd_addr,
2118 				esi,
2119 				dpcd_bytes_to_read);
2120 
2121 			new_irq_handled = false;
2122 		} else {
2123 			break;
2124 		}
2125 	}
2126 
2127 	if (process_count == max_process_count)
2128 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2129 }
2130 
2131 static void handle_hpd_rx_irq(void *param)
2132 {
2133 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2134 	struct drm_connector *connector = &aconnector->base;
2135 	struct drm_device *dev = connector->dev;
2136 	struct dc_link *dc_link = aconnector->dc_link;
2137 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2138 	enum dc_connection_type new_connection_type = dc_connection_none;
2139 #ifdef CONFIG_DRM_AMD_DC_HDCP
2140 	union hpd_irq_data hpd_irq_data;
2141 	struct amdgpu_device *adev = dev->dev_private;
2142 
2143 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2144 #endif
2145 
	/*
	 * TODO: Temporarily take a mutex so that the HPD interrupt does not
	 * hit a GPIO conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
2151 	if (dc_link->type != dc_connection_mst_branch)
2152 		mutex_lock(&aconnector->hpd_lock);
2153 
2155 #ifdef CONFIG_DRM_AMD_DC_HDCP
2156 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2157 #else
2158 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2159 #endif
2160 			!is_mst_root_connector) {
2161 		/* Downstream Port status changed. */
2162 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2163 			DRM_ERROR("KMS: Failed to detect connector\n");
2164 
2165 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2166 			emulated_link_detect(dc_link);
2167 
2168 			if (aconnector->fake_enable)
2169 				aconnector->fake_enable = false;
2170 
2171 			amdgpu_dm_update_connector_after_detect(aconnector);
2172 
2174 			drm_modeset_lock_all(dev);
2175 			dm_restore_drm_connector_state(dev, connector);
2176 			drm_modeset_unlock_all(dev);
2177 
2178 			drm_kms_helper_hotplug_event(dev);
2179 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2181 			if (aconnector->fake_enable)
2182 				aconnector->fake_enable = false;
2183 
2184 			amdgpu_dm_update_connector_after_detect(aconnector);
2185 
2187 			drm_modeset_lock_all(dev);
2188 			dm_restore_drm_connector_state(dev, connector);
2189 			drm_modeset_unlock_all(dev);
2190 
2191 			drm_kms_helper_hotplug_event(dev);
2192 		}
2193 	}
2194 #ifdef CONFIG_DRM_AMD_DC_HDCP
2195 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2196 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2198 	}
2199 #endif
2200 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2201 	    (dc_link->type == dc_connection_mst_branch))
2202 		dm_handle_hpd_rx_irq(aconnector);
2203 
2204 	if (dc_link->type != dc_connection_mst_branch) {
2205 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2206 		mutex_unlock(&aconnector->hpd_lock);
2207 	}
2208 }
2209 
2210 static void register_hpd_handlers(struct amdgpu_device *adev)
2211 {
2212 	struct drm_device *dev = adev->ddev;
2213 	struct drm_connector *connector;
2214 	struct amdgpu_dm_connector *aconnector;
2215 	const struct dc_link *dc_link;
2216 	struct dc_interrupt_params int_params = {0};
2217 
2218 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2219 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2220 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2223 
2224 		aconnector = to_amdgpu_dm_connector(connector);
2225 		dc_link = aconnector->dc_link;
2226 
2227 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2228 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2229 			int_params.irq_source = dc_link->irq_source_hpd;
2230 
2231 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2232 					handle_hpd_irq,
2233 					(void *) aconnector);
2234 		}
2235 
2236 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2237 
2238 			/* Also register for DP short pulse (hpd_rx). */
2239 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2241 
2242 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2243 					handle_hpd_rx_irq,
2244 					(void *) aconnector);
2245 		}
2246 	}
2247 }
2248 
2249 /* Register IRQ sources and initialize IRQ callbacks */
2250 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2251 {
2252 	struct dc *dc = adev->dm.dc;
2253 	struct common_irq_params *c_irq_params;
2254 	struct dc_interrupt_params int_params = {0};
2255 	int r;
2256 	int i;
2257 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2258 
2259 	if (adev->asic_type >= CHIP_VEGA10)
2260 		client_id = SOC15_IH_CLIENTID_DCE;
2261 
2262 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2264 
	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2275 
2276 	/* Use VBLANK interrupt */
2277 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2278 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2279 		if (r) {
2280 			DRM_ERROR("Failed to add crtc irq id!\n");
2281 			return r;
2282 		}
2283 
2284 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2285 		int_params.irq_source =
2286 			dc_interrupt_to_irq_source(dc, i, 0);
2287 
2288 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2289 
2290 		c_irq_params->adev = adev;
2291 		c_irq_params->irq_src = int_params.irq_source;
2292 
2293 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2294 				dm_crtc_high_irq, c_irq_params);
2295 	}
2296 
2297 	/* Use VUPDATE interrupt */
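	/*
	 * On VI the V_UPDATE and GRPH_PFLIP interrupt source IDs are
	 * interleaved per display, hence the stride of 2 in this loop and
	 * in the page-flip loop below.
	 */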
2298 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2299 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2300 		if (r) {
2301 			DRM_ERROR("Failed to add vupdate irq id!\n");
2302 			return r;
2303 		}
2304 
2305 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2306 		int_params.irq_source =
2307 			dc_interrupt_to_irq_source(dc, i, 0);
2308 
2309 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2310 
2311 		c_irq_params->adev = adev;
2312 		c_irq_params->irq_src = int_params.irq_source;
2313 
2314 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2315 				dm_vupdate_high_irq, c_irq_params);
2316 	}
2317 
2318 	/* Use GRPH_PFLIP interrupt */
2319 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2320 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2321 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2322 		if (r) {
2323 			DRM_ERROR("Failed to add page flip irq id!\n");
2324 			return r;
2325 		}
2326 
2327 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2328 		int_params.irq_source =
2329 			dc_interrupt_to_irq_source(dc, i, 0);
2330 
2331 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2332 
2333 		c_irq_params->adev = adev;
2334 		c_irq_params->irq_src = int_params.irq_source;
2335 
2336 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2337 				dm_pflip_high_irq, c_irq_params);
2339 	}
2340 
2341 	/* HPD */
2342 	r = amdgpu_irq_add_id(adev, client_id,
2343 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2344 	if (r) {
2345 		DRM_ERROR("Failed to add hpd irq id!\n");
2346 		return r;
2347 	}
2348 
2349 	register_hpd_handlers(adev);
2350 
2351 	return 0;
2352 }
2353 
2354 #if defined(CONFIG_DRM_AMD_DC_DCN)
2355 /* Register IRQ sources and initialize IRQ callbacks */
2356 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2357 {
2358 	struct dc *dc = adev->dm.dc;
2359 	struct common_irq_params *c_irq_params;
2360 	struct dc_interrupt_params int_params = {0};
2361 	int r;
2362 	int i;
2363 
2364 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2365 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2366 
2367 	/*
2368 	 * Actions of amdgpu_irq_add_id():
2369 	 * 1. Register a set() function with base driver.
2370 	 *    Base driver will call set() function to enable/disable an
2371 	 *    interrupt in DC hardware.
2372 	 * 2. Register amdgpu_dm_irq_handler().
2373 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2374 	 *    coming from DC hardware.
2375 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2376 	 *    for acknowledging and handling.
2377 	 */
2378 
2379 	/* Use VSTARTUP interrupt */
2380 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2381 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2382 			i++) {
2383 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2384 
2385 		if (r) {
2386 			DRM_ERROR("Failed to add crtc irq id!\n");
2387 			return r;
2388 		}
2389 
2390 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2391 		int_params.irq_source =
2392 			dc_interrupt_to_irq_source(dc, i, 0);
2393 
2394 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2395 
2396 		c_irq_params->adev = adev;
2397 		c_irq_params->irq_src = int_params.irq_source;
2398 
2399 		amdgpu_dm_irq_register_interrupt(
2400 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2401 	}
2402 
2403 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2404 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2405 	 * to trigger at end of each vblank, regardless of state of the lock,
2406 	 * matching DCE behaviour.
2407 	 */
2408 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2409 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2410 	     i++) {
2411 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2412 
2413 		if (r) {
2414 			DRM_ERROR("Failed to add vupdate irq id!\n");
2415 			return r;
2416 		}
2417 
2418 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2419 		int_params.irq_source =
2420 			dc_interrupt_to_irq_source(dc, i, 0);
2421 
2422 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2423 
2424 		c_irq_params->adev = adev;
2425 		c_irq_params->irq_src = int_params.irq_source;
2426 
2427 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2428 				dm_vupdate_high_irq, c_irq_params);
2429 	}
2430 
2431 	/* Use GRPH_PFLIP interrupt */
2432 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2433 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2434 			i++) {
2435 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2436 		if (r) {
2437 			DRM_ERROR("Failed to add page flip irq id!\n");
2438 			return r;
2439 		}
2440 
2441 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2442 		int_params.irq_source =
2443 			dc_interrupt_to_irq_source(dc, i, 0);
2444 
2445 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2446 
2447 		c_irq_params->adev = adev;
2448 		c_irq_params->irq_src = int_params.irq_source;
2449 
2450 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2451 				dm_pflip_high_irq, c_irq_params);
2453 	}
2454 
2455 	/* HPD */
2456 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2457 			&adev->hpd_irq);
2458 	if (r) {
2459 		DRM_ERROR("Failed to add hpd irq id!\n");
2460 		return r;
2461 	}
2462 
2463 	register_hpd_handlers(adev);
2464 
2465 	return 0;
2466 }
2467 #endif
2468 
2469 /*
2470  * Acquires the lock for the atomic state object and returns
2471  * the new atomic state.
2472  *
2473  * This should only be called during atomic check.
2474  */
2475 static int dm_atomic_get_state(struct drm_atomic_state *state,
2476 			       struct dm_atomic_state **dm_state)
2477 {
2478 	struct drm_device *dev = state->dev;
2479 	struct amdgpu_device *adev = dev->dev_private;
2480 	struct amdgpu_display_manager *dm = &adev->dm;
2481 	struct drm_private_state *priv_state;
2482 
2483 	if (*dm_state)
2484 		return 0;
2485 
2486 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2487 	if (IS_ERR(priv_state))
2488 		return PTR_ERR(priv_state);
2489 
2490 	*dm_state = to_dm_atomic_state(priv_state);
2491 
2492 	return 0;
2493 }
2494 
2495 struct dm_atomic_state *
2496 dm_atomic_get_new_state(struct drm_atomic_state *state)
2497 {
2498 	struct drm_device *dev = state->dev;
2499 	struct amdgpu_device *adev = dev->dev_private;
2500 	struct amdgpu_display_manager *dm = &adev->dm;
2501 	struct drm_private_obj *obj;
2502 	struct drm_private_state *new_obj_state;
2503 	int i;
2504 
2505 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2506 		if (obj->funcs == dm->atomic_obj.funcs)
2507 			return to_dm_atomic_state(new_obj_state);
2508 	}
2509 
2510 	return NULL;
2511 }
2512 
2513 struct dm_atomic_state *
2514 dm_atomic_get_old_state(struct drm_atomic_state *state)
2515 {
2516 	struct drm_device *dev = state->dev;
2517 	struct amdgpu_device *adev = dev->dev_private;
2518 	struct amdgpu_display_manager *dm = &adev->dm;
2519 	struct drm_private_obj *obj;
2520 	struct drm_private_state *old_obj_state;
2521 	int i;
2522 
2523 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2524 		if (obj->funcs == dm->atomic_obj.funcs)
2525 			return to_dm_atomic_state(old_obj_state);
2526 	}
2527 
2528 	return NULL;
2529 }
2530 
2531 static struct drm_private_state *
2532 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2533 {
2534 	struct dm_atomic_state *old_state, *new_state;
2535 
2536 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2537 	if (!new_state)
2538 		return NULL;
2539 
2540 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2541 
2542 	old_state = to_dm_atomic_state(obj->state);
2543 
2544 	if (old_state && old_state->context)
2545 		new_state->context = dc_copy_state(old_state->context);
2546 
2547 	if (!new_state->context) {
2548 		kfree(new_state);
2549 		return NULL;
2550 	}
2551 
2552 	return &new_state->base;
2553 }
2554 
2555 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2556 				    struct drm_private_state *state)
2557 {
2558 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2559 
2560 	if (dm_state && dm_state->context)
2561 		dc_release_state(dm_state->context);
2562 
2563 	kfree(dm_state);
2564 }
2565 
2566 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2567 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2568 	.atomic_destroy_state = dm_atomic_destroy_state,
2569 };
2570 
2571 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2572 {
2573 	struct dm_atomic_state *state;
2574 	int r;
2575 
2576 	adev->mode_info.mode_config_initialized = true;
2577 
2578 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2579 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2580 
2581 	adev->ddev->mode_config.max_width = 16384;
2582 	adev->ddev->mode_config.max_height = 16384;
2583 
2584 	adev->ddev->mode_config.preferred_depth = 24;
2585 	adev->ddev->mode_config.prefer_shadow = 1;
2586 	/* indicates support for immediate flip */
2587 	adev->ddev->mode_config.async_page_flip = true;
2588 
2589 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2590 
2591 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2592 	if (!state)
2593 		return -ENOMEM;
2594 
2595 	state->context = dc_create_state(adev->dm.dc);
2596 	if (!state->context) {
2597 		kfree(state);
2598 		return -ENOMEM;
2599 	}
2600 
2601 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2602 
2603 	drm_atomic_private_obj_init(adev->ddev,
2604 				    &adev->dm.atomic_obj,
2605 				    &state->base,
2606 				    &dm_atomic_state_funcs);
2607 
2608 	r = amdgpu_display_modeset_create_props(adev);
2609 	if (r)
2610 		return r;
2611 
2612 	r = amdgpu_dm_audio_init(adev);
2613 	if (r)
2614 		return r;
2615 
2616 	return 0;
2617 }
2618 
2619 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2620 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2621 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2622 
2623 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2624 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2625 
2626 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2627 {
2628 #if defined(CONFIG_ACPI)
2629 	struct amdgpu_dm_backlight_caps caps;
2630 
2631 	if (dm->backlight_caps.caps_valid)
2632 		return;
2633 
2634 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2635 	if (caps.caps_valid) {
2636 		dm->backlight_caps.caps_valid = true;
2637 		if (caps.aux_support)
2638 			return;
2639 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2640 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2641 	} else {
2642 		dm->backlight_caps.min_input_signal =
2643 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2644 		dm->backlight_caps.max_input_signal =
2645 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2646 	}
2647 #else
2648 	if (dm->backlight_caps.aux_support)
2649 		return;
2650 
2651 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2652 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2653 #endif
2654 }
2655 
2656 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2657 {
2658 	bool rc;
2659 
2660 	if (!link)
2661 		return 1;
2662 
2663 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2664 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2665 
2666 	return rc ? 0 : 1;
2667 }
2668 
2669 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2670 			      const uint32_t user_brightness)
2671 {
2672 	u32 min, max, conversion_pace;
2673 	u32 brightness = user_brightness;
2674 
2675 	if (!caps)
2676 		goto out;
2677 
2678 	if (!caps->aux_support) {
2679 		max = caps->max_input_signal;
2680 		min = caps->min_input_signal;
		/*
		 * The brightness input is in the range 0-255. It needs to be
		 * rescaled to lie between the requested min and max input
		 * signal, and also scaled up by 0x101 to match the DC
		 * interface, which has a range of 0 to 0xffff.
		 */
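		/*
		 * Worked example, assuming AMDGPU_MAX_BL_LEVEL is 255 and the
		 * default caps of min = 12 and max = 255: user_brightness =
		 * 255 gives 255 * 0x101 * (255 - 12) / 255 + 12 * 0x101 =
		 * 0xffff, i.e. full scale on the DC interface.
		 */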
2689 		conversion_pace = 0x101;
2690 		brightness =
2691 			user_brightness
2692 			* conversion_pace
2693 			* (max - min)
2694 			/ AMDGPU_MAX_BL_LEVEL
2695 			+ min * conversion_pace;
2696 	} else {
2697 		/* TODO
2698 		 * We are doing a linear interpolation here, which is OK but
2699 		 * does not provide the optimal result. We probably want
2700 		 * something close to the Perceptual Quantizer (PQ) curve.
2701 		 */
2702 		max = caps->aux_max_input_signal;
2703 		min = caps->aux_min_input_signal;
2704 
2705 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2706 			       + user_brightness * max;
		// Multiply the value by 1000 since we use millinits
2708 		brightness *= 1000;
2709 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
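		/*
		 * Worked example (hypothetical OLED caps of min = 50 and
		 * max = 204 nits): user_brightness = 255 gives
		 * ((255 - 255) * 50 + 255 * 204) * 1000 / 255 = 204000
		 * millinits, i.e. 204 nits.
		 */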
2710 	}
2711 
2712 out:
2713 	return brightness;
2714 }
2715 
2716 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2717 {
2718 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2719 	struct amdgpu_dm_backlight_caps caps;
2720 	struct dc_link *link = NULL;
2721 	u32 brightness;
2722 	bool rc;
2723 
2724 	amdgpu_dm_update_backlight_caps(dm);
2725 	caps = dm->backlight_caps;
2726 
2727 	link = (struct dc_link *)dm->backlight_link;
2728 
2729 	brightness = convert_brightness(&caps, bd->props.brightness);
2730 	// Change brightness based on AUX property
2731 	if (caps.aux_support)
2732 		return set_backlight_via_aux(link, brightness);
2733 
2734 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2735 
2736 	return rc ? 0 : 1;
2737 }
2738 
2739 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2740 {
2741 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2742 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2743 
2744 	if (ret == DC_ERROR_UNEXPECTED)
2745 		return bd->props.brightness;
2746 	return ret;
2747 }
2748 
2749 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2750 	.options = BL_CORE_SUSPENDRESUME,
2751 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2752 	.update_status	= amdgpu_dm_backlight_update_status,
2753 };
2754 
2755 static void
2756 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2757 {
2758 	char bl_name[16];
2759 	struct backlight_properties props = { 0 };
2760 
2761 	amdgpu_dm_update_backlight_caps(dm);
2762 
2763 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2764 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2765 	props.type = BACKLIGHT_RAW;
2766 
2767 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2768 			dm->adev->ddev->primary->index);
2769 
2770 	dm->backlight_dev = backlight_device_register(bl_name,
2771 			dm->adev->ddev->dev,
2772 			dm,
2773 			&amdgpu_dm_backlight_ops,
2774 			&props);
2775 
2776 	if (IS_ERR(dm->backlight_dev))
2777 		DRM_ERROR("DM: Backlight registration failed!\n");
2778 	else
2779 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2780 }
2781 
2782 #endif
2783 
2784 static int initialize_plane(struct amdgpu_display_manager *dm,
2785 			    struct amdgpu_mode_info *mode_info, int plane_id,
2786 			    enum drm_plane_type plane_type,
2787 			    const struct dc_plane_cap *plane_cap)
2788 {
2789 	struct drm_plane *plane;
2790 	unsigned long possible_crtcs;
2791 	int ret = 0;
2792 
2793 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2794 	if (!plane) {
2795 		DRM_ERROR("KMS: Failed to allocate plane\n");
2796 		return -ENOMEM;
2797 	}
2798 	plane->type = plane_type;
2799 
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if the plane is not going to be used as a primary
	 * plane for a CRTC - like overlay or underlay planes.
	 */
2806 	possible_crtcs = 1 << plane_id;
2807 	if (plane_id >= dm->dc->caps.max_streams)
2808 		possible_crtcs = 0xff;
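	/*
	 * For example, plane_id 0 yields possible_crtcs = 0x1 (CRTC 0 only),
	 * while planes beyond max_streams (overlays) may attach to any CRTC.
	 */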
2809 
2810 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2811 
2812 	if (ret) {
2813 		DRM_ERROR("KMS: Failed to initialize plane\n");
2814 		kfree(plane);
2815 		return ret;
2816 	}
2817 
2818 	if (mode_info)
2819 		mode_info->planes[plane_id] = plane;
2820 
2821 	return ret;
2822 }
2823 
2825 static void register_backlight_device(struct amdgpu_display_manager *dm,
2826 				      struct dc_link *link)
2827 {
2828 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2829 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2830 
2831 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2832 	    link->type != dc_connection_none) {
		/*
		 * Even if registration fails, we should continue with
		 * DM initialization, because not having backlight control
		 * is better than a black screen.
		 */
2838 		amdgpu_dm_register_backlight_device(dm);
2839 
2840 		if (dm->backlight_dev)
2841 			dm->backlight_link = link;
2842 	}
2843 #endif
2844 }
2845 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
2855 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2856 {
2857 	struct amdgpu_display_manager *dm = &adev->dm;
2858 	int32_t i;
2859 	struct amdgpu_dm_connector *aconnector = NULL;
2860 	struct amdgpu_encoder *aencoder = NULL;
2861 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2862 	uint32_t link_cnt;
2863 	int32_t primary_planes;
2864 	enum dc_connection_type new_connection_type = dc_connection_none;
2865 	const struct dc_plane_cap *plane;
2866 
2867 	link_cnt = dm->dc->caps.max_links;
2868 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2869 		DRM_ERROR("DM: Failed to initialize mode config\n");
2870 		return -EINVAL;
2871 	}
2872 
2873 	/* There is one primary plane per CRTC */
2874 	primary_planes = dm->dc->caps.max_streams;
2875 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2876 
2877 	/*
2878 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2879 	 * Order is reversed to match iteration order in atomic check.
2880 	 */
2881 	for (i = (primary_planes - 1); i >= 0; i--) {
2882 		plane = &dm->dc->caps.planes[i];
2883 
2884 		if (initialize_plane(dm, mode_info, i,
2885 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2886 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2887 			goto fail;
2888 		}
2889 	}
2890 
2891 	/*
2892 	 * Initialize overlay planes, index starting after primary planes.
2893 	 * These planes have a higher DRM index than the primary planes since
2894 	 * they should be considered as having a higher z-order.
2895 	 * Order is reversed to match iteration order in atomic check.
2896 	 *
2897 	 * Only support DCN for now, and only expose one so we don't encourage
2898 	 * userspace to use up all the pipes.
2899 	 */
2900 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2901 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2902 
2903 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2904 			continue;
2905 
2906 		if (!plane->blends_with_above || !plane->blends_with_below)
2907 			continue;
2908 
2909 		if (!plane->pixel_format_support.argb8888)
2910 			continue;
2911 
2912 		if (initialize_plane(dm, NULL, primary_planes + i,
2913 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2914 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2915 			goto fail;
2916 		}
2917 
2918 		/* Only create one overlay plane. */
2919 		break;
2920 	}
2921 
2922 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2923 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2924 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2925 			goto fail;
2926 		}
2927 
2928 	dm->display_indexes_num = dm->dc->caps.max_streams;
2929 
2930 	/* loops over all connectors on the board */
2931 	for (i = 0; i < link_cnt; i++) {
2932 		struct dc_link *link = NULL;
2933 
2934 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2935 			DRM_ERROR(
2936 				"KMS: Cannot support more than %d display indexes\n",
2937 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2938 			continue;
2939 		}
2940 
2941 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2942 		if (!aconnector)
2943 			goto fail;
2944 
2945 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2946 		if (!aencoder)
2947 			goto fail;
2948 
2949 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2950 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2951 			goto fail;
2952 		}
2953 
2954 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2955 			DRM_ERROR("KMS: Failed to initialize connector\n");
2956 			goto fail;
2957 		}
2958 
2959 		link = dc_get_link_at_index(dm->dc, i);
2960 
2961 		if (!dc_link_detect_sink(link, &new_connection_type))
2962 			DRM_ERROR("KMS: Failed to detect connector\n");
2963 
2964 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2965 			emulated_link_detect(link);
2966 			amdgpu_dm_update_connector_after_detect(aconnector);
2967 
2968 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2969 			amdgpu_dm_update_connector_after_detect(aconnector);
2970 			register_backlight_device(dm, link);
2971 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2972 				amdgpu_dm_set_psr_caps(link);
2973 		}
2976 	}
2977 
2978 	/* Software is initialized. Now we can register interrupt handlers. */
2979 	switch (adev->asic_type) {
2980 	case CHIP_BONAIRE:
2981 	case CHIP_HAWAII:
2982 	case CHIP_KAVERI:
2983 	case CHIP_KABINI:
2984 	case CHIP_MULLINS:
2985 	case CHIP_TONGA:
2986 	case CHIP_FIJI:
2987 	case CHIP_CARRIZO:
2988 	case CHIP_STONEY:
2989 	case CHIP_POLARIS11:
2990 	case CHIP_POLARIS10:
2991 	case CHIP_POLARIS12:
2992 	case CHIP_VEGAM:
2993 	case CHIP_VEGA10:
2994 	case CHIP_VEGA12:
2995 	case CHIP_VEGA20:
2996 		if (dce110_register_irq_handlers(dm->adev)) {
2997 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2998 			goto fail;
2999 		}
3000 		break;
3001 #if defined(CONFIG_DRM_AMD_DC_DCN)
3002 	case CHIP_RAVEN:
3003 	case CHIP_NAVI12:
3004 	case CHIP_NAVI10:
3005 	case CHIP_NAVI14:
3006 	case CHIP_RENOIR:
3007 		if (dcn10_register_irq_handlers(dm->adev)) {
3008 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3009 			goto fail;
3010 		}
3011 		break;
3012 #endif
3013 	default:
3014 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3015 		goto fail;
3016 	}
3017 
3018 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3020 
3021 	/* No userspace support. */
3022 	dm->dc->debug.disable_tri_buf = true;
3023 
3024 	return 0;
3025 fail:
3026 	kfree(aencoder);
3027 	kfree(aconnector);
3028 
3029 	return -EINVAL;
3030 }
3031 
3032 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3033 {
3034 	drm_mode_config_cleanup(dm->ddev);
3035 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3037 }
3038 
3039 /******************************************************************************
3040  * amdgpu_display_funcs functions
3041  *****************************************************************************/
3042 
3043 /*
3044  * dm_bandwidth_update - program display watermarks
3045  *
3046  * @adev: amdgpu_device pointer
3047  *
3048  * Calculate and program the display watermarks and line buffer allocation.
3049  */
3050 static void dm_bandwidth_update(struct amdgpu_device *adev)
3051 {
3052 	/* TODO: implement later */
3053 }
3054 
3055 static const struct amdgpu_display_funcs dm_display_funcs = {
3056 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3057 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3058 	.backlight_set_level = NULL, /* never called for DC */
3059 	.backlight_get_level = NULL, /* never called for DC */
3060 	.hpd_sense = NULL,/* called unconditionally */
3061 	.hpd_set_polarity = NULL, /* called unconditionally */
3062 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3063 	.page_flip_get_scanoutpos =
3064 		dm_crtc_get_scanoutpos,/* called unconditionally */
3065 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3066 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3067 };
3068 
3069 #if defined(CONFIG_DEBUG_KERNEL_DC)
3070 
3071 static ssize_t s3_debug_store(struct device *device,
3072 			      struct device_attribute *attr,
3073 			      const char *buf,
3074 			      size_t count)
3075 {
3076 	int ret;
3077 	int s3_state;
3078 	struct drm_device *drm_dev = dev_get_drvdata(device);
3079 	struct amdgpu_device *adev = drm_dev->dev_private;
3080 
3081 	ret = kstrtoint(buf, 0, &s3_state);
3082 
3083 	if (ret == 0) {
3084 		if (s3_state) {
3085 			dm_resume(adev);
3086 			drm_kms_helper_hotplug_event(adev->ddev);
		} else {
			dm_suspend(adev);
		}
3089 	}
3090 
3091 	return ret == 0 ? count : 0;
3092 }
3093 
3094 DEVICE_ATTR_WO(s3_debug);
3095 
3096 #endif
3097 
3098 static int dm_early_init(void *handle)
3099 {
3100 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3101 
3102 	switch (adev->asic_type) {
3103 	case CHIP_BONAIRE:
3104 	case CHIP_HAWAII:
3105 		adev->mode_info.num_crtc = 6;
3106 		adev->mode_info.num_hpd = 6;
3107 		adev->mode_info.num_dig = 6;
3108 		break;
3109 	case CHIP_KAVERI:
3110 		adev->mode_info.num_crtc = 4;
3111 		adev->mode_info.num_hpd = 6;
3112 		adev->mode_info.num_dig = 7;
3113 		break;
3114 	case CHIP_KABINI:
3115 	case CHIP_MULLINS:
3116 		adev->mode_info.num_crtc = 2;
3117 		adev->mode_info.num_hpd = 6;
3118 		adev->mode_info.num_dig = 6;
3119 		break;
3120 	case CHIP_FIJI:
3121 	case CHIP_TONGA:
3122 		adev->mode_info.num_crtc = 6;
3123 		adev->mode_info.num_hpd = 6;
3124 		adev->mode_info.num_dig = 7;
3125 		break;
3126 	case CHIP_CARRIZO:
3127 		adev->mode_info.num_crtc = 3;
3128 		adev->mode_info.num_hpd = 6;
3129 		adev->mode_info.num_dig = 9;
3130 		break;
3131 	case CHIP_STONEY:
3132 		adev->mode_info.num_crtc = 2;
3133 		adev->mode_info.num_hpd = 6;
3134 		adev->mode_info.num_dig = 9;
3135 		break;
3136 	case CHIP_POLARIS11:
3137 	case CHIP_POLARIS12:
3138 		adev->mode_info.num_crtc = 5;
3139 		adev->mode_info.num_hpd = 5;
3140 		adev->mode_info.num_dig = 5;
3141 		break;
3142 	case CHIP_POLARIS10:
3143 	case CHIP_VEGAM:
3144 		adev->mode_info.num_crtc = 6;
3145 		adev->mode_info.num_hpd = 6;
3146 		adev->mode_info.num_dig = 6;
3147 		break;
3148 	case CHIP_VEGA10:
3149 	case CHIP_VEGA12:
3150 	case CHIP_VEGA20:
3151 		adev->mode_info.num_crtc = 6;
3152 		adev->mode_info.num_hpd = 6;
3153 		adev->mode_info.num_dig = 6;
3154 		break;
3155 #if defined(CONFIG_DRM_AMD_DC_DCN)
3156 	case CHIP_RAVEN:
3157 		adev->mode_info.num_crtc = 4;
3158 		adev->mode_info.num_hpd = 4;
3159 		adev->mode_info.num_dig = 4;
3160 		break;
3161 #endif
3162 	case CHIP_NAVI10:
3163 	case CHIP_NAVI12:
3164 		adev->mode_info.num_crtc = 6;
3165 		adev->mode_info.num_hpd = 6;
3166 		adev->mode_info.num_dig = 6;
3167 		break;
3168 	case CHIP_NAVI14:
3169 		adev->mode_info.num_crtc = 5;
3170 		adev->mode_info.num_hpd = 5;
3171 		adev->mode_info.num_dig = 5;
3172 		break;
3173 	case CHIP_RENOIR:
3174 		adev->mode_info.num_crtc = 4;
3175 		adev->mode_info.num_hpd = 4;
3176 		adev->mode_info.num_dig = 4;
3177 		break;
3178 	default:
3179 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3180 		return -EINVAL;
3181 	}
3182 
3183 	amdgpu_dm_set_irq_funcs(adev);
3184 
3185 	if (adev->mode_info.funcs == NULL)
3186 		adev->mode_info.funcs = &dm_display_funcs;
3187 
3188 	/*
3189 	 * Note: Do NOT change adev->audio_endpt_rreg and
3190 	 * adev->audio_endpt_wreg because they are initialised in
3191 	 * amdgpu_device_init()
3192 	 */
3193 #if defined(CONFIG_DEBUG_KERNEL_DC)
3194 	device_create_file(
3195 		adev->ddev->dev,
3196 		&dev_attr_s3_debug);
3197 #endif
3198 
3199 	return 0;
3200 }
3201 
3202 static bool modeset_required(struct drm_crtc_state *crtc_state,
3203 			     struct dc_stream_state *new_stream,
3204 			     struct dc_stream_state *old_stream)
3205 {
3206 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3207 		return false;
3208 
3209 	if (!crtc_state->enable)
3210 		return false;
3211 
3212 	return crtc_state->active;
3213 }
3214 
3215 static bool modereset_required(struct drm_crtc_state *crtc_state)
3216 {
3217 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3218 		return false;
3219 
3220 	return !crtc_state->enable || !crtc_state->active;
3221 }
3222 
3223 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3224 {
3225 	drm_encoder_cleanup(encoder);
3226 	kfree(encoder);
3227 }
3228 
3229 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3230 	.destroy = amdgpu_dm_encoder_destroy,
3231 };
3232 
3234 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3235 				struct dc_scaling_info *scaling_info)
3236 {
3237 	int scale_w, scale_h;
3238 
3239 	memset(scaling_info, 0, sizeof(*scaling_info));
3240 
3241 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3242 	scaling_info->src_rect.x = state->src_x >> 16;
3243 	scaling_info->src_rect.y = state->src_y >> 16;
3244 
3245 	scaling_info->src_rect.width = state->src_w >> 16;
3246 	if (scaling_info->src_rect.width == 0)
3247 		return -EINVAL;
3248 
3249 	scaling_info->src_rect.height = state->src_h >> 16;
3250 	if (scaling_info->src_rect.height == 0)
3251 		return -EINVAL;
3252 
3253 	scaling_info->dst_rect.x = state->crtc_x;
3254 	scaling_info->dst_rect.y = state->crtc_y;
3255 
3256 	if (state->crtc_w == 0)
3257 		return -EINVAL;
3258 
3259 	scaling_info->dst_rect.width = state->crtc_w;
3260 
3261 	if (state->crtc_h == 0)
3262 		return -EINVAL;
3263 
3264 	scaling_info->dst_rect.height = state->crtc_h;
3265 
3266 	/* DRM doesn't specify clipping on destination output. */
3267 	scaling_info->clip_rect = scaling_info->dst_rect;
3268 
3269 	/* TODO: Validate scaling per-format with DC plane caps */
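	/*
	 * scale_w and scale_h below are in units of 0.001, so the accepted
	 * 250-16000 range corresponds to scaling ratios of 0.25x to 16x.
	 */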
3270 	scale_w = scaling_info->dst_rect.width * 1000 /
3271 		  scaling_info->src_rect.width;
3272 
3273 	if (scale_w < 250 || scale_w > 16000)
3274 		return -EINVAL;
3275 
3276 	scale_h = scaling_info->dst_rect.height * 1000 /
3277 		  scaling_info->src_rect.height;
3278 
3279 	if (scale_h < 250 || scale_h > 16000)
3280 		return -EINVAL;
3281 
3282 	/*
3283 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3284 	 * assume reasonable defaults based on the format.
3285 	 */
3286 
3287 	return 0;
3288 }
3289 
3290 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3291 		       uint64_t *tiling_flags)
3292 {
3293 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3294 	int r = amdgpu_bo_reserve(rbo, false);
3295 
3296 	if (unlikely(r)) {
3297 		/* Don't show error message when returning -ERESTARTSYS */
3298 		if (r != -ERESTARTSYS)
3299 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3300 		return r;
3301 	}
3302 
3303 	if (tiling_flags)
3304 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3305 
3306 	amdgpu_bo_unreserve(rbo);
3307 
3308 	return r;
3309 }
3310 
3311 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3312 {
3313 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3314 
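	/* DCC_OFFSET_256B is stored in units of 256 bytes, hence the multiply. */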
3315 	return offset ? (address + offset * 256) : 0;
3316 }
3317 
3318 static int
3319 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3320 			  const struct amdgpu_framebuffer *afb,
3321 			  const enum surface_pixel_format format,
3322 			  const enum dc_rotation_angle rotation,
3323 			  const struct plane_size *plane_size,
3324 			  const union dc_tiling_info *tiling_info,
3325 			  const uint64_t info,
3326 			  struct dc_plane_dcc_param *dcc,
3327 			  struct dc_plane_address *address,
3328 			  bool force_disable_dcc)
3329 {
3330 	struct dc *dc = adev->dm.dc;
3331 	struct dc_dcc_surface_param input;
3332 	struct dc_surface_dcc_cap output;
3333 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3334 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3335 	uint64_t dcc_address;
3336 
3337 	memset(&input, 0, sizeof(input));
3338 	memset(&output, 0, sizeof(output));
3339 
3340 	if (force_disable_dcc)
3341 		return 0;
3342 
3343 	if (!offset)
3344 		return 0;
3345 
3346 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3347 		return 0;
3348 
3349 	if (!dc->cap_funcs.get_dcc_compression_cap)
3350 		return -EINVAL;
3351 
3352 	input.format = format;
3353 	input.surface_size.width = plane_size->surface_size.width;
3354 	input.surface_size.height = plane_size->surface_size.height;
3355 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3356 
3357 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3358 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3359 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3360 		input.scan = SCAN_DIRECTION_VERTICAL;
3361 
3362 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3363 		return -EINVAL;
3364 
3365 	if (!output.capable)
3366 		return -EINVAL;
3367 
3368 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3369 		return -EINVAL;
3370 
3371 	dcc->enable = 1;
3372 	dcc->meta_pitch =
3373 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3374 	dcc->independent_64b_blks = i64b;
3375 
3376 	dcc_address = get_dcc_address(afb->address, info);
3377 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3378 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3379 
3380 	return 0;
3381 }
3382 
3383 static int
3384 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3385 			     const struct amdgpu_framebuffer *afb,
3386 			     const enum surface_pixel_format format,
3387 			     const enum dc_rotation_angle rotation,
3388 			     const uint64_t tiling_flags,
3389 			     union dc_tiling_info *tiling_info,
3390 			     struct plane_size *plane_size,
3391 			     struct dc_plane_dcc_param *dcc,
3392 			     struct dc_plane_address *address,
3393 			     bool force_disable_dcc)
3394 {
3395 	const struct drm_framebuffer *fb = &afb->base;
3396 	int ret;
3397 
3398 	memset(tiling_info, 0, sizeof(*tiling_info));
3399 	memset(plane_size, 0, sizeof(*plane_size));
3400 	memset(dcc, 0, sizeof(*dcc));
3401 	memset(address, 0, sizeof(*address));
3402 
3403 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3404 		plane_size->surface_size.x = 0;
3405 		plane_size->surface_size.y = 0;
3406 		plane_size->surface_size.width = fb->width;
3407 		plane_size->surface_size.height = fb->height;
3408 		plane_size->surface_pitch =
3409 			fb->pitches[0] / fb->format->cpp[0];
3410 
3411 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3412 		address->grph.addr.low_part = lower_32_bits(afb->address);
3413 		address->grph.addr.high_part = upper_32_bits(afb->address);
3414 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3415 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3416 
3417 		plane_size->surface_size.x = 0;
3418 		plane_size->surface_size.y = 0;
3419 		plane_size->surface_size.width = fb->width;
3420 		plane_size->surface_size.height = fb->height;
3421 		plane_size->surface_pitch =
3422 			fb->pitches[0] / fb->format->cpp[0];
3423 
3424 		plane_size->chroma_size.x = 0;
3425 		plane_size->chroma_size.y = 0;
3426 		/* TODO: set these based on surface format */
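		/*
		 * Halving both dimensions assumes 4:2:0 chroma subsampling
		 * (NV12-style video surfaces).
		 */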
3427 		plane_size->chroma_size.width = fb->width / 2;
3428 		plane_size->chroma_size.height = fb->height / 2;
3429 
3430 		plane_size->chroma_pitch =
3431 			fb->pitches[1] / fb->format->cpp[1];
3432 
3433 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3434 		address->video_progressive.luma_addr.low_part =
3435 			lower_32_bits(afb->address);
3436 		address->video_progressive.luma_addr.high_part =
3437 			upper_32_bits(afb->address);
3438 		address->video_progressive.chroma_addr.low_part =
3439 			lower_32_bits(chroma_addr);
3440 		address->video_progressive.chroma_addr.high_part =
3441 			upper_32_bits(chroma_addr);
3442 	}
3443 
3444 	/* Fill GFX8 params */
3445 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3446 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3447 
3448 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3449 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3450 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3451 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3452 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3453 
3454 		/* XXX fix me for VI */
3455 		tiling_info->gfx8.num_banks = num_banks;
3456 		tiling_info->gfx8.array_mode =
3457 				DC_ARRAY_2D_TILED_THIN1;
3458 		tiling_info->gfx8.tile_split = tile_split;
3459 		tiling_info->gfx8.bank_width = bankw;
3460 		tiling_info->gfx8.bank_height = bankh;
3461 		tiling_info->gfx8.tile_aspect = mtaspect;
3462 		tiling_info->gfx8.tile_mode =
3463 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3464 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3465 			== DC_ARRAY_1D_TILED_THIN1) {
3466 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3467 	}
3468 
3469 	tiling_info->gfx8.pipe_config =
3470 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3471 
3472 	if (adev->asic_type == CHIP_VEGA10 ||
3473 	    adev->asic_type == CHIP_VEGA12 ||
3474 	    adev->asic_type == CHIP_VEGA20 ||
3475 	    adev->asic_type == CHIP_NAVI10 ||
3476 	    adev->asic_type == CHIP_NAVI14 ||
3477 	    adev->asic_type == CHIP_NAVI12 ||
3478 	    adev->asic_type == CHIP_RENOIR ||
3479 	    adev->asic_type == CHIP_RAVEN) {
3480 		/* Fill GFX9 params */
3481 		tiling_info->gfx9.num_pipes =
3482 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3483 		tiling_info->gfx9.num_banks =
3484 			adev->gfx.config.gb_addr_config_fields.num_banks;
3485 		tiling_info->gfx9.pipe_interleave =
3486 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3487 		tiling_info->gfx9.num_shader_engines =
3488 			adev->gfx.config.gb_addr_config_fields.num_se;
3489 		tiling_info->gfx9.max_compressed_frags =
3490 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3491 		tiling_info->gfx9.num_rb_per_se =
3492 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3493 		tiling_info->gfx9.swizzle =
3494 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3495 		tiling_info->gfx9.shaderEnable = 1;
3496 
3497 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3498 						plane_size, tiling_info,
3499 						tiling_flags, dcc, address,
3500 						force_disable_dcc);
3501 		if (ret)
3502 			return ret;
3503 	}
3504 
3505 	return 0;
3506 }
3507 
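/*
 * Derive DC blending attributes from the DRM plane state. Per-pixel alpha
 * is only honoured on overlay planes using premultiplied blending with an
 * alpha-capable format. The 16-bit DRM plane alpha property is folded into
 * an 8-bit global alpha value, e.g. 0x8000 >> 8 == 0x80 (~50% opacity).
 */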
3508 static void
3509 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3510 			       bool *per_pixel_alpha, bool *global_alpha,
3511 			       int *global_alpha_value)
3512 {
3513 	*per_pixel_alpha = false;
3514 	*global_alpha = false;
3515 	*global_alpha_value = 0xff;
3516 
3517 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3518 		return;
3519 
3520 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3521 		static const uint32_t alpha_formats[] = {
3522 			DRM_FORMAT_ARGB8888,
3523 			DRM_FORMAT_RGBA8888,
3524 			DRM_FORMAT_ABGR8888,
3525 		};
3526 		uint32_t format = plane_state->fb->format->format;
3527 		unsigned int i;
3528 
3529 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3530 			if (format == alpha_formats[i]) {
3531 				*per_pixel_alpha = true;
3532 				break;
3533 			}
3534 		}
3535 	}
3536 
3537 	if (plane_state->alpha < 0xffff) {
3538 		*global_alpha = true;
3539 		*global_alpha_value = plane_state->alpha >> 8;
3540 	}
3541 }
3542 
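/*
 * Map the DRM color encoding/range plane properties onto a DC color space.
 * Only YCbCr surfaces are affected; RGB surfaces stay in sRGB. Note that
 * BT.2020 is only accepted in full range here, so a limited-range BT.2020
 * plane is rejected with -EINVAL rather than approximated.
 */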
3543 static int
3544 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3545 			    const enum surface_pixel_format format,
3546 			    enum dc_color_space *color_space)
3547 {
3548 	bool full_range;
3549 
3550 	*color_space = COLOR_SPACE_SRGB;
3551 
3552 	/* DRM color properties only affect non-RGB formats. */
3553 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3554 		return 0;
3555 
3556 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3557 
3558 	switch (plane_state->color_encoding) {
3559 	case DRM_COLOR_YCBCR_BT601:
3560 		if (full_range)
3561 			*color_space = COLOR_SPACE_YCBCR601;
3562 		else
3563 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3564 		break;
3565 
3566 	case DRM_COLOR_YCBCR_BT709:
3567 		if (full_range)
3568 			*color_space = COLOR_SPACE_YCBCR709;
3569 		else
3570 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3571 		break;
3572 
3573 	case DRM_COLOR_YCBCR_BT2020:
3574 		if (full_range)
3575 			*color_space = COLOR_SPACE_2020_YCBCR;
3576 		else
3577 			return -EINVAL;
3578 		break;
3579 
3580 	default:
3581 		return -EINVAL;
3582 	}
3583 
3584 	return 0;
3585 }
3586 
3587 static int
3588 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3589 			    const struct drm_plane_state *plane_state,
3590 			    const uint64_t tiling_flags,
3591 			    struct dc_plane_info *plane_info,
3592 			    struct dc_plane_address *address,
3593 			    bool force_disable_dcc)
3594 {
3595 	const struct drm_framebuffer *fb = plane_state->fb;
3596 	const struct amdgpu_framebuffer *afb =
3597 		to_amdgpu_framebuffer(plane_state->fb);
3598 	struct drm_format_name_buf format_name;
3599 	int ret;
3600 
3601 	memset(plane_info, 0, sizeof(*plane_info));
3602 
3603 	switch (fb->format->format) {
3604 	case DRM_FORMAT_C8:
3605 		plane_info->format =
3606 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3607 		break;
3608 	case DRM_FORMAT_RGB565:
3609 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3610 		break;
3611 	case DRM_FORMAT_XRGB8888:
3612 	case DRM_FORMAT_ARGB8888:
3613 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3614 		break;
3615 	case DRM_FORMAT_XRGB2101010:
3616 	case DRM_FORMAT_ARGB2101010:
3617 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3618 		break;
3619 	case DRM_FORMAT_XBGR2101010:
3620 	case DRM_FORMAT_ABGR2101010:
3621 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3622 		break;
3623 	case DRM_FORMAT_XBGR8888:
3624 	case DRM_FORMAT_ABGR8888:
3625 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3626 		break;
3627 	case DRM_FORMAT_NV21:
3628 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3629 		break;
3630 	case DRM_FORMAT_NV12:
3631 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3632 		break;
3633 	case DRM_FORMAT_P010:
3634 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3635 		break;
3636 	default:
3637 		DRM_ERROR(
3638 			"Unsupported screen format %s\n",
3639 			drm_get_format_name(fb->format->format, &format_name));
3640 		return -EINVAL;
3641 	}
3642 
3643 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3644 	case DRM_MODE_ROTATE_0:
3645 		plane_info->rotation = ROTATION_ANGLE_0;
3646 		break;
3647 	case DRM_MODE_ROTATE_90:
3648 		plane_info->rotation = ROTATION_ANGLE_90;
3649 		break;
3650 	case DRM_MODE_ROTATE_180:
3651 		plane_info->rotation = ROTATION_ANGLE_180;
3652 		break;
3653 	case DRM_MODE_ROTATE_270:
3654 		plane_info->rotation = ROTATION_ANGLE_270;
3655 		break;
3656 	default:
3657 		plane_info->rotation = ROTATION_ANGLE_0;
3658 		break;
3659 	}
3660 
3661 	plane_info->visible = true;
3662 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3663 
3664 	plane_info->layer_index = 0;
3665 
3666 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3667 					  &plane_info->color_space);
3668 	if (ret)
3669 		return ret;
3670 
3671 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3672 					   plane_info->rotation, tiling_flags,
3673 					   &plane_info->tiling_info,
3674 					   &plane_info->plane_size,
3675 					   &plane_info->dcc, address,
3676 					   force_disable_dcc);
3677 	if (ret)
3678 		return ret;
3679 
3680 	fill_blending_from_plane_state(
3681 		plane_state, &plane_info->per_pixel_alpha,
3682 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3683 
3684 	return 0;
3685 }
3686 
3687 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3688 				    struct dc_plane_state *dc_plane_state,
3689 				    struct drm_plane_state *plane_state,
3690 				    struct drm_crtc_state *crtc_state)
3691 {
3692 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3693 	const struct amdgpu_framebuffer *amdgpu_fb =
3694 		to_amdgpu_framebuffer(plane_state->fb);
3695 	struct dc_scaling_info scaling_info;
3696 	struct dc_plane_info plane_info;
3697 	uint64_t tiling_flags;
3698 	int ret;
3699 	bool force_disable_dcc = false;
3700 
3701 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3702 	if (ret)
3703 		return ret;
3704 
3705 	dc_plane_state->src_rect = scaling_info.src_rect;
3706 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3707 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3708 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3709 
3710 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3711 	if (ret)
3712 		return ret;
3713 
3714 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3715 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3716 					  &plane_info,
3717 					  &dc_plane_state->address,
3718 					  force_disable_dcc);
3719 	if (ret)
3720 		return ret;
3721 
3722 	dc_plane_state->format = plane_info.format;
3723 	dc_plane_state->color_space = plane_info.color_space;
3725 	dc_plane_state->plane_size = plane_info.plane_size;
3726 	dc_plane_state->rotation = plane_info.rotation;
3727 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3728 	dc_plane_state->stereo_format = plane_info.stereo_format;
3729 	dc_plane_state->tiling_info = plane_info.tiling_info;
3730 	dc_plane_state->visible = plane_info.visible;
3731 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3732 	dc_plane_state->global_alpha = plane_info.global_alpha;
3733 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3734 	dc_plane_state->dcc = plane_info.dcc;
3735 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3736 
3737 	/*
3738 	 * Always set input transfer function, since plane state is refreshed
3739 	 * every time.
3740 	 */
3741 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3742 	if (ret)
3743 		return ret;
3744 
3745 	return 0;
3746 }
3747 
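/*
 * Illustrative numbers for the aspect-preserving branch below: fitting a
 * 1920x1080 source onto a 1280x1024 timing gives
 * 1920 * 1024 >= 1080 * 1280, so the height is recomputed as
 * 1080 * 1280 / 1920 = 720, yielding a centered 1280x720 rectangle with
 * dst.y = (1024 - 720) / 2 = 152 (letterboxing).
 */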
3748 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3749 					   const struct dm_connector_state *dm_state,
3750 					   struct dc_stream_state *stream)
3751 {
3752 	enum amdgpu_rmx_type rmx_type;
3753 
3754 	struct rect src = { 0 }; /* viewport in composition space */
3755 	struct rect dst = { 0 }; /* stream addressable area */
3756 
3757 	/* no mode. nothing to be done */
3758 	if (!mode)
3759 		return;
3760 
3761 	/* Full screen scaling by default */
3762 	src.width = mode->hdisplay;
3763 	src.height = mode->vdisplay;
3764 	dst.width = stream->timing.h_addressable;
3765 	dst.height = stream->timing.v_addressable;
3766 
3767 	if (dm_state) {
3768 		rmx_type = dm_state->scaling;
3769 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3770 			if (src.width * dst.height <
3771 					src.height * dst.width) {
3772 				/* height needs less upscaling/more downscaling */
3773 				dst.width = src.width *
3774 						dst.height / src.height;
3775 			} else {
3776 				/* width needs less upscaling/more downscaling */
3777 				dst.height = src.height *
3778 						dst.width / src.width;
3779 			}
3780 		} else if (rmx_type == RMX_CENTER) {
3781 			dst = src;
3782 		}
3783 
3784 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3785 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3786 
3787 		if (dm_state->underscan_enable) {
3788 			dst.x += dm_state->underscan_hborder / 2;
3789 			dst.y += dm_state->underscan_vborder / 2;
3790 			dst.width -= dm_state->underscan_hborder;
3791 			dst.height -= dm_state->underscan_vborder;
3792 		}
3793 	}
3794 
3795 	stream->src = src;
3796 	stream->dst = dst;
3797 
3798 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3799 			dst.x, dst.y, dst.width, dst.height);
3801 }
3802 
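/*
 * Resolve the effective color depth for a sink. For YCbCr 4:2:0 the depth
 * comes from the HDMI 2.0 HF-VSDB deep-color bits (8 bpc by default);
 * otherwise the EDID-reported bpc is used. Either value is then capped by
 * the connector's max_requested_bpc property and rounded down to an even
 * number, so e.g. a 12 bpc panel with max_requested_bpc = 10 ends up at
 * COLOR_DEPTH_101010.
 */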
3803 static enum dc_color_depth
3804 convert_color_depth_from_display_info(const struct drm_connector *connector,
3805 				      const struct drm_connector_state *state,
3806 				      bool is_y420)
3807 {
3808 	uint8_t bpc;
3809 
3810 	if (is_y420) {
3811 		bpc = 8;
3812 
3813 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3814 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3815 			bpc = 16;
3816 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3817 			bpc = 12;
3818 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3819 			bpc = 10;
3820 	} else {
3821 		bpc = (uint8_t)connector->display_info.bpc;
3822 		/* Assume 8 bpc by default if no bpc is specified. */
3823 		bpc = bpc ? bpc : 8;
3824 	}
3825 
3826 	if (!state)
3827 		state = connector->state;
3828 
3829 	if (state) {
3830 		/*
3831 		 * Cap display bpc based on the user requested value.
3832 		 *
3833 		 * The value for state->max_bpc may not correctly updated
3834 		 * The value for state->max_bpc may not be correctly updated
3835 		 * or if this was called outside of atomic check, so it
3836 		 * can't be used directly.
3837 		 */
3838 		bpc = min(bpc, state->max_requested_bpc);
3839 
3840 		/* Round down to the nearest even number. */
3841 		bpc = bpc - (bpc & 1);
3842 	}
3843 
3844 	switch (bpc) {
3845 	case 0:
3846 		/*
3847 		 * Temporary workaround: DRM doesn't parse color depth for
3848 		 * EDID revisions before 1.4.
3849 		 * TODO: fix EDID parsing
3850 		 */
3851 		return COLOR_DEPTH_888;
3852 	case 6:
3853 		return COLOR_DEPTH_666;
3854 	case 8:
3855 		return COLOR_DEPTH_888;
3856 	case 10:
3857 		return COLOR_DEPTH_101010;
3858 	case 12:
3859 		return COLOR_DEPTH_121212;
3860 	case 14:
3861 		return COLOR_DEPTH_141414;
3862 	case 16:
3863 		return COLOR_DEPTH_161616;
3864 	default:
3865 		return COLOR_DEPTH_UNDEFINED;
3866 	}
3867 }
3868 
3869 static enum dc_aspect_ratio
3870 get_aspect_ratio(const struct drm_display_mode *mode_in)
3871 {
3872 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3873 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3874 }
3875 
3876 static enum dc_color_space
3877 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3878 {
3879 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3880 
3881 	switch (dc_crtc_timing->pixel_encoding) {
3882 	case PIXEL_ENCODING_YCBCR422:
3883 	case PIXEL_ENCODING_YCBCR444:
3884 	case PIXEL_ENCODING_YCBCR420:
3885 	{
3886 		/*
3887 		 * 27030 kHz is the separation point between HDTV and SDTV
3888 		 * according to the HDMI spec; use YCbCr709 above it and
3889 		 * YCbCr601 below it.
3890 		 */
3891 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3892 			if (dc_crtc_timing->flags.Y_ONLY)
3893 				color_space =
3894 					COLOR_SPACE_YCBCR709_LIMITED;
3895 			else
3896 				color_space = COLOR_SPACE_YCBCR709;
3897 		} else {
3898 			if (dc_crtc_timing->flags.Y_ONLY)
3899 				color_space =
3900 					COLOR_SPACE_YCBCR601_LIMITED;
3901 			else
3902 				color_space = COLOR_SPACE_YCBCR601;
3903 		}
3904 
3905 	}
3906 	break;
3907 	case PIXEL_ENCODING_RGB:
3908 		color_space = COLOR_SPACE_SRGB;
3909 		break;
3910 
3911 	default:
3912 		WARN_ON(1);
3913 		break;
3914 	}
3915 
3916 	return color_space;
3917 }
3918 
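/*
 * Illustrative numbers for the loop below: a 594 MHz 4k60 timing
 * (pix_clk_100hz = 5940000) normalizes to 594000 kHz. At 12 bpc the TMDS
 * rate becomes 594000 * 36 / 24 = 891000 kHz; for a display whose
 * max_tmds_clock is 600000 kHz, the depth steps down to 10 bpc
 * (742500 kHz, still too high) and finally to 8 bpc (594000 kHz), which
 * fits.
 */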
3919 static bool adjust_colour_depth_from_display_info(
3920 	struct dc_crtc_timing *timing_out,
3921 	const struct drm_display_info *info)
3922 {
3923 	enum dc_color_depth depth = timing_out->display_color_depth;
3924 	int normalized_clk;
3925 	do {
3926 		normalized_clk = timing_out->pix_clk_100hz / 10;
3927 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3928 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3929 			normalized_clk /= 2;
3930 		/* Adjust the pixel clock per the HDMI spec for the given colour depth */
3931 		switch (depth) {
3932 		case COLOR_DEPTH_888:
3933 			break;
3934 		case COLOR_DEPTH_101010:
3935 			normalized_clk = (normalized_clk * 30) / 24;
3936 			break;
3937 		case COLOR_DEPTH_121212:
3938 			normalized_clk = (normalized_clk * 36) / 24;
3939 			break;
3940 		case COLOR_DEPTH_161616:
3941 			normalized_clk = (normalized_clk * 48) / 24;
3942 			break;
3943 		default:
3944 			/* The above depths are the only ones valid for HDMI. */
3945 			return false;
3946 		}
3947 		if (normalized_clk <= info->max_tmds_clock) {
3948 			timing_out->display_color_depth = depth;
3949 			return true;
3950 		}
3951 	} while (--depth > COLOR_DEPTH_666);
3952 	return false;
3953 }
3954 
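/*
 * Translate a DRM display mode into DC stream timing. The pixel encoding
 * is picked in priority order: YCbCr 4:2:0 when the mode is 4:2:0-only on
 * an HDMI signal (or 4:2:0 output is forced), then YCbCr 4:4:4 when the
 * HDMI sink advertises it, with RGB as the fallback. When an old stream is
 * supplied, its VIC and sync polarities are carried over so a scaling-only
 * change does not perturb them.
 */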
3955 static void fill_stream_properties_from_drm_display_mode(
3956 	struct dc_stream_state *stream,
3957 	const struct drm_display_mode *mode_in,
3958 	const struct drm_connector *connector,
3959 	const struct drm_connector_state *connector_state,
3960 	const struct dc_stream_state *old_stream)
3961 {
3962 	struct dc_crtc_timing *timing_out = &stream->timing;
3963 	const struct drm_display_info *info = &connector->display_info;
3964 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3965 	struct hdmi_vendor_infoframe hv_frame;
3966 	struct hdmi_avi_infoframe avi_frame;
3967 
3968 	memset(&hv_frame, 0, sizeof(hv_frame));
3969 	memset(&avi_frame, 0, sizeof(avi_frame));
3970 
3971 	timing_out->h_border_left = 0;
3972 	timing_out->h_border_right = 0;
3973 	timing_out->v_border_top = 0;
3974 	timing_out->v_border_bottom = 0;
3975 	/* TODO: un-hardcode */
3976 	if (drm_mode_is_420_only(info, mode_in)
3977 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3978 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3979 	else if (drm_mode_is_420_also(info, mode_in)
3980 			&& aconnector->force_yuv420_output)
3981 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3982 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3983 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3984 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3985 	else
3986 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3987 
3988 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3989 	timing_out->display_color_depth = convert_color_depth_from_display_info(
3990 		connector, connector_state,
3991 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3992 	timing_out->scan_type = SCANNING_TYPE_NODATA;
3993 	timing_out->hdmi_vic = 0;
3994 
3995 	if (old_stream) {
3996 		timing_out->vic = old_stream->timing.vic;
3997 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3998 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3999 	} else {
4000 		timing_out->vic = drm_match_cea_mode(mode_in);
4001 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4002 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4003 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4004 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4005 	}
4006 
4007 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4008 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4009 		timing_out->vic = avi_frame.video_code;
4010 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4011 		timing_out->hdmi_vic = hv_frame.vic;
4012 	}
4013 
4014 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4015 	timing_out->h_total = mode_in->crtc_htotal;
4016 	timing_out->h_sync_width =
4017 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4018 	timing_out->h_front_porch =
4019 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4020 	timing_out->v_total = mode_in->crtc_vtotal;
4021 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4022 	timing_out->v_front_porch =
4023 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4024 	timing_out->v_sync_width =
4025 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4026 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4027 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4028 
4029 	stream->output_color_space = get_output_color_space(timing_out);
4030 
4031 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4032 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4033 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4034 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4035 		    drm_mode_is_420_also(info, mode_in) &&
4036 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4037 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4038 			adjust_colour_depth_from_display_info(timing_out, info);
4039 		}
4040 	}
4041 }
4042 
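/*
 * Copy EDID-derived audio capabilities into the DC audio_info. Audio data
 * blocks only exist in CEA-861 extension version 3 and later, which is why
 * the mode list below is gated on cea_revision >= 3.
 */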
4043 static void fill_audio_info(struct audio_info *audio_info,
4044 			    const struct drm_connector *drm_connector,
4045 			    const struct dc_sink *dc_sink)
4046 {
4047 	int i = 0;
4048 	int cea_revision = 0;
4049 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4050 
4051 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4052 	audio_info->product_id = edid_caps->product_id;
4053 
4054 	cea_revision = drm_connector->display_info.cea_rev;
4055 
4056 #ifdef __linux__
4057 	strscpy(audio_info->display_name,
4058 		edid_caps->display_name,
4059 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4060 #else
4061 	strncpy(audio_info->display_name,
4062 		edid_caps->display_name,
4063 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4064 #endif
4065 
4066 	if (cea_revision >= 3) {
4067 		audio_info->mode_count = edid_caps->audio_mode_count;
4068 
4069 		for (i = 0; i < audio_info->mode_count; ++i) {
4070 			audio_info->modes[i].format_code =
4071 					(enum audio_format_code)
4072 					(edid_caps->audio_modes[i].format_code);
4073 			audio_info->modes[i].channel_count =
4074 					edid_caps->audio_modes[i].channel_count;
4075 			audio_info->modes[i].sample_rates.all =
4076 					edid_caps->audio_modes[i].sample_rate;
4077 			audio_info->modes[i].sample_size =
4078 					edid_caps->audio_modes[i].sample_size;
4079 		}
4080 	}
4081 
4082 	audio_info->flags.all = edid_caps->speaker_flags;
4083 
4084 	/* TODO: we only check progressive mode; check interlaced mode too */
4085 	if (drm_connector->latency_present[0]) {
4086 		audio_info->video_latency = drm_connector->video_latency[0];
4087 		audio_info->audio_latency = drm_connector->audio_latency[0];
4088 	}
4089 
4090 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4092 }
4093 
4094 static void
4095 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4096 				      struct drm_display_mode *dst_mode)
4097 {
4098 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4099 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4100 	dst_mode->crtc_clock = src_mode->crtc_clock;
4101 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4102 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4103 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4104 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4105 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4106 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4107 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4108 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4109 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4110 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4111 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4112 }
4113 
4114 static void
4115 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4116 					const struct drm_display_mode *native_mode,
4117 					bool scale_enabled)
4118 {
4119 	if (scale_enabled) {
4120 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4121 	} else if (native_mode->clock == drm_mode->clock &&
4122 			native_mode->htotal == drm_mode->htotal &&
4123 			native_mode->vtotal == drm_mode->vtotal) {
4124 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4125 	} else {
4126 		/* no scaling and no amdgpu-inserted timing: nothing to patch */
4127 	}
4128 }
4129 
4130 static struct dc_sink *
4131 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4132 {
4133 	struct dc_sink_init_data sink_init_data = { 0 };
4134 	struct dc_sink *sink = NULL;
4135 	sink_init_data.link = aconnector->dc_link;
4136 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4137 
4138 	sink = dc_sink_create(&sink_init_data);
4139 	if (!sink) {
4140 		DRM_ERROR("Failed to create sink!\n");
4141 		return NULL;
4142 	}
4143 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4144 
4145 	return sink;
4146 }
4147 
4148 static void set_multisync_trigger_params(
4149 		struct dc_stream_state *stream)
4150 {
4151 	if (stream->triggered_crtc_reset.enabled) {
4152 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4153 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4154 	}
4155 }
4156 
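/*
 * The master stream is the one with the highest refresh rate, computed as
 * pix_clk_100hz * 100 / (h_total * v_total); for a standard 1080p60 CEA
 * timing that is 148500000 / (2200 * 1125) = 60 Hz.
 */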
4157 static void set_master_stream(struct dc_stream_state *stream_set[],
4158 			      int stream_count)
4159 {
4160 	int j, highest_rfr = 0, master_stream = 0;
4161 
4162 	for (j = 0;  j < stream_count; j++) {
4163 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4164 			int refresh_rate = 0;
4165 
4166 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4167 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4168 			if (refresh_rate > highest_rfr) {
4169 				highest_rfr = refresh_rate;
4170 				master_stream = j;
4171 			}
4172 		}
4173 	}
4174 	for (j = 0;  j < stream_count; j++) {
4175 		if (stream_set[j])
4176 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4177 	}
4178 }
4179 
4180 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4181 {
4182 	int i = 0;
4183 
4184 	if (context->stream_count < 2)
4185 		return;
4186 	for (i = 0; i < context->stream_count ; i++) {
4187 		if (!context->streams[i])
4188 			continue;
4189 		/*
4190 		 * TODO: add a function to read AMD VSDB bits and set
4191 		 * crtc_sync_master.multi_sync_enabled flag
4192 		 * For now it's set to false
4193 		 */
4194 		set_multisync_trigger_params(context->streams[i]);
4195 	}
4196 	set_master_stream(context->streams, context->stream_count);
4197 }
4198 
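/*
 * Build a dc_stream_state for a connector. When no sink is attached, a
 * virtual sink is faked so headless/forced paths can still validate modes.
 * On DP sinks, DSC is enabled when the DPCD advertises it and a DSC config
 * fits within the available link bandwidth.
 */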
4199 static struct dc_stream_state *
4200 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4201 		       const struct drm_display_mode *drm_mode,
4202 		       const struct dm_connector_state *dm_state,
4203 		       const struct dc_stream_state *old_stream)
4204 {
4205 	struct drm_display_mode *preferred_mode = NULL;
4206 	struct drm_connector *drm_connector;
4207 	const struct drm_connector_state *con_state =
4208 		dm_state ? &dm_state->base : NULL;
4209 	struct dc_stream_state *stream = NULL;
4210 	struct drm_display_mode mode = *drm_mode;
4211 	bool native_mode_found = false;
4212 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4213 	int mode_refresh;
4214 	int preferred_refresh = 0;
4215 #if defined(CONFIG_DRM_AMD_DC_DCN)
4216 	struct dsc_dec_dpcd_caps dsc_caps;
4217 #endif
4218 	uint32_t link_bandwidth_kbps;
4219 
4220 	struct dc_sink *sink = NULL;
4221 	if (aconnector == NULL) {
4222 		DRM_ERROR("aconnector is NULL!\n");
4223 		return stream;
4224 	}
4225 
4226 	drm_connector = &aconnector->base;
4227 
4228 	if (!aconnector->dc_sink) {
4229 		sink = create_fake_sink(aconnector);
4230 		if (!sink)
4231 			return stream;
4232 	} else {
4233 		sink = aconnector->dc_sink;
4234 		dc_sink_retain(sink);
4235 	}
4236 
4237 	stream = dc_create_stream_for_sink(sink);
4238 
4239 	if (stream == NULL) {
4240 		DRM_ERROR("Failed to create stream for sink!\n");
4241 		goto finish;
4242 	}
4243 
4244 	stream->dm_stream_context = aconnector;
4245 
4246 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4247 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4248 
4249 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4250 		/* Search for preferred mode */
4251 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4252 			native_mode_found = true;
4253 			break;
4254 		}
4255 	}
4256 	if (!native_mode_found)
4257 		preferred_mode = list_first_entry_or_null(
4258 				&aconnector->base.modes,
4259 				struct drm_display_mode,
4260 				head);
4261 
4262 	mode_refresh = drm_mode_vrefresh(&mode);
4263 
4264 	if (preferred_mode == NULL) {
4265 		/*
4266 		 * This may not be an error: the use case is when we have no
4267 		 * usermode calls to reset and set the mode upon hotplug. In that
4268 		 * case, we call set mode ourselves to restore the previous mode,
4269 		 * and the mode list may not be populated in time.
4270 		 */
4271 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4272 	} else {
4273 		decide_crtc_timing_for_drm_display_mode(
4274 				&mode, preferred_mode,
4275 				scale);
4276 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4277 	}
4278 
4279 	if (!dm_state)
4280 		drm_mode_set_crtcinfo(&mode, 0);
4281 
4282 	/*
4283 	 * If scaling is enabled and the refresh rate didn't change,
4284 	 * copy the vic and polarities from the old timing.
4285 	 */
4286 	if (!scale || mode_refresh != preferred_refresh)
4287 		fill_stream_properties_from_drm_display_mode(stream,
4288 			&mode, &aconnector->base, con_state, NULL);
4289 	else
4290 		fill_stream_properties_from_drm_display_mode(stream,
4291 			&mode, &aconnector->base, con_state, old_stream);
4292 
4293 	stream->timing.flags.DSC = 0;
4294 
4295 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4296 #if defined(CONFIG_DRM_AMD_DC_DCN)
4297 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4298 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4299 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4300 				      &dsc_caps);
4301 #endif
4302 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4303 							     dc_link_get_link_cap(aconnector->dc_link));
4304 
4305 #if defined(CONFIG_DRM_AMD_DC_DCN)
4306 		if (dsc_caps.is_dsc_supported)
4307 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4308 						  &dsc_caps,
4309 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4310 						  link_bandwidth_kbps,
4311 						  &stream->timing,
4312 						  &stream->timing.dsc_cfg))
4313 				stream->timing.flags.DSC = 1;
4314 #endif
4315 	}
4316 
4317 	update_stream_scaling_settings(&mode, dm_state, stream);
4318 
4319 	fill_audio_info(
4320 		&stream->audio_info,
4321 		drm_connector,
4322 		sink);
4323 
4324 	update_stream_signal(stream, sink);
4325 
4326 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4327 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4328 	if (stream->link->psr_feature_enabled) {
4329 		struct dc  *core_dc = stream->link->ctx->dc;
4330 
4331 		if (dc_is_dmcu_initialized(core_dc)) {
4332 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4333 
4334 			stream->psr_version = dmcu->dmcu_version.psr_version;
4335 
4336 			/*
4337 			 * Decide whether the stream supports VSC SDP colorimetry
4338 			 * before building the VSC info packet.
4339 			 */
4340 			stream->use_vsc_sdp_for_colorimetry = false;
4341 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4342 				stream->use_vsc_sdp_for_colorimetry =
4343 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4344 			} else {
4345 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4346 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4347 					stream->use_vsc_sdp_for_colorimetry = true;
4348 				}
4349 			}
4350 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4351 		}
4352 	}
4353 finish:
4354 	dc_sink_release(sink);
4355 
4356 	return stream;
4357 }
4358 
4359 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4360 {
4361 	drm_crtc_cleanup(crtc);
4362 	kfree(crtc);
4363 }
4364 
4365 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4366 				  struct drm_crtc_state *state)
4367 {
4368 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4369 
4370 	/* TODO: destroy dc_stream objects once the stream object is flattened */
4371 	if (cur->stream)
4372 		dc_stream_release(cur->stream);
4373 
4375 	__drm_atomic_helper_crtc_destroy_state(state);
4376 
4378 	kfree(state);
4379 }
4380 
4381 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4382 {
4383 	struct dm_crtc_state *state;
4384 
4385 	if (crtc->state)
4386 		dm_crtc_destroy_state(crtc, crtc->state);
4387 
4388 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4389 	if (WARN_ON(!state))
4390 		return;
4391 
4392 	crtc->state = &state->base;
4393 	crtc->state->crtc = crtc;
4395 }
4396 
4397 static struct drm_crtc_state *
4398 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4399 {
4400 	struct dm_crtc_state *state, *cur;
4401 
4402 	if (WARN_ON(!crtc->state))
4403 		return NULL;
4404 
4405 	cur = to_dm_crtc_state(crtc->state);
4406 
4407 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4408 	if (!state)
4409 		return NULL;
4410 
4411 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4412 
4413 	if (cur->stream) {
4414 		state->stream = cur->stream;
4415 		dc_stream_retain(state->stream);
4416 	}
4417 
4418 	state->active_planes = cur->active_planes;
4419 	state->interrupts_enabled = cur->interrupts_enabled;
4420 	state->vrr_params = cur->vrr_params;
4421 	state->vrr_infopacket = cur->vrr_infopacket;
4422 	state->abm_level = cur->abm_level;
4423 	state->vrr_supported = cur->vrr_supported;
4424 	state->freesync_config = cur->freesync_config;
4425 	state->crc_src = cur->crc_src;
4426 	state->cm_has_degamma = cur->cm_has_degamma;
4427 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4428 
4429 	/* TODO: duplicate dc_stream once the stream object is flattened */
4430 
4431 	return &state->base;
4432 }
4433 
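/*
 * The per-CRTC interrupt source is selected by offsetting the base IRQ
 * type with the CRTC's OTG instance. VUPDATE interrupts are only needed
 * while variable refresh is active, so dm_set_vblank() below toggles them
 * together with the VBLANK source only in VRR mode.
 */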
4434 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4435 {
4436 	enum dc_irq_source irq_source;
4437 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4438 	struct amdgpu_device *adev = crtc->dev->dev_private;
4439 	int rc;
4440 
4441 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4442 
4443 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4444 
4445 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4446 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4447 	return rc;
4448 }
4449 
4450 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4451 {
4452 	enum dc_irq_source irq_source;
4453 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4454 	struct amdgpu_device *adev = crtc->dev->dev_private;
4455 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4456 	int rc = 0;
4457 
4458 	if (enable) {
4459 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4460 		if (amdgpu_dm_vrr_active(acrtc_state))
4461 			rc = dm_set_vupdate_irq(crtc, true);
4462 	} else {
4463 		/* vblank irq off -> vupdate irq off */
4464 		rc = dm_set_vupdate_irq(crtc, false);
4465 	}
4466 
4467 	if (rc)
4468 		return rc;
4469 
4470 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4471 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4472 }
4473 
4474 static int dm_enable_vblank(struct drm_crtc *crtc)
4475 {
4476 	return dm_set_vblank(crtc, true);
4477 }
4478 
4479 static void dm_disable_vblank(struct drm_crtc *crtc)
4480 {
4481 	dm_set_vblank(crtc, false);
4482 }
4483 
4484 /* Only the options currently available to the driver are implemented */
4485 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4486 	.reset = dm_crtc_reset_state,
4487 	.destroy = amdgpu_dm_crtc_destroy,
4488 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4489 	.set_config = drm_atomic_helper_set_config,
4490 	.page_flip = drm_atomic_helper_page_flip,
4491 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4492 	.atomic_destroy_state = dm_crtc_destroy_state,
4493 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4494 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4495 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4496 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4497 	.enable_vblank = dm_enable_vblank,
4498 	.disable_vblank = dm_disable_vblank,
4499 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4500 };
4501 
4502 static enum drm_connector_status
4503 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4504 {
4505 	bool connected;
4506 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4507 
4508 	/*
4509 	 * Notes:
4510 	 * 1. This interface is NOT called in the context of the HPD irq.
4511 	 * 2. This interface *is* called in the context of a user-mode ioctl,
4512 	 * which makes it a bad place for *any* MST-related activity.
4513 	 */
4514 
4515 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4516 	    !aconnector->fake_enable)
4517 		connected = (aconnector->dc_sink != NULL);
4518 	else
4519 		connected = (aconnector->base.force == DRM_FORCE_ON);
4520 
4521 	return (connected ? connector_status_connected :
4522 			connector_status_disconnected);
4523 }
4524 
4525 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4526 					    struct drm_connector_state *connector_state,
4527 					    struct drm_property *property,
4528 					    uint64_t val)
4529 {
4530 	struct drm_device *dev = connector->dev;
4531 	struct amdgpu_device *adev = dev->dev_private;
4532 	struct dm_connector_state *dm_old_state =
4533 		to_dm_connector_state(connector->state);
4534 	struct dm_connector_state *dm_new_state =
4535 		to_dm_connector_state(connector_state);
4536 
4537 	int ret = -EINVAL;
4538 
4539 	if (property == dev->mode_config.scaling_mode_property) {
4540 		enum amdgpu_rmx_type rmx_type;
4541 
4542 		switch (val) {
4543 		case DRM_MODE_SCALE_CENTER:
4544 			rmx_type = RMX_CENTER;
4545 			break;
4546 		case DRM_MODE_SCALE_ASPECT:
4547 			rmx_type = RMX_ASPECT;
4548 			break;
4549 		case DRM_MODE_SCALE_FULLSCREEN:
4550 			rmx_type = RMX_FULL;
4551 			break;
4552 		case DRM_MODE_SCALE_NONE:
4553 		default:
4554 			rmx_type = RMX_OFF;
4555 			break;
4556 		}
4557 
4558 		if (dm_old_state->scaling == rmx_type)
4559 			return 0;
4560 
4561 		dm_new_state->scaling = rmx_type;
4562 		ret = 0;
4563 	} else if (property == adev->mode_info.underscan_hborder_property) {
4564 		dm_new_state->underscan_hborder = val;
4565 		ret = 0;
4566 	} else if (property == adev->mode_info.underscan_vborder_property) {
4567 		dm_new_state->underscan_vborder = val;
4568 		ret = 0;
4569 	} else if (property == adev->mode_info.underscan_property) {
4570 		dm_new_state->underscan_enable = val;
4571 		ret = 0;
4572 	} else if (property == adev->mode_info.abm_level_property) {
4573 		dm_new_state->abm_level = val;
4574 		ret = 0;
4575 	}
4576 
4577 	return ret;
4578 }
4579 
4580 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4581 					    const struct drm_connector_state *state,
4582 					    struct drm_property *property,
4583 					    uint64_t *val)
4584 {
4585 	struct drm_device *dev = connector->dev;
4586 	struct amdgpu_device *adev = dev->dev_private;
4587 	struct dm_connector_state *dm_state =
4588 		to_dm_connector_state(state);
4589 	int ret = -EINVAL;
4590 
4591 	if (property == dev->mode_config.scaling_mode_property) {
4592 		switch (dm_state->scaling) {
4593 		case RMX_CENTER:
4594 			*val = DRM_MODE_SCALE_CENTER;
4595 			break;
4596 		case RMX_ASPECT:
4597 			*val = DRM_MODE_SCALE_ASPECT;
4598 			break;
4599 		case RMX_FULL:
4600 			*val = DRM_MODE_SCALE_FULLSCREEN;
4601 			break;
4602 		case RMX_OFF:
4603 		default:
4604 			*val = DRM_MODE_SCALE_NONE;
4605 			break;
4606 		}
4607 		ret = 0;
4608 	} else if (property == adev->mode_info.underscan_hborder_property) {
4609 		*val = dm_state->underscan_hborder;
4610 		ret = 0;
4611 	} else if (property == adev->mode_info.underscan_vborder_property) {
4612 		*val = dm_state->underscan_vborder;
4613 		ret = 0;
4614 	} else if (property == adev->mode_info.underscan_property) {
4615 		*val = dm_state->underscan_enable;
4616 		ret = 0;
4617 	} else if (property == adev->mode_info.abm_level_property) {
4618 		*val = dm_state->abm_level;
4619 		ret = 0;
4620 	}
4621 
4622 	return ret;
4623 }
4624 
4625 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4626 {
4627 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4628 
4629 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4630 }
4631 
4632 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4633 {
4634 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4635 	const struct dc_link *link = aconnector->dc_link;
4636 	struct amdgpu_device *adev = connector->dev->dev_private;
4637 	struct amdgpu_display_manager *dm = &adev->dm;
4638 
4639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4640 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4641 
4642 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4643 	    link->type != dc_connection_none &&
4644 	    dm->backlight_dev) {
4645 		backlight_device_unregister(dm->backlight_dev);
4646 		dm->backlight_dev = NULL;
4647 	}
4648 #endif
4649 
4650 	if (aconnector->dc_em_sink)
4651 		dc_sink_release(aconnector->dc_em_sink);
4652 	aconnector->dc_em_sink = NULL;
4653 	if (aconnector->dc_sink)
4654 		dc_sink_release(aconnector->dc_sink);
4655 	aconnector->dc_sink = NULL;
4656 
4657 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4658 	drm_connector_unregister(connector);
4659 	drm_connector_cleanup(connector);
4660 	if (aconnector->i2c) {
4661 		i2c_del_adapter(&aconnector->i2c->base);
4662 		kfree(aconnector->i2c);
4663 	}
4664 	kfree(aconnector->dm_dp_aux.aux.name);
4665 
4666 	kfree(connector);
4667 }
4668 
4669 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4670 {
4671 	struct dm_connector_state *state =
4672 		to_dm_connector_state(connector->state);
4673 
4674 	if (connector->state)
4675 		__drm_atomic_helper_connector_destroy_state(connector->state);
4676 
4677 	kfree(state);
4678 
4679 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4680 
4681 	if (state) {
4682 		state->scaling = RMX_OFF;
4683 		state->underscan_enable = false;
4684 		state->underscan_hborder = 0;
4685 		state->underscan_vborder = 0;
4686 		state->base.max_requested_bpc = 8;
4687 		state->vcpi_slots = 0;
4688 		state->pbn = 0;
4689 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4690 			state->abm_level = amdgpu_dm_abm_level;
4691 
4692 		__drm_atomic_helper_connector_reset(connector, &state->base);
4693 	}
4694 }
4695 
4696 struct drm_connector_state *
4697 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4698 {
4699 	struct dm_connector_state *state =
4700 		to_dm_connector_state(connector->state);
4701 
4702 	struct dm_connector_state *new_state =
4703 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4704 
4705 	if (!new_state)
4706 		return NULL;
4707 
4708 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4709 
4710 	new_state->freesync_capable = state->freesync_capable;
4711 	new_state->abm_level = state->abm_level;
4712 	new_state->scaling = state->scaling;
4713 	new_state->underscan_enable = state->underscan_enable;
4714 	new_state->underscan_hborder = state->underscan_hborder;
4715 	new_state->underscan_vborder = state->underscan_vborder;
4716 	new_state->vcpi_slots = state->vcpi_slots;
4717 	new_state->pbn = state->pbn;
4718 	return &new_state->base;
4719 }
4720 
4721 static int
4722 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4723 {
4724 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4725 		to_amdgpu_dm_connector(connector);
4726 	int r;
4727 
4728 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4729 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4730 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4731 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4732 		if (r)
4733 			return r;
4734 	}
4735 
4736 #if defined(CONFIG_DEBUG_FS)
4737 	connector_debugfs_init(amdgpu_dm_connector);
4738 #endif
4739 
4740 	return 0;
4741 }
4742 
4743 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4744 	.reset = amdgpu_dm_connector_funcs_reset,
4745 	.detect = amdgpu_dm_connector_detect,
4746 	.fill_modes = drm_helper_probe_single_connector_modes,
4747 	.destroy = amdgpu_dm_connector_destroy,
4748 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4749 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4750 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4751 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4752 	.late_register = amdgpu_dm_connector_late_register,
4753 	.early_unregister = amdgpu_dm_connector_unregister
4754 };
4755 
4756 static int get_modes(struct drm_connector *connector)
4757 {
4758 	return amdgpu_dm_connector_get_modes(connector);
4759 }
4760 
4761 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4762 {
4763 	struct dc_sink_init_data init_params = {
4764 			.link = aconnector->dc_link,
4765 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4766 	};
4767 	struct edid *edid;
4768 
4769 	if (!aconnector->base.edid_blob_ptr) {
4770 		DRM_ERROR("No EDID firmware found on connector %s, forcing to OFF!\n",
4771 				aconnector->base.name);
4772 
4773 		aconnector->base.force = DRM_FORCE_OFF;
4774 		aconnector->base.override_edid = false;
4775 		return;
4776 	}
4777 
4778 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4779 
4780 	aconnector->edid = edid;
4781 
4782 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4783 		aconnector->dc_link,
4784 		(uint8_t *)edid,
4785 		(edid->extensions + 1) * EDID_LENGTH,
4786 		&init_params);
4787 
4788 	if (aconnector->base.force == DRM_FORCE_ON) {
4789 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4790 		aconnector->dc_link->local_sink :
4791 		aconnector->dc_em_sink;
4792 		dc_sink_retain(aconnector->dc_sink);
4793 	}
4794 }
4795 
4796 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4797 {
4798 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4799 
4800 	/*
4801 	 * In case of a headless boot with force-on for a DP managed connector,
4802 	 * these settings have to be != 0 to get an initial modeset.
4803 	 */
4804 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4805 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4806 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4807 	}
4808 
4809 
4811 	create_eml_sink(aconnector);
4812 }
4813 
4814 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4815 				   struct drm_display_mode *mode)
4816 {
4817 	int result = MODE_ERROR;
4818 	struct dc_sink *dc_sink;
4819 	struct amdgpu_device *adev = connector->dev->dev_private;
4820 	/* TODO: Unhardcode stream count */
4821 	struct dc_stream_state *stream;
4822 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4823 	enum dc_status dc_result = DC_OK;
4824 
4825 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4826 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4827 		return result;
4828 
4829 	/*
4830 	 * Only run this the first time mode_valid is called, to initialize
4831 	 * EDID mgmt
4832 	 */
4833 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4834 		!aconnector->dc_em_sink)
4835 		handle_edid_mgmt(aconnector);
4836 
4837 	dc_sink = aconnector->dc_sink;
4838 
4839 	if (dc_sink == NULL) {
4840 		DRM_ERROR("dc_sink is NULL!\n");
4841 		goto fail;
4842 	}
4843 
4844 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4845 	if (stream == NULL) {
4846 		DRM_ERROR("Failed to create stream for sink!\n");
4847 		goto fail;
4848 	}
4849 
4850 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4851 
4852 	if (dc_result == DC_OK)
4853 		result = MODE_OK;
4854 	else
4855 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4856 			      mode->hdisplay,
4857 			      mode->vdisplay,
4858 			      mode->clock,
4859 			      dc_result);
4860 
4861 	dc_stream_release(stream);
4862 
4863 fail:
4864 	/* TODO: error handling */
4865 	return result;
4866 }
4867 
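/*
 * Pack HDR static metadata (the CTA-861-G Dynamic Range and Mastering
 * infoframe) for DC. hdmi_drm_infoframe_pack_only() emits a 4-byte header
 * followed by the 26-byte payload; the header is then rewritten below,
 * since HDMI carries it as a raw infoframe while DP wraps the same payload
 * in an SDP with its own header layout.
 */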
4868 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4869 				struct dc_info_packet *out)
4870 {
4871 	struct hdmi_drm_infoframe frame;
4872 	unsigned char buf[30]; /* 26 + 4 */
4873 	ssize_t len;
4874 	int ret, i;
4875 
4876 	memset(out, 0, sizeof(*out));
4877 
4878 	if (!state->hdr_output_metadata)
4879 		return 0;
4880 
4881 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4882 	if (ret)
4883 		return ret;
4884 
4885 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4886 	if (len < 0)
4887 		return (int)len;
4888 
4889 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4890 	if (len != 30)
4891 		return -EINVAL;
4892 
4893 	/* Prepare the infopacket for DC. */
4894 	switch (state->connector->connector_type) {
4895 	case DRM_MODE_CONNECTOR_HDMIA:
4896 		out->hb0 = 0x87; /* type */
4897 		out->hb1 = 0x01; /* version */
4898 		out->hb2 = 0x1A; /* length */
4899 		out->sb[0] = buf[3]; /* checksum */
4900 		i = 1;
4901 		break;
4902 
4903 	case DRM_MODE_CONNECTOR_DisplayPort:
4904 	case DRM_MODE_CONNECTOR_eDP:
4905 		out->hb0 = 0x00; /* sdp id, zero */
4906 		out->hb1 = 0x87; /* type */
4907 		out->hb2 = 0x1D; /* payload len - 1 */
4908 		out->hb3 = (0x13 << 2); /* sdp version */
4909 		out->sb[0] = 0x01; /* version */
4910 		out->sb[1] = 0x1A; /* length */
4911 		i = 2;
4912 		break;
4913 
4914 	default:
4915 		return -EINVAL;
4916 	}
4917 
4918 	memcpy(&out->sb[i], &buf[4], 26);
4919 	out->valid = true;
4920 
4921 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4922 		       sizeof(out->sb), false);
4923 
4924 	return 0;
4925 }
4926 
4927 static bool
4928 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4929 			  const struct drm_connector_state *new_state)
4930 {
4931 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4932 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4933 
4934 	if (old_blob != new_blob) {
4935 		if (old_blob && new_blob &&
4936 		    old_blob->length == new_blob->length)
4937 			return memcmp(old_blob->data, new_blob->data,
4938 				      old_blob->length);
4939 
4940 		return true;
4941 	}
4942 
4943 	return false;
4944 }
4945 
4946 static int
4947 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4948 				 struct drm_atomic_state *state)
4949 {
4950 	struct drm_connector_state *new_con_state =
4951 		drm_atomic_get_new_connector_state(state, conn);
4952 	struct drm_connector_state *old_con_state =
4953 		drm_atomic_get_old_connector_state(state, conn);
4954 	struct drm_crtc *crtc = new_con_state->crtc;
4955 	struct drm_crtc_state *new_crtc_state;
4956 	int ret;
4957 
4958 	if (!crtc)
4959 		return 0;
4960 
4961 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4962 		struct dc_info_packet hdr_infopacket;
4963 
4964 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4965 		if (ret)
4966 			return ret;
4967 
4968 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4969 		if (IS_ERR(new_crtc_state))
4970 			return PTR_ERR(new_crtc_state);
4971 
4972 		/*
4973 		 * DC considers the stream backends changed if the
4974 		 * static metadata changes. Forcing the modeset also
4975 		 * gives a simple way for userspace to switch from
4976 		 * 8bpc to 10bpc when setting the metadata to enter
4977 		 * or exit HDR.
4978 		 *
4979 		 * Changing the static metadata after it's been
4980 		 * set is permissible, however. So only force a
4981 		 * modeset if we're entering or exiting HDR.
4982 		 */
4983 		new_crtc_state->mode_changed =
4984 			!old_con_state->hdr_output_metadata ||
4985 			!new_con_state->hdr_output_metadata;
4986 	}
4987 
4988 	return 0;
4989 }
4990 
4991 static const struct drm_connector_helper_funcs
4992 amdgpu_dm_connector_helper_funcs = {
4993 	/*
4994 	 * If a second, bigger display is hotplugged in fbcon mode, its higher
4995 	 * resolution modes are filtered out by drm_mode_validate_size() and go
4996 	 * missing after the user starts lightdm. So renew the mode list in the
4997 	 * get_modes callback instead of just returning the mode count.
4998 	 */
4999 	.get_modes = get_modes,
5000 	.mode_valid = amdgpu_dm_connector_mode_valid,
5001 	.atomic_check = amdgpu_dm_connector_atomic_check,
5002 };
5003 
5004 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5005 {
5006 }
5007 
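/*
 * DC cannot display a cursor on a CRTC with no other hardware plane
 * enabled, so atomic check rejects cursor-only configurations; these two
 * helpers provide the plane bookkeeping for that rule.
 */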
5008 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5009 {
5010 	struct drm_device *dev = new_crtc_state->crtc->dev;
5011 	struct drm_plane *plane;
5012 
5013 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5014 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5015 			return true;
5016 	}
5017 
5018 	return false;
5019 }
5020 
5021 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5022 {
5023 	struct drm_atomic_state *state = new_crtc_state->state;
5024 	struct drm_plane *plane;
5025 	int num_active = 0;
5026 
5027 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5028 		struct drm_plane_state *new_plane_state;
5029 
5030 		/* Cursor planes are "fake". */
5031 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5032 			continue;
5033 
5034 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5035 
5036 		if (!new_plane_state) {
5037 			/*
5038 			 * The plane is enabled on the CRTC and hasn't changed
5039 			 * state. This means that it previously passed
5040 			 * validation and is therefore enabled.
5041 			 */
5042 			num_active += 1;
5043 			continue;
5044 		}
5045 
5046 		/* We need a framebuffer to be considered enabled. */
5047 		num_active += (new_plane_state->fb != NULL);
5048 	}
5049 
5050 	return num_active;
5051 }
5052 
5053 /*
5054  * Sets whether interrupts should be enabled on a specific CRTC.
5055  * We require that the stream be enabled and that there exist active
5056  * DC planes on the stream.
5057  */
5058 static void
5059 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5060 			       struct drm_crtc_state *new_crtc_state)
5061 {
5062 	struct dm_crtc_state *dm_new_crtc_state =
5063 		to_dm_crtc_state(new_crtc_state);
5064 
5065 	dm_new_crtc_state->active_planes = 0;
5066 	dm_new_crtc_state->interrupts_enabled = false;
5067 
5068 	if (!dm_new_crtc_state->stream)
5069 		return;
5070 
5071 	dm_new_crtc_state->active_planes =
5072 		count_crtc_active_planes(new_crtc_state);
5073 
5074 	dm_new_crtc_state->interrupts_enabled =
5075 		dm_new_crtc_state->active_planes > 0;
5076 }
5077 
5078 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5079 				       struct drm_crtc_state *state)
5080 {
5081 	struct amdgpu_device *adev = crtc->dev->dev_private;
5082 	struct dc *dc = adev->dm.dc;
5083 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5084 	int ret = -EINVAL;
5085 
5086 	/*
5087 	 * Update interrupt state for the CRTC. This needs to happen whenever
5088 	 * the CRTC has changed or whenever any of its planes have changed.
5089 	 * Atomic check satisfies both of these requirements since the CRTC
5090 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5091 	 */
5092 	dm_update_crtc_interrupt_state(crtc, state);
5093 
5094 	if (unlikely(!dm_crtc_state->stream &&
5095 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5096 		WARN_ON(1);
5097 		return ret;
5098 	}
5099 
5100 	/* In some use cases, like reset, no stream is attached */
5101 	if (!dm_crtc_state->stream)
5102 		return 0;
5103 
5104 	/*
5105 	 * We want at least one hardware plane enabled to use
5106 	 * the stream with a cursor enabled.
5107 	 */
5108 	if (state->enable && state->active &&
5109 	    does_crtc_have_active_cursor(state) &&
5110 	    dm_crtc_state->active_planes == 0)
5111 		return -EINVAL;
5112 
5113 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5114 		return 0;
5115 
5116 	return ret;
5117 }
5118 
5119 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5120 				      const struct drm_display_mode *mode,
5121 				      struct drm_display_mode *adjusted_mode)
5122 {
5123 	return true;
5124 }
5125 
5126 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5127 	.disable = dm_crtc_helper_disable,
5128 	.atomic_check = dm_crtc_helper_atomic_check,
5129 	.mode_fixup = dm_crtc_helper_mode_fixup,
5130 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5131 };
5132 
5133 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5134 {
5135 
5136 }
5137 
5138 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5139 {
5140 	switch (display_color_depth) {
5141 	case COLOR_DEPTH_666:
5142 		return 6;
5143 	case COLOR_DEPTH_888:
5144 		return 8;
5145 	case COLOR_DEPTH_101010:
5146 		return 10;
5147 	case COLOR_DEPTH_121212:
5148 		return 12;
5149 	case COLOR_DEPTH_141414:
5150 		return 14;
5151 	case COLOR_DEPTH_161616:
5152 		return 16;
5153 	default:
5154 		break;
5155 	}
5156 	return 0;
5157 }
5158 
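/*
 * For an MST connector, translate the mode's bandwidth needs into a PBN
 * value and reserve VCPI slots for it. The uncompressed stream bpp is
 * simply 3 * bpc (e.g. 24 bpp at 8 bpc); drm_dp_calc_pbn_mode() then
 * accounts for the MST link overhead. The reservation can fail with a
 * negative slot count when the topology is oversubscribed.
 */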
5159 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5160 					  struct drm_crtc_state *crtc_state,
5161 					  struct drm_connector_state *conn_state)
5162 {
5163 	struct drm_atomic_state *state = crtc_state->state;
5164 	struct drm_connector *connector = conn_state->connector;
5165 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5166 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5167 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5168 	struct drm_dp_mst_topology_mgr *mst_mgr;
5169 	struct drm_dp_mst_port *mst_port;
5170 	enum dc_color_depth color_depth;
5171 	int clock, bpp = 0;
5172 	bool is_y420 = false;
5173 
5174 	if (!aconnector->port || !aconnector->dc_sink)
5175 		return 0;
5176 
5177 	mst_port = aconnector->port;
5178 	mst_mgr = &aconnector->mst_port->mst_mgr;
5179 
5180 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5181 		return 0;
5182 
5183 	if (!state->duplicated) {
5184 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5185 				aconnector->force_yuv420_output;
5186 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5187 								    is_y420);
5188 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5189 		clock = adjusted_mode->clock;
5190 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5191 	}
5192 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5193 									   mst_mgr,
5194 									   mst_port,
5195 									   dm_new_connector_state->pbn,
5196 									   0);
5197 	if (dm_new_connector_state->vcpi_slots < 0) {
5198 		DRM_DEBUG_ATOMIC("failed to find vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5199 		return dm_new_connector_state->vcpi_slots;
5200 	}
5201 	return 0;
5202 }
5203 
5204 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5205 	.disable = dm_encoder_helper_disable,
5206 	.atomic_check = dm_encoder_helper_atomic_check
5207 };
5208 
5209 #if defined(CONFIG_DRM_AMD_DC_DCN)
5210 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5211 					    struct dc_state *dc_state)
5212 {
5213 	struct dc_stream_state *stream = NULL;
5214 	struct drm_connector *connector;
5215 	struct drm_connector_state *new_con_state, *old_con_state;
5216 	struct amdgpu_dm_connector *aconnector;
5217 	struct dm_connector_state *dm_conn_state;
5218 	int i, j, clock, bpp;
5219 	int vcpi, pbn_div, pbn = 0;
5220 
5221 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5222 
5223 		aconnector = to_amdgpu_dm_connector(connector);
5224 
5225 		if (!aconnector->port)
5226 			continue;
5227 
5228 		if (!new_con_state || !new_con_state->crtc)
5229 			continue;
5230 
5231 		dm_conn_state = to_dm_connector_state(new_con_state);
5232 
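		/* Find the DC stream that is driven by this connector, if any. */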
5233 		for (j = 0; j < dc_state->stream_count; j++) {
5234 			stream = dc_state->streams[j];
5235 			if (!stream)
5236 				continue;
5237 
5238 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5239 				break;
5240 
5241 			stream = NULL;
5242 		}
5243 
5244 		if (!stream)
5245 			continue;
5246 
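		/*
		 * If DSC is not enabled on the stream, keep the previously
		 * computed PBN and make sure DSC stays disabled on the port.
		 */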
5247 		if (stream->timing.flags.DSC != 1) {
5248 			drm_dp_mst_atomic_enable_dsc(state,
5249 						     aconnector->port,
5250 						     dm_conn_state->pbn,
5251 						     0,
5252 						     false);
5253 			continue;
5254 		}
5255 
5256 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5257 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5258 		clock = stream->timing.pix_clk_100hz / 10;
5259 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5260 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5261 						    aconnector->port,
5262 						    pbn, pbn_div,
5263 						    true);
5264 		if (vcpi < 0)
5265 			return vcpi;
5266 
5267 		dm_conn_state->pbn = pbn;
5268 		dm_conn_state->vcpi_slots = vcpi;
5269 	}
5270 	return 0;
5271 }
5272 #endif
5273 
5274 static void dm_drm_plane_reset(struct drm_plane *plane)
5275 {
5276 	struct dm_plane_state *amdgpu_state = NULL;
5277 
5278 	if (plane->state)
5279 		plane->funcs->atomic_destroy_state(plane, plane->state);
5280 
5281 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5282 	WARN_ON(amdgpu_state == NULL);
5283 
5284 	if (amdgpu_state)
5285 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5286 }
5287 
5288 static struct drm_plane_state *
5289 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5290 {
5291 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5292 
5293 	old_dm_plane_state = to_dm_plane_state(plane->state);
5294 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5295 	if (!dm_plane_state)
5296 		return NULL;
5297 
5298 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5299 
5300 	if (old_dm_plane_state->dc_state) {
5301 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5302 		dc_plane_state_retain(dm_plane_state->dc_state);
5303 	}
5304 
5305 	return &dm_plane_state->base;
5306 }
5307 
5308 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5309 				struct drm_plane_state *state)
5310 {
5311 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5312 
5313 	if (dm_plane_state->dc_state)
5314 		dc_plane_state_release(dm_plane_state->dc_state);
5315 
5316 	drm_atomic_helper_plane_destroy_state(plane, state);
5317 }
5318 
5319 static const struct drm_plane_funcs dm_plane_funcs = {
5320 	.update_plane	= drm_atomic_helper_update_plane,
5321 	.disable_plane	= drm_atomic_helper_disable_plane,
5322 	.destroy	= drm_primary_helper_destroy,
5323 	.reset = dm_drm_plane_reset,
5324 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5325 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5326 };
5327 
5328 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5329 				      struct drm_plane_state *new_state)
5330 {
5331 	struct amdgpu_framebuffer *afb;
5332 	struct drm_gem_object *obj;
5333 	struct amdgpu_device *adev;
5334 	struct amdgpu_bo *rbo;
5335 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5336 	struct list_head list;
5337 	struct ttm_validate_buffer tv;
5338 	struct ww_acquire_ctx ticket;
5339 	uint64_t tiling_flags;
5340 	uint32_t domain;
5341 	int r;
5342 	bool force_disable_dcc = false;
5343 
5344 	dm_plane_state_old = to_dm_plane_state(plane->state);
5345 	dm_plane_state_new = to_dm_plane_state(new_state);
5346 
5347 	if (!new_state->fb) {
5348 		DRM_DEBUG_DRIVER("No FB bound\n");
5349 		return 0;
5350 	}
5351 
5352 	afb = to_amdgpu_framebuffer(new_state->fb);
5353 	obj = new_state->fb->obj[0];
5354 	rbo = gem_to_amdgpu_bo(obj);
5355 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5356 	INIT_LIST_HEAD(&list);
5357 
5358 	tv.bo = &rbo->tbo;
5359 	tv.num_shared = 1;
5360 	list_add(&tv.head, &list);
5361 
5362 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5363 	if (r) {
5364 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5365 		return r;
5366 	}
5367 
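	/*
	 * Cursor buffers are always pinned to VRAM; other plane types may use
	 * any domain the display engine supports for scanout.
	 */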
5368 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5369 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5370 	else
5371 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5372 
5373 	r = amdgpu_bo_pin(rbo, domain);
5374 	if (unlikely(r != 0)) {
5375 		if (r != -ERESTARTSYS)
5376 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5377 		ttm_eu_backoff_reservation(&ticket, &list);
5378 		return r;
5379 	}
5380 
5381 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5382 	if (unlikely(r != 0)) {
5383 		amdgpu_bo_unpin(rbo);
5384 		ttm_eu_backoff_reservation(&ticket, &list);
5385 		DRM_ERROR("%p bind failed\n", rbo);
5386 		return r;
5387 	}
5388 
5389 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5390 
5391 	ttm_eu_backoff_reservation(&ticket, &list);
5392 
5393 	afb->address = amdgpu_bo_gpu_offset(rbo);
5394 
5395 	amdgpu_bo_ref(rbo);
5396 
5397 	if (dm_plane_state_new->dc_state &&
5398 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5399 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5400 
5401 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5402 		fill_plane_buffer_attributes(
5403 			adev, afb, plane_state->format, plane_state->rotation,
5404 			tiling_flags, &plane_state->tiling_info,
5405 			&plane_state->plane_size, &plane_state->dcc,
5406 			&plane_state->address,
5407 			force_disable_dcc);
5408 	}
5409 
5410 	return 0;
5411 }
5412 
5413 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5414 				       struct drm_plane_state *old_state)
5415 {
5416 	struct amdgpu_bo *rbo;
5417 	int r;
5418 
5419 	if (!old_state->fb)
5420 		return;
5421 
5422 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5423 	r = amdgpu_bo_reserve(rbo, false);
5424 	if (unlikely(r)) {
5425 		DRM_ERROR("failed to reserve rbo before unpin\n");
5426 		return;
5427 	}
5428 
5429 	amdgpu_bo_unpin(rbo);
5430 	amdgpu_bo_unreserve(rbo);
5431 	amdgpu_bo_unref(&rbo);
5432 }
5433 
5434 static int dm_plane_atomic_check(struct drm_plane *plane,
5435 				 struct drm_plane_state *state)
5436 {
5437 	struct amdgpu_device *adev = plane->dev->dev_private;
5438 	struct dc *dc = adev->dm.dc;
5439 	struct dm_plane_state *dm_plane_state;
5440 	struct dc_scaling_info scaling_info;
5441 	int ret;
5442 
5443 	dm_plane_state = to_dm_plane_state(state);
5444 
5445 	if (!dm_plane_state->dc_state)
5446 		return 0;
5447 
5448 	ret = fill_dc_scaling_info(state, &scaling_info);
5449 	if (ret)
5450 		return ret;
5451 
5452 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5453 		return 0;
5454 
5455 	return -EINVAL;
5456 }
5457 
5458 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5459 				       struct drm_plane_state *new_plane_state)
5460 {
5461 	/* Only support async updates on cursor planes. */
5462 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5463 		return -EINVAL;
5464 
5465 	return 0;
5466 }
5467 
5468 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5469 					 struct drm_plane_state *new_state)
5470 {
5471 	struct drm_plane_state *old_state =
5472 		drm_atomic_get_old_plane_state(new_state->state, plane);
5473 
5474 	swap(plane->state->fb, new_state->fb);
5475 
5476 	plane->state->src_x = new_state->src_x;
5477 	plane->state->src_y = new_state->src_y;
5478 	plane->state->src_w = new_state->src_w;
5479 	plane->state->src_h = new_state->src_h;
5480 	plane->state->crtc_x = new_state->crtc_x;
5481 	plane->state->crtc_y = new_state->crtc_y;
5482 	plane->state->crtc_w = new_state->crtc_w;
5483 	plane->state->crtc_h = new_state->crtc_h;
5484 
5485 	handle_cursor_update(plane, old_state);
5486 }
5487 
5488 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5489 	.prepare_fb = dm_plane_helper_prepare_fb,
5490 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5491 	.atomic_check = dm_plane_atomic_check,
5492 	.atomic_async_check = dm_plane_atomic_async_check,
5493 	.atomic_async_update = dm_plane_atomic_async_update
5494 };
5495 
5496 /*
5497  * TODO: These are currently initialized to RGB formats only.
5498  * For future use cases we should either initialize them dynamically based on
5499  * plane capabilities, or initialize this array to all formats, so the internal
5500  * DRM check will succeed, and let DC implement the proper check.
5501  */
5502 static const uint32_t rgb_formats[] = {
5503 	DRM_FORMAT_XRGB8888,
5504 	DRM_FORMAT_ARGB8888,
5505 	DRM_FORMAT_RGBA8888,
5506 	DRM_FORMAT_XRGB2101010,
5507 	DRM_FORMAT_XBGR2101010,
5508 	DRM_FORMAT_ARGB2101010,
5509 	DRM_FORMAT_ABGR2101010,
5510 	DRM_FORMAT_XBGR8888,
5511 	DRM_FORMAT_ABGR8888,
5512 	DRM_FORMAT_RGB565,
5513 };
5514 
5515 static const uint32_t overlay_formats[] = {
5516 	DRM_FORMAT_XRGB8888,
5517 	DRM_FORMAT_ARGB8888,
5518 	DRM_FORMAT_RGBA8888,
5519 	DRM_FORMAT_XBGR8888,
5520 	DRM_FORMAT_ABGR8888,
5521 	DRM_FORMAT_RGB565
5522 };
5523 
5524 static const u32 cursor_formats[] = {
5525 	DRM_FORMAT_ARGB8888
5526 };
5527 
5528 static int get_plane_formats(const struct drm_plane *plane,
5529 			     const struct dc_plane_cap *plane_cap,
5530 			     uint32_t *formats, int max_formats)
5531 {
5532 	int i, num_formats = 0;
5533 
5534 	/*
5535 	 * TODO: Query support for each group of formats directly from
5536 	 * DC plane caps. This will require adding more formats to the
5537 	 * caps list.
5538 	 */
5539 
5540 	switch (plane->type) {
5541 	case DRM_PLANE_TYPE_PRIMARY:
5542 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5543 			if (num_formats >= max_formats)
5544 				break;
5545 
5546 			formats[num_formats++] = rgb_formats[i];
5547 		}
5548 
5549 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5550 			formats[num_formats++] = DRM_FORMAT_NV12;
5551 		if (plane_cap && plane_cap->pixel_format_support.p010)
5552 			formats[num_formats++] = DRM_FORMAT_P010;
5553 		break;
5554 
5555 	case DRM_PLANE_TYPE_OVERLAY:
5556 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5557 			if (num_formats >= max_formats)
5558 				break;
5559 
5560 			formats[num_formats++] = overlay_formats[i];
5561 		}
5562 		break;
5563 
5564 	case DRM_PLANE_TYPE_CURSOR:
5565 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5566 			if (num_formats >= max_formats)
5567 				break;
5568 
5569 			formats[num_formats++] = cursor_formats[i];
5570 		}
5571 		break;
5572 	}
5573 
5574 	return num_formats;
5575 }
5576 
5577 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5578 				struct drm_plane *plane,
5579 				unsigned long possible_crtcs,
5580 				const struct dc_plane_cap *plane_cap)
5581 {
5582 	uint32_t formats[32];
5583 	int num_formats;
5584 	int res = -EPERM;
5585 
5586 	num_formats = get_plane_formats(plane, plane_cap, formats,
5587 					ARRAY_SIZE(formats));
5588 
5589 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5590 				       &dm_plane_funcs, formats, num_formats,
5591 				       NULL, plane->type, NULL);
5592 	if (res)
5593 		return res;
5594 
5595 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5596 	    plane_cap && plane_cap->per_pixel_alpha) {
5597 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5598 					  BIT(DRM_MODE_BLEND_PREMULTI);
5599 
5600 		drm_plane_create_alpha_property(plane);
5601 		drm_plane_create_blend_mode_property(plane, blend_caps);
5602 	}
5603 
5604 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5605 	    plane_cap &&
5606 	    (plane_cap->pixel_format_support.nv12 ||
5607 	     plane_cap->pixel_format_support.p010)) {
5608 		/* This only affects YUV formats. */
5609 		drm_plane_create_color_properties(
5610 			plane,
5611 			BIT(DRM_COLOR_YCBCR_BT601) |
5612 			BIT(DRM_COLOR_YCBCR_BT709) |
5613 			BIT(DRM_COLOR_YCBCR_BT2020),
5614 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5615 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5616 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5617 	}
5618 
5619 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5620 
5621 	/* Create (reset) the plane state */
5622 	if (plane->funcs->reset)
5623 		plane->funcs->reset(plane);
5624 
5625 	return 0;
5626 }
5627 
5628 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5629 			       struct drm_plane *plane,
5630 			       uint32_t crtc_index)
5631 {
5632 	struct amdgpu_crtc *acrtc = NULL;
5633 	struct drm_plane *cursor_plane;
5634 
5635 	int res = -ENOMEM;
5636 
5637 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5638 	if (!cursor_plane)
5639 		goto fail;
5640 
5641 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5642 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5643 
5644 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5645 	if (!acrtc)
5646 		goto fail;
5647 
5648 	res = drm_crtc_init_with_planes(
5649 			dm->ddev,
5650 			&acrtc->base,
5651 			plane,
5652 			cursor_plane,
5653 			&amdgpu_dm_crtc_funcs, NULL);
5654 
5655 	if (res)
5656 		goto fail;
5657 
5658 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5659 
5660 	/* Create (reset) the CRTC state */
5661 	if (acrtc->base.funcs->reset)
5662 		acrtc->base.funcs->reset(&acrtc->base);
5663 
5664 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5665 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5666 
5667 	acrtc->crtc_id = crtc_index;
5668 	acrtc->base.enabled = false;
5669 	acrtc->otg_inst = -1;
5670 
5671 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5672 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5673 				   true, MAX_COLOR_LUT_ENTRIES);
5674 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5675 
5676 	return 0;
5677 
5678 fail:
5679 	kfree(acrtc);
5680 	kfree(cursor_plane);
5681 	return res;
5682 }
5683 
5684 
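/* Translate a DC signal type into the corresponding DRM connector type. */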
5685 static int to_drm_connector_type(enum amd_signal_type st)
5686 {
5687 	switch (st) {
5688 	case SIGNAL_TYPE_HDMI_TYPE_A:
5689 		return DRM_MODE_CONNECTOR_HDMIA;
5690 	case SIGNAL_TYPE_EDP:
5691 		return DRM_MODE_CONNECTOR_eDP;
5692 	case SIGNAL_TYPE_LVDS:
5693 		return DRM_MODE_CONNECTOR_LVDS;
5694 	case SIGNAL_TYPE_RGB:
5695 		return DRM_MODE_CONNECTOR_VGA;
5696 	case SIGNAL_TYPE_DISPLAY_PORT:
5697 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5698 		return DRM_MODE_CONNECTOR_DisplayPort;
5699 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5700 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5701 		return DRM_MODE_CONNECTOR_DVID;
5702 	case SIGNAL_TYPE_VIRTUAL:
5703 		return DRM_MODE_CONNECTOR_VIRTUAL;
5704 
5705 	default:
5706 		return DRM_MODE_CONNECTOR_Unknown;
5707 	}
5708 }
5709 
5710 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5711 {
5712 	struct drm_encoder *encoder;
5713 
5714 	/* There is only one encoder per connector */
5715 	drm_connector_for_each_possible_encoder(connector, encoder)
5716 		return encoder;
5717 
5718 	return NULL;
5719 }
5720 
5721 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5722 {
5723 	struct drm_encoder *encoder;
5724 	struct amdgpu_encoder *amdgpu_encoder;
5725 
5726 	encoder = amdgpu_dm_connector_to_encoder(connector);
5727 
5728 	if (encoder == NULL)
5729 		return;
5730 
5731 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5732 
5733 	amdgpu_encoder->native_mode.clock = 0;
5734 
5735 	if (!list_empty(&connector->probed_modes)) {
5736 		struct drm_display_mode *preferred_mode = NULL;
5737 
5738 		list_for_each_entry(preferred_mode,
5739 				    &connector->probed_modes,
5740 				    head) {
5741 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5742 				amdgpu_encoder->native_mode = *preferred_mode;
5743 
5744 			break;
5745 		}
5746 
5747 	}
5748 }
5749 
5750 static struct drm_display_mode *
5751 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5752 			     char *name,
5753 			     int hdisplay, int vdisplay)
5754 {
5755 	struct drm_device *dev = encoder->dev;
5756 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5757 	struct drm_display_mode *mode = NULL;
5758 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5759 
5760 	mode = drm_mode_duplicate(dev, native_mode);
5761 
5762 	if (mode == NULL)
5763 		return NULL;
5764 
5765 	mode->hdisplay = hdisplay;
5766 	mode->vdisplay = vdisplay;
5767 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5768 #ifdef __linux__
5769 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5770 #else
5771 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5772 #endif
5773 
5774 	return mode;
5775 
5776 }
5777 
5778 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5779 						 struct drm_connector *connector)
5780 {
5781 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5782 	struct drm_display_mode *mode = NULL;
5783 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5784 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5785 				to_amdgpu_dm_connector(connector);
5786 	int i;
5787 	int n;
5788 	struct mode_size {
5789 		char name[DRM_DISPLAY_MODE_LEN];
5790 		int w;
5791 		int h;
5792 	} common_modes[] = {
5793 		{  "640x480",  640,  480},
5794 		{  "800x600",  800,  600},
5795 		{ "1024x768", 1024,  768},
5796 		{ "1280x720", 1280,  720},
5797 		{ "1280x800", 1280,  800},
5798 		{"1280x1024", 1280, 1024},
5799 		{ "1440x900", 1440,  900},
5800 		{"1680x1050", 1680, 1050},
5801 		{"1600x1200", 1600, 1200},
5802 		{"1920x1080", 1920, 1080},
5803 		{"1920x1200", 1920, 1200}
5804 	};
5805 
5806 	n = ARRAY_SIZE(common_modes);
5807 
5808 	for (i = 0; i < n; i++) {
5809 		struct drm_display_mode *curmode = NULL;
5810 		bool mode_existed = false;
5811 
5812 		if (common_modes[i].w > native_mode->hdisplay ||
5813 		    common_modes[i].h > native_mode->vdisplay ||
5814 		   (common_modes[i].w == native_mode->hdisplay &&
5815 		    common_modes[i].h == native_mode->vdisplay))
5816 			continue;
5817 
5818 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5819 			if (common_modes[i].w == curmode->hdisplay &&
5820 			    common_modes[i].h == curmode->vdisplay) {
5821 				mode_existed = true;
5822 				break;
5823 			}
5824 		}
5825 
5826 		if (mode_existed)
5827 			continue;
5828 
5829 		mode = amdgpu_dm_create_common_mode(encoder,
5830 				common_modes[i].name, common_modes[i].w,
5831 				common_modes[i].h);
5832 		drm_mode_probed_add(connector, mode);
5833 		amdgpu_dm_connector->num_modes++;
5834 	}
5835 }
5836 
5837 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5838 					      struct edid *edid)
5839 {
5840 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5841 			to_amdgpu_dm_connector(connector);
5842 
5843 	if (edid) {
5844 		/* empty probed_modes */
5845 		INIT_LIST_HEAD(&connector->probed_modes);
5846 		amdgpu_dm_connector->num_modes =
5847 				drm_add_edid_modes(connector, edid);
5848 
5849 		/* Sort the probed modes before calling
5850 		 * amdgpu_dm_get_native_mode(), since an EDID can have
5851 		 * more than one preferred mode. A mode later in the
5852 		 * probed mode list could be both preferred and of a
5853 		 * higher resolution, e.g. 3840x2160 as the preferred
5854 		 * timing in the base EDID and 4096x2160 as the preferred
5855 		 * resolution in a later DID extension block.
5856 		 */
5857 		drm_mode_sort(&connector->probed_modes);
5858 		amdgpu_dm_get_native_mode(connector);
5859 	} else {
5860 		amdgpu_dm_connector->num_modes = 0;
5861 	}
5862 }
5863 
5864 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5865 {
5866 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5867 			to_amdgpu_dm_connector(connector);
5868 	struct drm_encoder *encoder;
5869 	struct edid *edid = amdgpu_dm_connector->edid;
5870 
5871 	encoder = amdgpu_dm_connector_to_encoder(connector);
5872 
5873 	if (!edid || !drm_edid_is_valid(edid)) {
5874 		amdgpu_dm_connector->num_modes =
5875 				drm_add_modes_noedid(connector, 640, 480);
5876 	} else {
5877 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5878 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5879 	}
5880 	amdgpu_dm_fbc_init(connector);
5881 
5882 	return amdgpu_dm_connector->num_modes;
5883 }
5884 
5885 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5886 				     struct amdgpu_dm_connector *aconnector,
5887 				     int connector_type,
5888 				     struct dc_link *link,
5889 				     int link_index)
5890 {
5891 	struct amdgpu_device *adev = dm->ddev->dev_private;
5892 
5893 	/*
5894 	 * Some of the properties below require access to state, like bpc.
5895 	 * Allocate some default initial connector state with our reset helper.
5896 	 */
5897 	if (aconnector->base.funcs->reset)
5898 		aconnector->base.funcs->reset(&aconnector->base);
5899 
5900 	aconnector->connector_id = link_index;
5901 	aconnector->dc_link = link;
5902 	aconnector->base.interlace_allowed = false;
5903 	aconnector->base.doublescan_allowed = false;
5904 	aconnector->base.stereo_allowed = false;
5905 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5906 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5907 	aconnector->audio_inst = -1;
5908 	rw_init(&aconnector->hpd_lock, "dmhpd");
5909 
5910 	/*
5911 	 * Configure HPD hot plug support: connector->polled defaults to 0,
5912 	 * which means HPD hot plug is not supported.
5913 	 */
5914 	switch (connector_type) {
5915 	case DRM_MODE_CONNECTOR_HDMIA:
5916 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5917 		aconnector->base.ycbcr_420_allowed =
5918 			link->link_enc->features.hdmi_ycbcr420_supported;
5919 		break;
5920 	case DRM_MODE_CONNECTOR_DisplayPort:
5921 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5922 		aconnector->base.ycbcr_420_allowed =
5923 			link->link_enc->features.dp_ycbcr420_supported;
5924 		break;
5925 	case DRM_MODE_CONNECTOR_DVID:
5926 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5927 		break;
5928 	default:
5929 		break;
5930 	}
5931 
5932 	drm_object_attach_property(&aconnector->base.base,
5933 				dm->ddev->mode_config.scaling_mode_property,
5934 				DRM_MODE_SCALE_NONE);
5935 
5936 	drm_object_attach_property(&aconnector->base.base,
5937 				adev->mode_info.underscan_property,
5938 				UNDERSCAN_OFF);
5939 	drm_object_attach_property(&aconnector->base.base,
5940 				adev->mode_info.underscan_hborder_property,
5941 				0);
5942 	drm_object_attach_property(&aconnector->base.base,
5943 				adev->mode_info.underscan_vborder_property,
5944 				0);
5945 
5946 	if (!aconnector->mst_port)
5947 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5948 
5949 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
5950 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5951 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5952 
5953 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5954 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5955 		drm_object_attach_property(&aconnector->base.base,
5956 				adev->mode_info.abm_level_property, 0);
5957 	}
5958 
5959 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5960 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5961 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5962 		drm_object_attach_property(
5963 			&aconnector->base.base,
5964 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5965 
5966 		if (!aconnector->mst_port)
5967 			drm_connector_attach_vrr_capable_property(&aconnector->base);
5968 
5969 #ifdef CONFIG_DRM_AMD_DC_HDCP
5970 		if (adev->dm.hdcp_workqueue)
5971 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5972 #endif
5973 	}
5974 }
5975 
5976 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5977 			      struct i2c_msg *msgs, int num)
5978 {
5979 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5980 	struct ddc_service *ddc_service = i2c->ddc_service;
5981 	struct i2c_command cmd;
5982 	int i;
5983 	int result = -EIO;
5984 
5985 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5986 
5987 	if (!cmd.payloads)
5988 		return result;
5989 
5990 	cmd.number_of_payloads = num;
5991 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5992 	cmd.speed = 100;
5993 
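	/* Translate each i2c_msg into a DC i2c payload. */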
5994 	for (i = 0; i < num; i++) {
5995 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5996 		cmd.payloads[i].address = msgs[i].addr;
5997 		cmd.payloads[i].length = msgs[i].len;
5998 		cmd.payloads[i].data = msgs[i].buf;
5999 	}
6000 
6001 	if (dc_submit_i2c(
6002 			ddc_service->ctx->dc,
6003 			ddc_service->ddc_pin->hw_info.ddc_channel,
6004 			&cmd))
6005 		result = num;
6006 
6007 	kfree(cmd.payloads);
6008 	return result;
6009 }
6010 
6011 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6012 {
6013 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6014 }
6015 
6016 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6017 	.master_xfer = amdgpu_dm_i2c_xfer,
6018 	.functionality = amdgpu_dm_i2c_func,
6019 };
6020 
6021 static struct amdgpu_i2c_adapter *
6022 create_i2c(struct ddc_service *ddc_service,
6023 	   int link_index,
6024 	   int *res)
6025 {
6026 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6027 	struct amdgpu_i2c_adapter *i2c;
6028 
6029 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6030 	if (!i2c)
6031 		return NULL;
6032 #ifdef notyet
6033 	i2c->base.owner = THIS_MODULE;
6034 	i2c->base.class = I2C_CLASS_DDC;
6035 	i2c->base.dev.parent = &adev->pdev->dev;
6036 #endif
6037 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6038 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6039 	i2c_set_adapdata(&i2c->base, i2c);
6040 	i2c->ddc_service = ddc_service;
6041 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6042 
6043 	return i2c;
6044 }
6045 
6046 
6047 /*
6048  * Note: this function assumes that dc_link_detect() was called for the
6049  * dc_link which will be represented by this aconnector.
6050  */
6051 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6052 				    struct amdgpu_dm_connector *aconnector,
6053 				    uint32_t link_index,
6054 				    struct amdgpu_encoder *aencoder)
6055 {
6056 	int res = 0;
6057 	int connector_type;
6058 	struct dc *dc = dm->dc;
6059 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6060 	struct amdgpu_i2c_adapter *i2c;
6061 
6062 	link->priv = aconnector;
6063 
6064 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6065 
6066 	i2c = create_i2c(link->ddc, link->link_index, &res);
6067 	if (!i2c) {
6068 		DRM_ERROR("Failed to create i2c adapter data\n");
6069 		return -ENOMEM;
6070 	}
6071 
6072 	aconnector->i2c = i2c;
6073 	res = i2c_add_adapter(&i2c->base);
6074 
6075 	if (res) {
6076 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6077 		goto out_free;
6078 	}
6079 
6080 	connector_type = to_drm_connector_type(link->connector_signal);
6081 
6082 	res = drm_connector_init_with_ddc(
6083 			dm->ddev,
6084 			&aconnector->base,
6085 			&amdgpu_dm_connector_funcs,
6086 			connector_type,
6087 			&i2c->base);
6088 
6089 	if (res) {
6090 		DRM_ERROR("connector_init failed\n");
6091 		aconnector->connector_id = -1;
6092 		goto out_free;
6093 	}
6094 
6095 	drm_connector_helper_add(
6096 			&aconnector->base,
6097 			&amdgpu_dm_connector_helper_funcs);
6098 
6099 	amdgpu_dm_connector_init_helper(
6100 		dm,
6101 		aconnector,
6102 		connector_type,
6103 		link,
6104 		link_index);
6105 
6106 	drm_connector_attach_encoder(
6107 		&aconnector->base, &aencoder->base);
6108 
6109 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6110 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6111 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6112 
6113 out_free:
6114 	if (res) {
6115 		kfree(i2c);
6116 		aconnector->i2c = NULL;
6117 	}
6118 	return res;
6119 }
6120 
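/*
 * Build the possible-CRTCs mask for encoders: one bit per CRTC, so any
 * encoder can be driven by any of the (up to six) CRTCs on the device.
 */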
6121 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6122 {
6123 	switch (adev->mode_info.num_crtc) {
6124 	case 1:
6125 		return 0x1;
6126 	case 2:
6127 		return 0x3;
6128 	case 3:
6129 		return 0x7;
6130 	case 4:
6131 		return 0xf;
6132 	case 5:
6133 		return 0x1f;
6134 	case 6:
6135 	default:
6136 		return 0x3f;
6137 	}
6138 }
6139 
6140 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6141 				  struct amdgpu_encoder *aencoder,
6142 				  uint32_t link_index)
6143 {
6144 	struct amdgpu_device *adev = dev->dev_private;
6145 
6146 	int res = drm_encoder_init(dev,
6147 				   &aencoder->base,
6148 				   &amdgpu_dm_encoder_funcs,
6149 				   DRM_MODE_ENCODER_TMDS,
6150 				   NULL);
6151 
6152 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6153 
6154 	if (!res)
6155 		aencoder->encoder_id = link_index;
6156 	else
6157 		aencoder->encoder_id = -1;
6158 
6159 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6160 
6161 	return res;
6162 }
6163 
6164 static void manage_dm_interrupts(struct amdgpu_device *adev,
6165 				 struct amdgpu_crtc *acrtc,
6166 				 bool enable)
6167 {
6168 	/*
6169 	 * This is not a correct translation, but it works as long as the
6170 	 * VBLANK constant is the same as the PFLIP constant.
6171 	 */
6172 	int irq_type =
6173 		amdgpu_display_crtc_idx_to_irq_type(
6174 			adev,
6175 			acrtc->crtc_id);
6176 
6177 	if (enable) {
6178 		drm_crtc_vblank_on(&acrtc->base);
6179 		amdgpu_irq_get(
6180 			adev,
6181 			&adev->pageflip_irq,
6182 			irq_type);
6183 	} else {
6184 
6185 		amdgpu_irq_put(
6186 			adev,
6187 			&adev->pageflip_irq,
6188 			irq_type);
6189 		drm_crtc_vblank_off(&acrtc->base);
6190 	}
6191 }
6192 
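/*
 * Check whether the scaling mode or the underscan settings differ between
 * two connector states.
 */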
6193 static bool
6194 is_scaling_state_different(const struct dm_connector_state *dm_state,
6195 			   const struct dm_connector_state *old_dm_state)
6196 {
6197 	if (dm_state->scaling != old_dm_state->scaling)
6198 		return true;
6199 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6200 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6201 			return true;
6202 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6203 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6204 			return true;
6205 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6206 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6207 		return true;
6208 	return false;
6209 }
6210 
6211 #ifdef CONFIG_DRM_AMD_DC_HDCP
6212 static bool is_content_protection_different(struct drm_connector_state *state,
6213 					    const struct drm_connector_state *old_state,
6214 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6215 {
6216 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6217 
6218 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6219 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6220 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6221 		return true;
6222 	}
6223 
6224 	/* CP is being re-enabled, ignore this. */
6225 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6226 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6227 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6228 		return false;
6229 	}
6230 
6231 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6232 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6233 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6234 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6235 
6236 	/* Check if something is connected/enabled; otherwise we would start HDCP
6237 	 * with nothing connected/enabled (hot-plug, headless S3, DPMS).
6238 	 */
6239 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6240 	    aconnector->dc_sink != NULL)
6241 		return true;
6242 
6243 	if (old_state->content_protection == state->content_protection)
6244 		return false;
6245 
6246 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6247 		return true;
6248 
6249 	return false;
6250 }
6251 
6252 #endif
6253 static void remove_stream(struct amdgpu_device *adev,
6254 			  struct amdgpu_crtc *acrtc,
6255 			  struct dc_stream_state *stream)
6256 {
6257 	/* this is the update mode case */
6258 
6259 	acrtc->otg_inst = -1;
6260 	acrtc->enabled = false;
6261 }
6262 
6263 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6264 			       struct dc_cursor_position *position)
6265 {
6266 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6267 	int x, y;
6268 	int xorigin = 0, yorigin = 0;
6269 
6270 	position->enable = false;
6271 	position->x = 0;
6272 	position->y = 0;
6273 
6274 	if (!crtc || !plane->state->fb)
6275 		return 0;
6276 
6277 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6278 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6279 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6280 			  __func__,
6281 			  plane->state->crtc_w,
6282 			  plane->state->crtc_h);
6283 		return -EINVAL;
6284 	}
6285 
6286 	x = plane->state->crtc_x;
6287 	y = plane->state->crtc_y;
6288 
6289 	if (x <= -amdgpu_crtc->max_cursor_width ||
6290 	    y <= -amdgpu_crtc->max_cursor_height)
6291 		return 0;
6292 
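	/*
	 * If the cursor hangs off the top or left edge of the CRTC, clamp the
	 * position to zero and carry the overhang in the hotspot offset instead.
	 */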
6293 	if (x < 0) {
6294 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6295 		x = 0;
6296 	}
6297 	if (y < 0) {
6298 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6299 		y = 0;
6300 	}
6301 	position->enable = true;
6302 	position->translate_by_source = true;
6303 	position->x = x;
6304 	position->y = y;
6305 	position->x_hotspot = xorigin;
6306 	position->y_hotspot = yorigin;
6307 
6308 	return 0;
6309 }
6310 
6311 static void handle_cursor_update(struct drm_plane *plane,
6312 				 struct drm_plane_state *old_plane_state)
6313 {
6314 	struct amdgpu_device *adev = plane->dev->dev_private;
6315 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6316 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6317 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6318 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6319 	uint64_t address = afb ? afb->address : 0;
6320 	struct dc_cursor_position position;
6321 	struct dc_cursor_attributes attributes;
6322 	int ret;
6323 
6324 	if (!plane->state->fb && !old_plane_state->fb)
6325 		return;
6326 
6327 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6328 			 __func__,
6329 			 amdgpu_crtc->crtc_id,
6330 			 plane->state->crtc_w,
6331 			 plane->state->crtc_h);
6332 
6333 	ret = get_cursor_position(plane, crtc, &position);
6334 	if (ret)
6335 		return;
6336 
6337 	if (!position.enable) {
6338 		/* turn off cursor */
6339 		if (crtc_state && crtc_state->stream) {
6340 			mutex_lock(&adev->dm.dc_lock);
6341 			dc_stream_set_cursor_position(crtc_state->stream,
6342 						      &position);
6343 			mutex_unlock(&adev->dm.dc_lock);
6344 		}
6345 		return;
6346 	}
6347 
6348 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6349 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6350 
6351 	memset(&attributes, 0, sizeof(attributes));
6352 	attributes.address.high_part = upper_32_bits(address);
6353 	attributes.address.low_part  = lower_32_bits(address);
6354 	attributes.width             = plane->state->crtc_w;
6355 	attributes.height            = plane->state->crtc_h;
6356 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6357 	attributes.rotation_angle    = 0;
6358 	attributes.attribute_flags.value = 0;
6359 
6360 	attributes.pitch = attributes.width;
6361 
6362 	if (crtc_state->stream) {
6363 		mutex_lock(&adev->dm.dc_lock);
6364 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6365 							 &attributes))
6366 			DRM_ERROR("DC failed to set cursor attributes\n");
6367 
6368 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6369 						   &position))
6370 			DRM_ERROR("DC failed to set cursor position\n");
6371 		mutex_unlock(&adev->dm.dc_lock);
6372 	}
6373 }
6374 
6375 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6376 {
6377 
6378 	assert_spin_locked(&acrtc->base.dev->event_lock);
6379 	WARN_ON(acrtc->event);
6380 
6381 	acrtc->event = acrtc->base.state->event;
6382 
6383 	/* Set the flip status */
6384 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6385 
6386 	/* Mark this event as consumed */
6387 	acrtc->base.state->event = NULL;
6388 
6389 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6390 						 acrtc->crtc_id);
6391 }
6392 
6393 static void update_freesync_state_on_stream(
6394 	struct amdgpu_display_manager *dm,
6395 	struct dm_crtc_state *new_crtc_state,
6396 	struct dc_stream_state *new_stream,
6397 	struct dc_plane_state *surface,
6398 	u32 flip_timestamp_in_us)
6399 {
6400 	struct mod_vrr_params vrr_params;
6401 	struct dc_info_packet vrr_infopacket = {0};
6402 	struct amdgpu_device *adev = dm->adev;
6403 	unsigned long flags;
6404 
6405 	if (!new_stream)
6406 		return;
6407 
6408 	/*
6409 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6410 	 * For now it's sufficient to just guard against these conditions.
6411 	 */
6412 
6413 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6414 		return;
6415 
6416 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6417 	vrr_params = new_crtc_state->vrr_params;
6418 
6419 	if (surface) {
6420 		mod_freesync_handle_preflip(
6421 			dm->freesync_module,
6422 			surface,
6423 			new_stream,
6424 			flip_timestamp_in_us,
6425 			&vrr_params);
6426 
6427 		if (adev->family < AMDGPU_FAMILY_AI &&
6428 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6429 			mod_freesync_handle_v_update(dm->freesync_module,
6430 						     new_stream, &vrr_params);
6431 
6432 			/* Need to call this before the frame ends. */
6433 			dc_stream_adjust_vmin_vmax(dm->dc,
6434 						   new_crtc_state->stream,
6435 						   &vrr_params.adjust);
6436 		}
6437 	}
6438 
6439 	mod_freesync_build_vrr_infopacket(
6440 		dm->freesync_module,
6441 		new_stream,
6442 		&vrr_params,
6443 		PACKET_TYPE_VRR,
6444 		TRANSFER_FUNC_UNKNOWN,
6445 		&vrr_infopacket);
6446 
6447 	new_crtc_state->freesync_timing_changed |=
6448 		(memcmp(&new_crtc_state->vrr_params.adjust,
6449 			&vrr_params.adjust,
6450 			sizeof(vrr_params.adjust)) != 0);
6451 
6452 	new_crtc_state->freesync_vrr_info_changed |=
6453 		(memcmp(&new_crtc_state->vrr_infopacket,
6454 			&vrr_infopacket,
6455 			sizeof(vrr_infopacket)) != 0);
6456 
6457 	new_crtc_state->vrr_params = vrr_params;
6458 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6459 
6460 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6461 	new_stream->vrr_infopacket = vrr_infopacket;
6462 
6463 	if (new_crtc_state->freesync_vrr_info_changed)
6464 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6465 			      new_crtc_state->base.crtc->base.id,
6466 			      (int)new_crtc_state->base.vrr_enabled,
6467 			      (int)vrr_params.state);
6468 
6469 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6470 }
6471 
6472 static void pre_update_freesync_state_on_stream(
6473 	struct amdgpu_display_manager *dm,
6474 	struct dm_crtc_state *new_crtc_state)
6475 {
6476 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6477 	struct mod_vrr_params vrr_params;
6478 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6479 	struct amdgpu_device *adev = dm->adev;
6480 	unsigned long flags;
6481 
6482 	if (!new_stream)
6483 		return;
6484 
6485 	/*
6486 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6487 	 * For now it's sufficient to just guard against these conditions.
6488 	 */
6489 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6490 		return;
6491 
6492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6493 	vrr_params = new_crtc_state->vrr_params;
6494 
6495 	if (new_crtc_state->vrr_supported &&
6496 	    config.min_refresh_in_uhz &&
6497 	    config.max_refresh_in_uhz) {
6498 		config.state = new_crtc_state->base.vrr_enabled ?
6499 			VRR_STATE_ACTIVE_VARIABLE :
6500 			VRR_STATE_INACTIVE;
6501 	} else {
6502 		config.state = VRR_STATE_UNSUPPORTED;
6503 	}
6504 
6505 	mod_freesync_build_vrr_params(dm->freesync_module,
6506 				      new_stream,
6507 				      &config, &vrr_params);
6508 
6509 	new_crtc_state->freesync_timing_changed |=
6510 		(memcmp(&new_crtc_state->vrr_params.adjust,
6511 			&vrr_params.adjust,
6512 			sizeof(vrr_params.adjust)) != 0);
6513 
6514 	new_crtc_state->vrr_params = vrr_params;
6515 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6516 }
6517 
6518 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6519 					    struct dm_crtc_state *new_state)
6520 {
6521 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6522 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6523 
6524 	if (!old_vrr_active && new_vrr_active) {
6525 		/* Transition VRR inactive -> active:
6526 		 * While VRR is active, we must not disable vblank irq, as a
6527 		 * re-enable after a disable would compute bogus vblank/pflip
6528 		 * timestamps if the disable happened inside the display front-porch.
6529 		 *
6530 		 * We also need vupdate irq for the actual core vblank handling
6531 		 * at end of vblank.
6532 		 */
6533 		dm_set_vupdate_irq(new_state->base.crtc, true);
6534 		drm_crtc_vblank_get(new_state->base.crtc);
6535 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6536 				 __func__, new_state->base.crtc->base.id);
6537 	} else if (old_vrr_active && !new_vrr_active) {
6538 		/* Transition VRR active -> inactive:
6539 		 * Allow vblank irq disable again for fixed refresh rate.
6540 		 */
6541 		dm_set_vupdate_irq(new_state->base.crtc, false);
6542 		drm_crtc_vblank_put(new_state->base.crtc);
6543 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6544 				 __func__, new_state->base.crtc->base.id);
6545 	}
6546 }
6547 
6548 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6549 {
6550 	struct drm_plane *plane;
6551 	struct drm_plane_state *old_plane_state, *new_plane_state;
6552 	int i;
6553 
6554 	/*
6555 	 * TODO: Make this per-stream so we don't issue redundant updates for
6556 	 * commits with multiple streams.
6557 	 */
6558 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6559 				       new_plane_state, i)
6560 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6561 			handle_cursor_update(plane, old_plane_state);
6562 }
6563 
6564 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6565 				    struct dc_state *dc_state,
6566 				    struct drm_device *dev,
6567 				    struct amdgpu_display_manager *dm,
6568 				    struct drm_crtc *pcrtc,
6569 				    bool wait_for_vblank)
6570 {
6571 	uint32_t i;
6572 	uint64_t timestamp_ns;
6573 	struct drm_plane *plane;
6574 	struct drm_plane_state *old_plane_state, *new_plane_state;
6575 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6576 	struct drm_crtc_state *new_pcrtc_state =
6577 			drm_atomic_get_new_crtc_state(state, pcrtc);
6578 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6579 	struct dm_crtc_state *dm_old_crtc_state =
6580 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6581 	int planes_count = 0, vpos, hpos;
6582 	long r;
6583 	unsigned long flags;
6584 	struct amdgpu_bo *abo;
6585 	uint64_t tiling_flags;
6586 	uint32_t target_vblank, last_flip_vblank;
6587 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6588 	bool pflip_present = false;
6589 	struct {
6590 		struct dc_surface_update surface_updates[MAX_SURFACES];
6591 		struct dc_plane_info plane_infos[MAX_SURFACES];
6592 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6593 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6594 		struct dc_stream_update stream_update;
6595 	} *bundle;
6596 
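	/*
	 * The update bundle is too large to live on the stack, so allocate it
	 * from the heap for the duration of the commit.
	 */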
6597 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6598 
6599 	if (!bundle) {
6600 		dm_error("Failed to allocate update bundle\n");
6601 		goto cleanup;
6602 	}
6603 
6604 	/*
6605 	 * Disable the cursor first if we're disabling all the planes.
6606 	 * It'll remain on the screen after the planes are re-enabled
6607 	 * if we don't.
6608 	 */
6609 	if (acrtc_state->active_planes == 0)
6610 		amdgpu_dm_commit_cursors(state);
6611 
6612 	/* update planes when needed */
6613 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6614 		struct drm_crtc *crtc = new_plane_state->crtc;
6615 		struct drm_crtc_state *new_crtc_state;
6616 		struct drm_framebuffer *fb = new_plane_state->fb;
6617 		bool plane_needs_flip;
6618 		struct dc_plane_state *dc_plane;
6619 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6620 
6621 		/* Cursor plane is handled after stream updates */
6622 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6623 			continue;
6624 
6625 		if (!fb || !crtc || pcrtc != crtc)
6626 			continue;
6627 
6628 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6629 		if (!new_crtc_state->active)
6630 			continue;
6631 
6632 		dc_plane = dm_new_plane_state->dc_state;
6633 
6634 		bundle->surface_updates[planes_count].surface = dc_plane;
6635 		if (new_pcrtc_state->color_mgmt_changed) {
6636 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6637 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6638 		}
6639 
6640 		fill_dc_scaling_info(new_plane_state,
6641 				     &bundle->scaling_infos[planes_count]);
6642 
6643 		bundle->surface_updates[planes_count].scaling_info =
6644 			&bundle->scaling_infos[planes_count];
6645 
6646 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6647 
6648 		pflip_present = pflip_present || plane_needs_flip;
6649 
6650 		if (!plane_needs_flip) {
6651 			planes_count += 1;
6652 			continue;
6653 		}
6654 
6655 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6656 
6657 		/*
6658 		 * Wait for all fences on this FB. Do limited wait to avoid
6659 		 * deadlock during GPU reset when this fence will not signal
6660 		 * but we hold reservation lock for the BO.
6661 		 */
6662 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6663 							false,
6664 							msecs_to_jiffies(5000));
6665 		if (unlikely(r <= 0))
6666 			DRM_ERROR("Waiting for fences timed out!\n");
6667 
6668 		/*
6669 		 * TODO: This might fail and hence is better not used; wait
6670 		 * explicitly on the fences instead. In general, this should
6671 		 * be done from the blocking commit tail, as per the
6672 		 * framework helpers.
6673 		 */
6674 		r = amdgpu_bo_reserve(abo, true);
6675 		if (unlikely(r != 0))
6676 			DRM_ERROR("failed to reserve buffer before flip\n");
6677 
6678 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6679 
6680 		amdgpu_bo_unreserve(abo);
6681 
6682 		fill_dc_plane_info_and_addr(
6683 			dm->adev, new_plane_state, tiling_flags,
6684 			&bundle->plane_infos[planes_count],
6685 			&bundle->flip_addrs[planes_count].address,
6686 			false);
6687 
6688 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6689 				 new_plane_state->plane->index,
6690 				 bundle->plane_infos[planes_count].dcc.enable);
6691 
6692 		bundle->surface_updates[planes_count].plane_info =
6693 			&bundle->plane_infos[planes_count];
6694 
6695 		/*
6696 		 * Only allow immediate flips for fast updates that don't
6697 		 * change FB pitch, DCC state, rotation or mirroring.
6698 		 */
6699 		bundle->flip_addrs[planes_count].flip_immediate =
6700 			crtc->state->async_flip &&
6701 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6702 
6703 		timestamp_ns = ktime_get_ns();
6704 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6705 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6706 		bundle->surface_updates[planes_count].surface = dc_plane;
6707 
6708 		if (!bundle->surface_updates[planes_count].surface) {
6709 			DRM_ERROR("No surface for CRTC: id=%d\n",
6710 					acrtc_attach->crtc_id);
6711 			continue;
6712 		}
6713 
6714 		if (plane == pcrtc->primary)
6715 			update_freesync_state_on_stream(
6716 				dm,
6717 				acrtc_state,
6718 				acrtc_state->stream,
6719 				dc_plane,
6720 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6721 
6722 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6723 				 __func__,
6724 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6725 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6726 
6727 		planes_count += 1;
6728 
6729 	}
6730 
6731 	if (pflip_present) {
6732 		if (!vrr_active) {
6733 			/* Use old throttling in non-vrr fixed refresh rate mode
6734 			 * to keep flip scheduling based on target vblank counts
6735 			 * working in a backwards compatible way, e.g., for
6736 			 * clients using the GLX_OML_sync_control extension or
6737 			 * DRI3/Present extension with defined target_msc.
6738 			 */
6739 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6740 		}
6741 		else {
6742 			/* For variable refresh rate mode only:
6743 			 * Get vblank of last completed flip to avoid > 1 vrr
6744 			 * flips per video frame by use of throttling, but allow
6745 			 * flip programming anywhere in the possibly large
6746 			 * variable vrr vblank interval for fine-grained flip
6747 			 * timing control and more opportunity to avoid stutter
6748 			 * on late submission of flips.
6749 			 */
6750 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6751 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6752 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6753 		}
6754 
6755 		target_vblank = last_flip_vblank + wait_for_vblank;
6756 
6757 		/*
6758 		 * Wait until we're out of the vertical blank period before the one
6759 		 * targeted by the flip
6760 		 */
6761 		while ((acrtc_attach->enabled &&
6762 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6763 							    0, &vpos, &hpos, NULL,
6764 							    NULL, &pcrtc->hwmode)
6765 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6766 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6767 			(int)(target_vblank -
6768 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6769 			usleep_range(1000, 1100);
6770 		}
6771 
6772 		if (acrtc_attach->base.state->event) {
6773 			drm_crtc_vblank_get(pcrtc);
6774 
6775 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6776 
6777 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6778 			prepare_flip_isr(acrtc_attach);
6779 
6780 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6781 		}
6782 
6783 		if (acrtc_state->stream) {
6784 			if (acrtc_state->freesync_vrr_info_changed)
6785 				bundle->stream_update.vrr_infopacket =
6786 					&acrtc_state->stream->vrr_infopacket;
6787 		}
6788 	}
6789 
6790 	/* Update the planes if changed or disable if we don't have any. */
6791 	if ((planes_count || acrtc_state->active_planes == 0) &&
6792 		acrtc_state->stream) {
6793 		bundle->stream_update.stream = acrtc_state->stream;
6794 		if (new_pcrtc_state->mode_changed) {
6795 			bundle->stream_update.src = acrtc_state->stream->src;
6796 			bundle->stream_update.dst = acrtc_state->stream->dst;
6797 		}
6798 
6799 		if (new_pcrtc_state->color_mgmt_changed) {
6800 			/*
6801 			 * TODO: This isn't fully correct since we've actually
6802 			 * already modified the stream in place.
6803 			 */
6804 			bundle->stream_update.gamut_remap =
6805 				&acrtc_state->stream->gamut_remap_matrix;
6806 			bundle->stream_update.output_csc_transform =
6807 				&acrtc_state->stream->csc_color_matrix;
6808 			bundle->stream_update.out_transfer_func =
6809 				acrtc_state->stream->out_transfer_func;
6810 		}
6811 
6812 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6813 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6814 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6815 
6816 		/*
6817 		 * If FreeSync state on the stream has changed then we need to
6818 		 * re-adjust the min/max bounds now that DC doesn't handle this
6819 		 * as part of commit.
6820 		 */
6821 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6822 		    amdgpu_dm_vrr_active(acrtc_state)) {
6823 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6824 			dc_stream_adjust_vmin_vmax(
6825 				dm->dc, acrtc_state->stream,
6826 				&acrtc_state->vrr_params.adjust);
6827 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6828 		}
6829 		mutex_lock(&dm->dc_lock);
6830 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6831 				acrtc_state->stream->link->psr_allow_active)
6832 			amdgpu_dm_psr_disable(acrtc_state->stream);
6833 
6834 		dc_commit_updates_for_stream(dm->dc,
6835 						     bundle->surface_updates,
6836 						     planes_count,
6837 						     acrtc_state->stream,
6838 						     &bundle->stream_update,
6839 						     dc_state);
6840 
6841 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6842 						acrtc_state->stream->psr_version &&
6843 						!acrtc_state->stream->link->psr_feature_enabled)
6844 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6845 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6846 						acrtc_state->stream->link->psr_feature_enabled &&
6847 						!acrtc_state->stream->link->psr_allow_active) {
6848 			amdgpu_dm_psr_enable(acrtc_state->stream);
6849 		}
6850 
6851 		mutex_unlock(&dm->dc_lock);
6852 	}
6853 
6854 	/*
6855 	 * Update cursor state *after* programming all the planes.
6856 	 * This avoids redundant programming in the case where we're going
6857 	 * to be disabling a single plane - those pipes are being disabled.
6858 	 */
6859 	if (acrtc_state->active_planes)
6860 		amdgpu_dm_commit_cursors(state);
6861 
6862 cleanup:
6863 	kfree(bundle);
6864 }
6865 
6866 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6867 				   struct drm_atomic_state *state)
6868 {
6869 	struct amdgpu_device *adev = dev->dev_private;
6870 	struct amdgpu_dm_connector *aconnector;
6871 	struct drm_connector *connector;
6872 	struct drm_connector_state *old_con_state, *new_con_state;
6873 	struct drm_crtc_state *new_crtc_state;
6874 	struct dm_crtc_state *new_dm_crtc_state;
6875 	const struct dc_stream_status *status;
6876 	int i, inst;
6877 
6878 	/* Notify audio device removals. */
6879 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6880 		if (old_con_state->crtc != new_con_state->crtc) {
6881 			/* CRTC changes require notification. */
6882 			goto notify;
6883 		}
6884 
6885 		if (!new_con_state->crtc)
6886 			continue;
6887 
6888 		new_crtc_state = drm_atomic_get_new_crtc_state(
6889 			state, new_con_state->crtc);
6890 
6891 		if (!new_crtc_state)
6892 			continue;
6893 
6894 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6895 			continue;
6896 
6897 	notify:
6898 		aconnector = to_amdgpu_dm_connector(connector);
6899 
6900 		mutex_lock(&adev->dm.audio_lock);
6901 		inst = aconnector->audio_inst;
6902 		aconnector->audio_inst = -1;
6903 		mutex_unlock(&adev->dm.audio_lock);
6904 
6905 		amdgpu_dm_audio_eld_notify(adev, inst);
6906 	}
6907 
6908 	/* Notify audio device additions. */
6909 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6910 		if (!new_con_state->crtc)
6911 			continue;
6912 
6913 		new_crtc_state = drm_atomic_get_new_crtc_state(
6914 			state, new_con_state->crtc);
6915 
6916 		if (!new_crtc_state)
6917 			continue;
6918 
6919 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6920 			continue;
6921 
6922 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6923 		if (!new_dm_crtc_state->stream)
6924 			continue;
6925 
6926 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6927 		if (!status)
6928 			continue;
6929 
6930 		aconnector = to_amdgpu_dm_connector(connector);
6931 
6932 		mutex_lock(&adev->dm.audio_lock);
6933 		inst = status->audio_inst;
6934 		aconnector->audio_inst = inst;
6935 		mutex_unlock(&adev->dm.audio_lock);
6936 
6937 		amdgpu_dm_audio_eld_notify(adev, inst);
6938 	}
6939 }
6940 
6941 /*
 * Enable interrupts on CRTCs that are newly active, have undergone
 * a modeset, or have active planes again.
6944  *
6945  * Done in two passes, based on the for_modeset flag:
6946  * Pass 1: For CRTCs going through modeset
6947  * Pass 2: For CRTCs going from 0 to n active planes
6948  *
6949  * Interrupts can only be enabled after the planes are programmed,
6950  * so this requires a two-pass approach since we don't want to
6951  * just defer the interrupts until after commit planes every time.
6952  */
6953 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6954 					     struct drm_atomic_state *state,
6955 					     bool for_modeset)
6956 {
6957 	struct amdgpu_device *adev = dev->dev_private;
6958 	struct drm_crtc *crtc;
6959 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6960 	int i;
6961 #ifdef CONFIG_DEBUG_FS
6962 	enum amdgpu_dm_pipe_crc_source source;
6963 #endif
6964 
6965 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6966 				      new_crtc_state, i) {
6967 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6968 		struct dm_crtc_state *dm_new_crtc_state =
6969 			to_dm_crtc_state(new_crtc_state);
6970 		struct dm_crtc_state *dm_old_crtc_state =
6971 			to_dm_crtc_state(old_crtc_state);
6972 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6973 		bool run_pass;
6974 
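		/*
		 * Pass 1 (for_modeset) covers CRTCs undergoing a modeset;
		 * pass 2 covers CRTCs that are not modesetting but previously
		 * had their interrupts disabled (going from 0 to n active
		 * planes).
		 */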
6975 		run_pass = (for_modeset && modeset) ||
6976 			   (!for_modeset && !modeset &&
6977 			    !dm_old_crtc_state->interrupts_enabled);
6978 
6979 		if (!run_pass)
6980 			continue;
6981 
6982 		if (!dm_new_crtc_state->interrupts_enabled)
6983 			continue;
6984 
6985 		manage_dm_interrupts(adev, acrtc, true);
6986 
6987 #ifdef CONFIG_DEBUG_FS
		/* The stream has changed so CRC capture needs to be re-enabled. */
6989 		source = dm_new_crtc_state->crc_src;
6990 		if (amdgpu_dm_is_valid_crc_source(source)) {
6991 			amdgpu_dm_crtc_configure_crc_source(
6992 				crtc, dm_new_crtc_state,
6993 				dm_new_crtc_state->crc_src);
6994 		}
6995 #endif
6996 	}
6997 }
6998 
6999 /*
7000  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7001  * @crtc_state: the DRM CRTC state
7002  * @stream_state: the DC stream state.
7003  *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7005  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7006  */
7007 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7008 						struct dc_stream_state *stream_state)
7009 {
7010 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7011 }
7012 
7013 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7014 				   struct drm_atomic_state *state,
7015 				   bool nonblock)
7016 {
7017 	struct drm_crtc *crtc;
7018 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7019 	struct amdgpu_device *adev = dev->dev_private;
7020 	int i;
7021 
7022 	/*
	 * We disable vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, are being disabled, or have no active planes.
7025 	 *
7026 	 * It's done in atomic commit rather than commit tail for now since
7027 	 * some of these interrupt handlers access the current CRTC state and
7028 	 * potentially the stream pointer itself.
7029 	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
7033 	 *
7034 	 * TODO: Fix this so we can do this in commit tail and not have to block
7035 	 * in atomic check.
7036 	 */
7037 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7038 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7039 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7040 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7041 
7042 		if (dm_old_crtc_state->interrupts_enabled &&
7043 		    (!dm_new_crtc_state->interrupts_enabled ||
7044 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7045 			manage_dm_interrupts(adev, acrtc, false);
7046 	}
7047 	/*
	 * Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update.
7050 	 */
7051 
7052 	return drm_atomic_helper_commit(dev, state, nonblock);
7053 
	/* TODO: Handle EINTR, re-enable IRQ */
7055 }
7056 
7057 /**
7058  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7059  * @state: The atomic state to commit
7060  *
7061  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
7063  * atomic check should have filtered anything non-kosher.
7064  */
7065 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7066 {
7067 	struct drm_device *dev = state->dev;
7068 	struct amdgpu_device *adev = dev->dev_private;
7069 	struct amdgpu_display_manager *dm = &adev->dm;
7070 	struct dm_atomic_state *dm_state;
7071 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7072 	uint32_t i, j;
7073 	struct drm_crtc *crtc;
7074 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7075 	unsigned long flags;
7076 	bool wait_for_vblank = true;
7077 	struct drm_connector *connector;
7078 	struct drm_connector_state *old_con_state, *new_con_state;
7079 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7080 	int crtc_disable_count = 0;
7081 
7082 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7083 
7084 	dm_state = dm_atomic_get_new_state(state);
7085 	if (dm_state && dm_state->context) {
7086 		dc_state = dm_state->context;
7087 	} else {
7088 		/* No state changes, retain current state. */
7089 		dc_state_temp = dc_create_state(dm->dc);
7090 		ASSERT(dc_state_temp);
7091 		dc_state = dc_state_temp;
7092 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7093 	}
7094 
7095 	/* update changed items */
7096 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7097 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7098 
7099 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7100 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7101 
7102 		DRM_DEBUG_DRIVER(
7103 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7104 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7105 			"connectors_changed:%d\n",
7106 			acrtc->crtc_id,
7107 			new_crtc_state->enable,
7108 			new_crtc_state->active,
7109 			new_crtc_state->planes_changed,
7110 			new_crtc_state->mode_changed,
7111 			new_crtc_state->active_changed,
7112 			new_crtc_state->connectors_changed);
7113 
7114 		/* Copy all transient state flags into dc state */
7115 		if (dm_new_crtc_state->stream) {
7116 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7117 							    dm_new_crtc_state->stream);
7118 		}
7119 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
7122 		 */
7123 
7124 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7125 
7126 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7127 
7128 			if (!dm_new_crtc_state->stream) {
7129 				/*
				 * This can happen due to issues with the
				 * delivery of userspace notifications: in that
				 * case userspace tries to set a mode on a
				 * display that is in fact disconnected, and
				 * dc_sink is NULL on the aconnector. We expect
				 * a mode reset to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In both cases we want to pretend we still
				 * have a sink, to keep the pipe running so that
				 * hw state stays consistent with sw state.
				 */
7144 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7145 						__func__, acrtc->base.base.id);
7146 				continue;
7147 			}
7148 
7149 			if (dm_old_crtc_state->stream)
7150 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7151 
7152 			pm_runtime_get_noresume(dev->dev);
7153 
7154 			acrtc->enabled = true;
7155 			acrtc->hw_mode = new_crtc_state->mode;
7156 			crtc->hwmode = new_crtc_state->mode;
7157 		} else if (modereset_required(new_crtc_state)) {
7158 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7159 			/* i.e. reset mode */
7160 			if (dm_old_crtc_state->stream) {
7161 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7162 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7163 
7164 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7165 			}
7166 		}
7167 	} /* for_each_crtc_in_state() */
7168 
7169 	if (dc_state) {
7170 		dm_enable_per_frame_crtc_master_sync(dc_state);
7171 		mutex_lock(&dm->dc_lock);
7172 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7173 		mutex_unlock(&dm->dc_lock);
7174 	}
7175 
7176 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7177 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7178 
7179 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7180 
7181 		if (dm_new_crtc_state->stream != NULL) {
7182 			const struct dc_stream_status *status =
7183 					dc_stream_get_status(dm_new_crtc_state->stream);
7184 
7185 			if (!status)
7186 				status = dc_stream_get_status_from_state(dc_state,
7187 									 dm_new_crtc_state->stream);
7188 
7189 			if (!status)
7190 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7191 			else
7192 				acrtc->otg_inst = status->primary_otg_inst;
7193 		}
7194 	}
7195 #ifdef CONFIG_DRM_AMD_DC_HDCP
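	/*
	 * Sync HDCP state with the new connector/CRTC mapping: a connector
	 * that lost its stream while content protection was ENABLED has its
	 * display's HDCP reset and is flagged DESIRED so that protection can
	 * be re-established on a later commit.
	 */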
7196 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7197 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7198 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7199 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7200 
7201 		new_crtc_state = NULL;
7202 
7203 		if (acrtc)
7204 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7205 
7206 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7207 
7208 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7209 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7210 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7211 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7212 			continue;
7213 		}
7214 
7215 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7216 			hdcp_update_display(
7217 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7218 				new_con_state->hdcp_content_type,
7219 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7220 													 : false);
7221 	}
7222 #endif
7223 
7224 	/* Handle connector state changes */
7225 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7226 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7227 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7228 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7229 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7230 		struct dc_stream_update stream_update;
7231 		struct dc_info_packet hdr_packet;
7232 		struct dc_stream_status *status = NULL;
7233 		bool abm_changed, hdr_changed, scaling_changed;
7234 
7235 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7236 		memset(&stream_update, 0, sizeof(stream_update));
7237 
7238 		if (acrtc) {
7239 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7240 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7241 		}
7242 
7243 		/* Skip any modesets/resets */
7244 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7245 			continue;
7246 
7247 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7248 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7249 
7250 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7251 							     dm_old_con_state);
7252 
7253 		abm_changed = dm_new_crtc_state->abm_level !=
7254 			      dm_old_crtc_state->abm_level;
7255 
7256 		hdr_changed =
7257 			is_hdr_metadata_different(old_con_state, new_con_state);
7258 
7259 		if (!scaling_changed && !abm_changed && !hdr_changed)
7260 			continue;
7261 
7262 		stream_update.stream = dm_new_crtc_state->stream;
7263 		if (scaling_changed) {
7264 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7265 					dm_new_con_state, dm_new_crtc_state->stream);
7266 
7267 			stream_update.src = dm_new_crtc_state->stream->src;
7268 			stream_update.dst = dm_new_crtc_state->stream->dst;
7269 		}
7270 
7271 		if (abm_changed) {
7272 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7273 
7274 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7275 		}
7276 
7277 		if (hdr_changed) {
7278 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7279 			stream_update.hdr_static_metadata = &hdr_packet;
7280 		}
7281 
7282 		status = dc_stream_get_status(dm_new_crtc_state->stream);
		if (WARN_ON(!status))
			continue;
		WARN_ON(!status->plane_count);
7285 
7286 		/*
7287 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7288 		 * Here we create an empty update on each plane.
7289 		 * To fix this, DC should permit updating only stream properties.
7290 		 */
7291 		for (j = 0; j < status->plane_count; j++)
7292 			dummy_updates[j].surface = status->plane_states[0];
7293 
7294 
7295 		mutex_lock(&dm->dc_lock);
7296 		dc_commit_updates_for_stream(dm->dc,
7297 						     dummy_updates,
7298 						     status->plane_count,
7299 						     dm_new_crtc_state->stream,
7300 						     &stream_update,
7301 						     dc_state);
7302 		mutex_unlock(&dm->dc_lock);
7303 	}
7304 
7305 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7306 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7307 				      new_crtc_state, i) {
7308 		if (old_crtc_state->active && !new_crtc_state->active)
7309 			crtc_disable_count++;
7310 
7311 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7312 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7313 
7314 		/* Update freesync active state. */
7315 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7316 
7317 		/* Handle vrr on->off / off->on transitions */
7318 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7319 						dm_new_crtc_state);
7320 	}
7321 
7322 	/* Enable interrupts for CRTCs going through a modeset. */
7323 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7324 
7325 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7326 		if (new_crtc_state->async_flip)
7327 			wait_for_vblank = false;
7328 
	/* Update planes when needed, per CRTC */
7330 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7331 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7332 
7333 		if (dm_new_crtc_state->stream)
7334 			amdgpu_dm_commit_planes(state, dc_state, dev,
7335 						dm, crtc, wait_for_vblank);
7336 	}
7337 
7338 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7339 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7340 
7341 	/* Update audio instances for each connector. */
7342 	amdgpu_dm_commit_audio(dev, state);
7343 
7344 	/*
	 * Send the vblank event for any CRTC whose event was not handled in the
	 * flip, and mark the event consumed for drm_atomic_helper_commit_hw_done().
7347 	 */
7348 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7349 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7350 
7351 		if (new_crtc_state->event)
7352 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7353 
7354 		new_crtc_state->event = NULL;
7355 	}
7356 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7357 
7358 	/* Signal HW programming completion */
7359 	drm_atomic_helper_commit_hw_done(state);
7360 
7361 	if (wait_for_vblank)
7362 		drm_atomic_helper_wait_for_flip_done(dev, state);
7363 
7364 	drm_atomic_helper_cleanup_planes(dev, state);
7365 
7366 	/*
7367 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7368 	 * so we can put the GPU into runtime suspend if we're not driving any
7369 	 * displays anymore
7370 	 */
7371 	for (i = 0; i < crtc_disable_count; i++)
7372 		pm_runtime_put_autosuspend(dev->dev);
7373 	pm_runtime_mark_last_busy(dev->dev);
7374 
7375 	if (dc_state_temp)
7376 		dc_release_state(dc_state_temp);
7377 }
7378 
7379 
7380 static int dm_force_atomic_commit(struct drm_connector *connector)
7381 {
7382 	int ret = 0;
7383 	struct drm_device *ddev = connector->dev;
7384 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7385 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7386 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7387 	struct drm_connector_state *conn_state;
7388 	struct drm_crtc_state *crtc_state;
7389 	struct drm_plane_state *plane_state;
7390 
7391 	if (!state)
7392 		return -ENOMEM;
7393 
7394 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7395 
7396 	/* Construct an atomic state to restore previous display setting */
7397 
7398 	/*
7399 	 * Attach connectors to drm_atomic_state
7400 	 */
7401 	conn_state = drm_atomic_get_connector_state(state, connector);
7402 
7403 	ret = PTR_ERR_OR_ZERO(conn_state);
7404 	if (ret)
7405 		goto err;
7406 
	/* Attach CRTC to drm_atomic_state */
7408 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7409 
7410 	ret = PTR_ERR_OR_ZERO(crtc_state);
7411 	if (ret)
7412 		goto err;
7413 
7414 	/* force a restore */
7415 	crtc_state->mode_changed = true;
7416 
7417 	/* Attach plane to drm_atomic_state */
7418 	plane_state = drm_atomic_get_plane_state(state, plane);
7419 
7420 	ret = PTR_ERR_OR_ZERO(plane_state);
7421 	if (ret)
7422 		goto err;
7423 
7424 
7425 	/* Call commit internally with the state we just constructed */
7426 	ret = drm_atomic_commit(state);
7427 	if (!ret)
7428 		return 0;
7429 
7430 err:
7431 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7432 	drm_atomic_state_put(state);
7433 
7434 	return ret;
7435 }
7436 
7437 /*
 * This function handles all cases in which a set-mode call does not arrive
 * upon hotplug: a display being unplugged and then plugged back into the
 * same port, and running without usermode desktop manager support.
7441  */
7442 void dm_restore_drm_connector_state(struct drm_device *dev,
7443 				    struct drm_connector *connector)
7444 {
7445 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7446 	struct amdgpu_crtc *disconnected_acrtc;
7447 	struct dm_crtc_state *acrtc_state;
7448 
7449 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7450 		return;
7451 
7452 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7453 	if (!disconnected_acrtc)
7454 		return;
7455 
7456 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7457 	if (!acrtc_state->stream)
7458 		return;
7459 
7460 	/*
	 * If the previous sink has not been released and differs from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn on the display, so we do it here.
7464 	 */
7465 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7466 		dm_force_atomic_commit(&aconnector->base);
7467 }
7468 
7469 /*
7470  * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
7472  */
7473 static int do_aquire_global_lock(struct drm_device *dev,
7474 				 struct drm_atomic_state *state)
7475 {
7476 	struct drm_crtc *crtc;
7477 	struct drm_crtc_commit *commit;
7478 	long ret;
7479 
7480 	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we are taking here will
	 * get released too.
7484 	 */
7485 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7486 	if (ret)
7487 		return ret;
7488 
7489 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7490 		spin_lock(&crtc->commit_lock);
7491 		commit = list_first_entry_or_null(&crtc->commit_list,
7492 				struct drm_crtc_commit, commit_entry);
7493 		if (commit)
7494 			drm_crtc_commit_get(commit);
7495 		spin_unlock(&crtc->commit_lock);
7496 
7497 		if (!commit)
7498 			continue;
7499 
7500 		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
7503 		 */
7504 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7505 
7506 		if (ret > 0)
7507 			ret = wait_for_completion_interruptible_timeout(
7508 					&commit->flip_done, 10*HZ);
7509 
7510 		if (ret == 0)
7511 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7512 				  "timed out\n", crtc->base.id, crtc->name);
7513 
7514 		drm_crtc_commit_put(commit);
7515 	}
7516 
7517 	return ret < 0 ? ret : 0;
7518 }
7519 
7520 static void get_freesync_config_for_crtc(
7521 	struct dm_crtc_state *new_crtc_state,
7522 	struct dm_connector_state *new_con_state)
7523 {
7524 	struct mod_freesync_config config = {0};
7525 	struct amdgpu_dm_connector *aconnector =
7526 			to_amdgpu_dm_connector(new_con_state->base.connector);
7527 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7528 	int vrefresh = drm_mode_vrefresh(mode);
7529 
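	/*
	 * VRR is only usable when the sink reports FreeSync support and the
	 * current mode's refresh rate falls within the sink's min/max range.
	 */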
7530 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7531 					vrefresh >= aconnector->min_vfreq &&
7532 					vrefresh <= aconnector->max_vfreq;
7533 
7534 	if (new_crtc_state->vrr_supported) {
7535 		new_crtc_state->stream->ignore_msa_timing_param = true;
7536 		config.state = new_crtc_state->base.vrr_enabled ?
7537 				VRR_STATE_ACTIVE_VARIABLE :
7538 				VRR_STATE_INACTIVE;
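		/* DC expects the FreeSync range in micro-Hz (uHz). */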
7539 		config.min_refresh_in_uhz =
7540 				aconnector->min_vfreq * 1000000;
7541 		config.max_refresh_in_uhz =
7542 				aconnector->max_vfreq * 1000000;
7543 		config.vsif_supported = true;
7544 		config.btr = true;
7545 	}
7546 
7547 	new_crtc_state->freesync_config = config;
7548 }
7549 
7550 static void reset_freesync_config_for_crtc(
7551 	struct dm_crtc_state *new_crtc_state)
7552 {
7553 	new_crtc_state->vrr_supported = false;
7554 
7555 	memset(&new_crtc_state->vrr_params, 0,
7556 	       sizeof(new_crtc_state->vrr_params));
7557 	memset(&new_crtc_state->vrr_infopacket, 0,
7558 	       sizeof(new_crtc_state->vrr_infopacket));
7559 }
7560 
7561 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7562 				struct drm_atomic_state *state,
7563 				struct drm_crtc *crtc,
7564 				struct drm_crtc_state *old_crtc_state,
7565 				struct drm_crtc_state *new_crtc_state,
7566 				bool enable,
7567 				bool *lock_and_validation_needed)
7568 {
7569 	struct dm_atomic_state *dm_state = NULL;
7570 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7571 	struct dc_stream_state *new_stream;
7572 	int ret = 0;
7573 
7574 	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set, and update changed items there.
7577 	 */
7578 	struct amdgpu_crtc *acrtc = NULL;
7579 	struct amdgpu_dm_connector *aconnector = NULL;
7580 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7581 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7582 
7583 	new_stream = NULL;
7584 
7585 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7586 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7587 	acrtc = to_amdgpu_crtc(crtc);
7588 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7589 
7590 	/* TODO This hack should go away */
7591 	if (aconnector && enable) {
7592 		/* Make sure fake sink is created in plug-in scenario */
7593 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7594 							    &aconnector->base);
7595 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7596 							    &aconnector->base);
7597 
7598 		if (IS_ERR(drm_new_conn_state)) {
7599 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7600 			goto fail;
7601 		}
7602 
7603 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7604 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7605 
7606 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7607 			goto skip_modeset;
7608 
7609 		new_stream = create_stream_for_sink(aconnector,
7610 						     &new_crtc_state->mode,
7611 						    dm_new_conn_state,
7612 						    dm_old_crtc_state->stream);
7613 
7614 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
7619 		 */
7620 
7621 		if (!new_stream) {
7622 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7623 					__func__, acrtc->base.base.id);
7624 			ret = -ENOMEM;
7625 			goto fail;
7626 		}
7627 
7628 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7629 
7630 		ret = fill_hdr_info_packet(drm_new_conn_state,
7631 					   &new_stream->hdr_static_metadata);
7632 		if (ret)
7633 			goto fail;
7634 
7635 		/*
7636 		 * If we already removed the old stream from the context
7637 		 * (and set the new stream to NULL) then we can't reuse
7638 		 * the old stream even if the stream and scaling are unchanged.
7639 		 * We'll hit the BUG_ON and black screen.
7640 		 *
7641 		 * TODO: Refactor this function to allow this check to work
7642 		 * in all conditions.
7643 		 */
7644 		if (dm_new_crtc_state->stream &&
7645 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7646 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7647 			new_crtc_state->mode_changed = false;
7648 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7649 					 new_crtc_state->mode_changed);
7650 		}
7651 	}
7652 
7653 	/* mode_changed flag may get updated above, need to check again */
7654 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7655 		goto skip_modeset;
7656 
7657 	DRM_DEBUG_DRIVER(
7658 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7659 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7660 		"connectors_changed:%d\n",
7661 		acrtc->crtc_id,
7662 		new_crtc_state->enable,
7663 		new_crtc_state->active,
7664 		new_crtc_state->planes_changed,
7665 		new_crtc_state->mode_changed,
7666 		new_crtc_state->active_changed,
7667 		new_crtc_state->connectors_changed);
7668 
7669 	/* Remove stream for any changed/disabled CRTC */
7670 	if (!enable) {
7671 
7672 		if (!dm_old_crtc_state->stream)
7673 			goto skip_modeset;
7674 
7675 		ret = dm_atomic_get_state(state, &dm_state);
7676 		if (ret)
7677 			goto fail;
7678 
7679 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7680 				crtc->base.id);
7681 
7682 		/* i.e. reset mode */
7683 		if (dc_remove_stream_from_ctx(
7684 				dm->dc,
7685 				dm_state->context,
7686 				dm_old_crtc_state->stream) != DC_OK) {
7687 			ret = -EINVAL;
7688 			goto fail;
7689 		}
7690 
7691 		dc_stream_release(dm_old_crtc_state->stream);
7692 		dm_new_crtc_state->stream = NULL;
7693 
7694 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7695 
7696 		*lock_and_validation_needed = true;
7697 
7698 	} else {/* Add stream for any updated/enabled CRTC */
7699 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of this.
7703 		 */
7704 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7705 			goto skip_modeset;
7706 
7707 		if (modereset_required(new_crtc_state))
7708 			goto skip_modeset;
7709 
7710 		if (modeset_required(new_crtc_state, new_stream,
7711 				     dm_old_crtc_state->stream)) {
7712 
7713 			WARN_ON(dm_new_crtc_state->stream);
7714 
7715 			ret = dm_atomic_get_state(state, &dm_state);
7716 			if (ret)
7717 				goto fail;
7718 
7719 			dm_new_crtc_state->stream = new_stream;
7720 
7721 			dc_stream_retain(new_stream);
7722 
7723 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7724 						crtc->base.id);
7725 
7726 			if (dc_add_stream_to_ctx(
7727 					dm->dc,
7728 					dm_state->context,
7729 					dm_new_crtc_state->stream) != DC_OK) {
7730 				ret = -EINVAL;
7731 				goto fail;
7732 			}
7733 
7734 			*lock_and_validation_needed = true;
7735 		}
7736 	}
7737 
7738 skip_modeset:
7739 	/* Release extra reference */
7740 	if (new_stream)
		dc_stream_release(new_stream);
7742 
7743 	/*
7744 	 * We want to do dc stream updates that do not require a
7745 	 * full modeset below.
7746 	 */
7747 	if (!(enable && aconnector && new_crtc_state->enable &&
7748 	      new_crtc_state->active))
7749 		return 0;
7750 	/*
7751 	 * Given above conditions, the dc state cannot be NULL because:
7752 	 * 1. We're in the process of enabling CRTCs (just been added
7753 	 *    to the dc context, or already is on the context)
7754 	 * 2. Has a valid connector attached, and
7755 	 * 3. Is currently active and enabled.
7756 	 * => The dc stream state currently exists.
7757 	 */
7758 	BUG_ON(dm_new_crtc_state->stream == NULL);
7759 
7760 	/* Scaling or underscan settings */
7761 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7762 		update_stream_scaling_settings(
7763 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7764 
7765 	/* ABM settings */
7766 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7767 
7768 	/*
7769 	 * Color management settings. We also update color properties
7770 	 * when a modeset is needed, to ensure it gets reprogrammed.
7771 	 */
7772 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7773 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7774 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7775 		if (ret)
7776 			goto fail;
7777 	}
7778 
7779 	/* Update Freesync settings. */
7780 	get_freesync_config_for_crtc(dm_new_crtc_state,
7781 				     dm_new_conn_state);
7782 
7783 	return ret;
7784 
7785 fail:
7786 	if (new_stream)
7787 		dc_stream_release(new_stream);
7788 	return ret;
7789 }
7790 
7791 static bool should_reset_plane(struct drm_atomic_state *state,
7792 			       struct drm_plane *plane,
7793 			       struct drm_plane_state *old_plane_state,
7794 			       struct drm_plane_state *new_plane_state)
7795 {
7796 	struct drm_plane *other;
7797 	struct drm_plane_state *old_other_state, *new_other_state;
7798 	struct drm_crtc_state *new_crtc_state;
7799 	int i;
7800 
7801 	/*
7802 	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
7804 	 * the stream.
7805 	 */
7806 	if (state->allow_modeset)
7807 		return true;
7808 
7809 	/* Exit early if we know that we're adding or removing the plane. */
7810 	if (old_plane_state->crtc != new_plane_state->crtc)
7811 		return true;
7812 
7813 	/* old crtc == new_crtc == NULL, plane not in context. */
7814 	if (!new_plane_state->crtc)
7815 		return false;
7816 
7817 	new_crtc_state =
7818 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7819 
7820 	if (!new_crtc_state)
7821 		return true;
7822 
7823 	/* CRTC Degamma changes currently require us to recreate planes. */
7824 	if (new_crtc_state->color_mgmt_changed)
7825 		return true;
7826 
7827 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7828 		return true;
7829 
7830 	/*
7831 	 * If there are any new primary or overlay planes being added or
7832 	 * removed then the z-order can potentially change. To ensure
7833 	 * correct z-order and pipe acquisition the current DC architecture
7834 	 * requires us to remove and recreate all existing planes.
7835 	 *
7836 	 * TODO: Come up with a more elegant solution for this.
7837 	 */
7838 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7839 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7840 			continue;
7841 
7842 		if (old_other_state->crtc != new_plane_state->crtc &&
7843 		    new_other_state->crtc != new_plane_state->crtc)
7844 			continue;
7845 
7846 		if (old_other_state->crtc != new_other_state->crtc)
7847 			return true;
7848 
7849 		/* TODO: Remove this once we can handle fast format changes. */
7850 		if (old_other_state->fb && new_other_state->fb &&
7851 		    old_other_state->fb->format != new_other_state->fb->format)
7852 			return true;
7853 	}
7854 
7855 	return false;
7856 }
7857 
7858 static int dm_update_plane_state(struct dc *dc,
7859 				 struct drm_atomic_state *state,
7860 				 struct drm_plane *plane,
7861 				 struct drm_plane_state *old_plane_state,
7862 				 struct drm_plane_state *new_plane_state,
7863 				 bool enable,
7864 				 bool *lock_and_validation_needed)
7865 {
7866 
7867 	struct dm_atomic_state *dm_state = NULL;
7868 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7869 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7870 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7871 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7872 	struct amdgpu_crtc *new_acrtc;
7873 	bool needs_reset;
7874 	int ret = 0;
7875 
7876 
7877 	new_plane_crtc = new_plane_state->crtc;
7878 	old_plane_crtc = old_plane_state->crtc;
7879 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7880 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7881 
	/* TODO: Implement a better atomic check for the cursor plane */
7883 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7884 		if (!enable || !new_plane_crtc ||
7885 			drm_atomic_plane_disabling(plane->state, new_plane_state))
7886 			return 0;
7887 
7888 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7889 
7890 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7891 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7892 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7893 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
7894 			return -EINVAL;
7895 		}
7896 
7897 		return 0;
7898 	}
7899 
7900 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7901 					 new_plane_state);
7902 
7903 	/* Remove any changed/removed planes */
7904 	if (!enable) {
7905 		if (!needs_reset)
7906 			return 0;
7907 
7908 		if (!old_plane_crtc)
7909 			return 0;
7910 
7911 		old_crtc_state = drm_atomic_get_old_crtc_state(
7912 				state, old_plane_crtc);
7913 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7914 
7915 		if (!dm_old_crtc_state->stream)
7916 			return 0;
7917 
7918 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7919 				plane->base.id, old_plane_crtc->base.id);
7920 
7921 		ret = dm_atomic_get_state(state, &dm_state);
7922 		if (ret)
7923 			return ret;
7924 
7925 		if (!dc_remove_plane_from_context(
7926 				dc,
7927 				dm_old_crtc_state->stream,
7928 				dm_old_plane_state->dc_state,
7929 				dm_state->context)) {
7930 
			ret = -EINVAL;
7932 			return ret;
7933 		}
7934 
7935 
7936 		dc_plane_state_release(dm_old_plane_state->dc_state);
7937 		dm_new_plane_state->dc_state = NULL;
7938 
7939 		*lock_and_validation_needed = true;
7940 
7941 	} else { /* Add new planes */
7942 		struct dc_plane_state *dc_new_plane_state;
7943 
7944 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7945 			return 0;
7946 
7947 		if (!new_plane_crtc)
7948 			return 0;
7949 
7950 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7951 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7952 
7953 		if (!dm_new_crtc_state->stream)
7954 			return 0;
7955 
7956 		if (!needs_reset)
7957 			return 0;
7958 
7959 		WARN_ON(dm_new_plane_state->dc_state);
7960 
7961 		dc_new_plane_state = dc_create_plane_state(dc);
7962 		if (!dc_new_plane_state)
7963 			return -ENOMEM;
7964 
7965 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7966 				plane->base.id, new_plane_crtc->base.id);
7967 
7968 		ret = fill_dc_plane_attributes(
7969 			new_plane_crtc->dev->dev_private,
7970 			dc_new_plane_state,
7971 			new_plane_state,
7972 			new_crtc_state);
7973 		if (ret) {
7974 			dc_plane_state_release(dc_new_plane_state);
7975 			return ret;
7976 		}
7977 
7978 		ret = dm_atomic_get_state(state, &dm_state);
7979 		if (ret) {
7980 			dc_plane_state_release(dc_new_plane_state);
7981 			return ret;
7982 		}
7983 
7984 		/*
7985 		 * Any atomic check errors that occur after this will
7986 		 * not need a release. The plane state will be attached
7987 		 * to the stream, and therefore part of the atomic
7988 		 * state. It'll be released when the atomic state is
7989 		 * cleaned.
7990 		 */
7991 		if (!dc_add_plane_to_context(
7992 				dc,
7993 				dm_new_crtc_state->stream,
7994 				dc_new_plane_state,
7995 				dm_state->context)) {
7996 
7997 			dc_plane_state_release(dc_new_plane_state);
7998 			return -EINVAL;
7999 		}
8000 
8001 		dm_new_plane_state->dc_state = dc_new_plane_state;
8002 
8003 		/* Tell DC to do a full surface update every time there
8004 		 * is a plane change. Inefficient, but works for now.
8005 		 */
8006 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8007 
8008 		*lock_and_validation_needed = true;
8009 	}
8010 
8011 
8012 	return ret;
8013 }
8014 
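/*
 * Classify a commit for DC: build per-stream bundles of surface updates
 * mirroring the new plane and stream state, then ask DC via
 * dc_check_update_surfaces_for_stream() whether the change is FAST, MED or
 * FULL. Stream replacements and dc_plane_state changes force a FULL update.
 */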
8015 static int
8016 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8017 				    struct drm_atomic_state *state,
8018 				    enum surface_update_type *out_type)
8019 {
8020 	struct dc *dc = dm->dc;
8021 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8022 	int i, j, num_plane, ret = 0;
8023 	struct drm_plane_state *old_plane_state, *new_plane_state;
8024 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8025 	struct drm_crtc *new_plane_crtc;
8026 	struct drm_plane *plane;
8027 
8028 	struct drm_crtc *crtc;
8029 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8030 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8031 	struct dc_stream_status *status = NULL;
8032 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8033 	struct surface_info_bundle {
8034 		struct dc_surface_update surface_updates[MAX_SURFACES];
8035 		struct dc_plane_info plane_infos[MAX_SURFACES];
8036 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8037 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8038 		struct dc_stream_update stream_update;
8039 	} *bundle;
8040 
8041 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8042 
8043 	if (!bundle) {
8044 		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
8046 		update_type = UPDATE_TYPE_FULL;
8047 		goto cleanup;
8048 	}
8049 
8050 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8051 
8052 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8053 
8054 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8055 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8056 		num_plane = 0;
8057 
8058 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8059 			update_type = UPDATE_TYPE_FULL;
8060 			goto cleanup;
8061 		}
8062 
8063 		if (!new_dm_crtc_state->stream)
8064 			continue;
8065 
8066 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8067 			const struct amdgpu_framebuffer *amdgpu_fb =
8068 				to_amdgpu_framebuffer(new_plane_state->fb);
8069 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8070 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8071 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8072 			uint64_t tiling_flags;
8073 
8074 			new_plane_crtc = new_plane_state->crtc;
8075 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8076 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8077 
8078 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8079 				continue;
8080 
8081 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8082 				update_type = UPDATE_TYPE_FULL;
8083 				goto cleanup;
8084 			}
8085 
8086 			if (crtc != new_plane_crtc)
8087 				continue;
8088 
8089 			bundle->surface_updates[num_plane].surface =
8090 					new_dm_plane_state->dc_state;
8091 
8092 			if (new_crtc_state->mode_changed) {
8093 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8094 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8095 			}
8096 
8097 			if (new_crtc_state->color_mgmt_changed) {
8098 				bundle->surface_updates[num_plane].gamma =
8099 						new_dm_plane_state->dc_state->gamma_correction;
8100 				bundle->surface_updates[num_plane].in_transfer_func =
8101 						new_dm_plane_state->dc_state->in_transfer_func;
8102 				bundle->stream_update.gamut_remap =
8103 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8104 				bundle->stream_update.output_csc_transform =
8105 						&new_dm_crtc_state->stream->csc_color_matrix;
8106 				bundle->stream_update.out_transfer_func =
8107 						new_dm_crtc_state->stream->out_transfer_func;
8108 			}
8109 
8110 			ret = fill_dc_scaling_info(new_plane_state,
8111 						   scaling_info);
8112 			if (ret)
8113 				goto cleanup;
8114 
8115 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8116 
8117 			if (amdgpu_fb) {
8118 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8119 				if (ret)
8120 					goto cleanup;
8121 
8122 				ret = fill_dc_plane_info_and_addr(
8123 					dm->adev, new_plane_state, tiling_flags,
8124 					plane_info,
8125 					&flip_addr->address,
8126 					false);
8127 				if (ret)
8128 					goto cleanup;
8129 
8130 				bundle->surface_updates[num_plane].plane_info = plane_info;
8131 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8132 			}
8133 
8134 			num_plane++;
8135 		}
8136 
8137 		if (num_plane == 0)
8138 			continue;
8139 
8140 		ret = dm_atomic_get_state(state, &dm_state);
8141 		if (ret)
8142 			goto cleanup;
8143 
8144 		old_dm_state = dm_atomic_get_old_state(state);
8145 		if (!old_dm_state) {
8146 			ret = -EINVAL;
8147 			goto cleanup;
8148 		}
8149 
8150 		status = dc_stream_get_status_from_state(old_dm_state->context,
8151 							 new_dm_crtc_state->stream);
8152 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8153 		/*
8154 		 * TODO: DC modifies the surface during this call so we need
8155 		 * to lock here - find a way to do this without locking.
8156 		 */
8157 		mutex_lock(&dm->dc_lock);
8158 		update_type = dc_check_update_surfaces_for_stream(
8159 				dc,	bundle->surface_updates, num_plane,
8160 				&bundle->stream_update, status);
8161 		mutex_unlock(&dm->dc_lock);
8162 
8163 		if (update_type > UPDATE_TYPE_MED) {
8164 			update_type = UPDATE_TYPE_FULL;
8165 			goto cleanup;
8166 		}
8167 	}
8168 
8169 cleanup:
8170 	kfree(bundle);
8171 
8172 	*out_type = update_type;
8173 	return ret;
8174 }
8175 
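/*
 * If the given CRTC drives an MST connector, add every CRTC sharing the
 * same MST topology to the atomic state so that DSC bandwidth can be
 * recomputed across the whole tree.
 */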
8176 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8177 {
8178 	struct drm_connector *connector;
8179 	struct drm_connector_state *conn_state;
8180 	struct amdgpu_dm_connector *aconnector = NULL;
8181 	int i;
8182 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8183 		if (conn_state->crtc != crtc)
8184 			continue;
8185 
8186 		aconnector = to_amdgpu_dm_connector(connector);
8187 		if (!aconnector->port || !aconnector->mst_port)
8188 			aconnector = NULL;
8189 		else
8190 			break;
8191 	}
8192 
8193 	if (!aconnector)
8194 		return 0;
8195 
8196 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8197 }
8198 
8199 /**
8200  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8201  * @dev: The DRM device
8202  * @state: The atomic state to commit
8203  *
8204  * Validate that the given atomic state is programmable by DC into hardware.
8205  * This involves constructing a &struct dc_state reflecting the new hardware
8206  * state we wish to commit, then querying DC to see if it is programmable. It's
8207  * important not to modify the existing DC state. Otherwise, atomic_check
8208  * may unexpectedly commit hardware changes.
8209  *
8210  * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full-update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
8216  *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires the
8219  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8220  * be possible but non-trivial - a possible TODO item.
8221  *
 * Return: 0 on success, negative error code if validation failed.
8223  */
8224 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8225 				  struct drm_atomic_state *state)
8226 {
8227 	struct amdgpu_device *adev = dev->dev_private;
8228 	struct dm_atomic_state *dm_state = NULL;
8229 	struct dc *dc = adev->dm.dc;
8230 	struct drm_connector *connector;
8231 	struct drm_connector_state *old_con_state, *new_con_state;
8232 	struct drm_crtc *crtc;
8233 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8234 	struct drm_plane *plane;
8235 	struct drm_plane_state *old_plane_state, *new_plane_state;
8236 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8237 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8238 
8239 	int ret, i;
8240 
8241 	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update that implies a non-fast surface update.
8244 	 */
8245 	bool lock_and_validation_needed = false;
8246 
8247 	ret = drm_atomic_helper_check_modeset(dev, state);
8248 	if (ret)
8249 		goto fail;
8250 
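	/*
	 * On Navi10 and newer ASICs (which support DSC over MST), a modeset
	 * on one CRTC must also pull in any CRTC sharing the same MST
	 * topology, since its DSC configuration may need revalidation.
	 */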
8251 	if (adev->asic_type >= CHIP_NAVI10) {
8252 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8253 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8254 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8255 				if (ret)
8256 					goto fail;
8257 			}
8258 		}
8259 	}
8260 
8261 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8262 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8263 		    !new_crtc_state->color_mgmt_changed &&
8264 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8265 			continue;
8266 
8267 		if (!new_crtc_state->enable)
8268 			continue;
8269 
8270 		ret = drm_atomic_add_affected_connectors(state, crtc);
8271 		if (ret)
8272 			return ret;
8273 
8274 		ret = drm_atomic_add_affected_planes(state, crtc);
8275 		if (ret)
8276 			goto fail;
8277 	}
8278 
8279 	/*
8280 	 * Add all primary and overlay planes on the CRTC to the state
8281 	 * whenever a plane is enabled to maintain correct z-ordering
8282 	 * and to enable fast surface updates.
8283 	 */
8284 	drm_for_each_crtc(crtc, dev) {
8285 		bool modified = false;
8286 
8287 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8288 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8289 				continue;
8290 
8291 			if (new_plane_state->crtc == crtc ||
8292 			    old_plane_state->crtc == crtc) {
8293 				modified = true;
8294 				break;
8295 			}
8296 		}
8297 
8298 		if (!modified)
8299 			continue;
8300 
8301 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8302 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8303 				continue;
8304 
8305 			new_plane_state =
8306 				drm_atomic_get_plane_state(state, plane);
8307 
8308 			if (IS_ERR(new_plane_state)) {
8309 				ret = PTR_ERR(new_plane_state);
8310 				goto fail;
8311 			}
8312 		}
8313 	}
8314 
	/* Remove existing planes if they are modified */
8316 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8317 		ret = dm_update_plane_state(dc, state, plane,
8318 					    old_plane_state,
8319 					    new_plane_state,
8320 					    false,
8321 					    &lock_and_validation_needed);
8322 		if (ret)
8323 			goto fail;
8324 	}
8325 
	/* Disable all CRTCs which need to be disabled */
8327 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8328 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8329 					   old_crtc_state,
8330 					   new_crtc_state,
8331 					   false,
8332 					   &lock_and_validation_needed);
8333 		if (ret)
8334 			goto fail;
8335 	}
8336 
	/* Enable all CRTCs which need to be enabled */
8338 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8339 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8340 					   old_crtc_state,
8341 					   new_crtc_state,
8342 					   true,
8343 					   &lock_and_validation_needed);
8344 		if (ret)
8345 			goto fail;
8346 	}
8347 
8348 	/* Add new/modified planes */
8349 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8350 		ret = dm_update_plane_state(dc, state, plane,
8351 					    old_plane_state,
8352 					    new_plane_state,
8353 					    true,
8354 					    &lock_and_validation_needed);
8355 		if (ret)
8356 			goto fail;
8357 	}
8358 
8359 	/* Run this here since we want to validate the streams we created */
8360 	ret = drm_atomic_helper_check_planes(dev, state);
8361 	if (ret)
8362 		goto fail;
8363 
8364 	if (state->legacy_cursor_update) {
8365 		/*
8366 		 * This is a fast cursor update coming from the plane update
8367 		 * helper, check if it can be done asynchronously for better
8368 		 * performance.
8369 		 */
8370 		state->async_update =
8371 			!drm_atomic_helper_async_check(dev, state);
8372 
8373 		/*
8374 		 * Skip the remaining global validation if this is an async
8375 		 * update. Cursor updates can be done without affecting
8376 		 * state or bandwidth calcs and this avoids the performance
8377 		 * penalty of locking the private state object and
8378 		 * allocating a new dc_state.
8379 		 */
8380 		if (state->async_update)
8381 			return 0;
8382 	}
8383 
	/* Check scaling and underscan changes */
	/* TODO: Removed scaling-change validation due to the inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle this.
8388 	 */
8389 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8390 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8391 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8392 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8393 
8394 		/* Skip any modesets/resets */
8395 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8396 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8397 			continue;
8398 
		/* Skip anything that is not a scaling or underscan change */
8400 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8401 			continue;
8402 
8403 		overall_update_type = UPDATE_TYPE_FULL;
8404 		lock_and_validation_needed = true;
8405 	}
8406 
8407 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8408 	if (ret)
8409 		goto fail;
8410 
8411 	if (overall_update_type < update_type)
8412 		overall_update_type = update_type;
8413 
8414 	/*
8415 	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases:
8417 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8418 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8419 	 */
8420 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8421 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8422 
8423 	if (overall_update_type > UPDATE_TYPE_FAST) {
8424 		ret = dm_atomic_get_state(state, &dm_state);
8425 		if (ret)
8426 			goto fail;
8427 
8428 		ret = do_aquire_global_lock(dev, state);
8429 		if (ret)
8430 			goto fail;
8431 
8432 #if defined(CONFIG_DRM_AMD_DC_DCN)
8433 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8434 			goto fail;
8435 
8436 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8437 		if (ret)
8438 			goto fail;
8439 #endif
8440 
8441 		/*
8442 		 * Perform validation of MST topology in the state:
8443 		 * We need to perform MST atomic check before calling
8444 		 * dc_validate_global_state(), or there is a chance
8445 		 * to get stuck in an infinite loop and hang eventually.
8446 		 */
8447 		ret = drm_dp_mst_atomic_check(state);
8448 		if (ret)
8449 			goto fail;
8450 
8451 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8452 			ret = -EINVAL;
8453 			goto fail;
8454 		}
8455 	} else {
8456 		/*
8457 		 * The commit is a fast update. Fast updates shouldn't change
8458 		 * the DC context, affect global validation, and can have their
8459 		 * commit work done in parallel with other commits not touching
8460 		 * the same resource. If we have a new DC context as part of
8461 		 * the DM atomic state from validation we need to free it and
8462 		 * retain the existing one instead.
8463 		 */
8464 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8465 
8466 		new_dm_state = dm_atomic_get_new_state(state);
8467 		old_dm_state = dm_atomic_get_old_state(state);
8468 
8469 		if (new_dm_state && old_dm_state) {
8470 			if (new_dm_state->context)
8471 				dc_release_state(new_dm_state->context);
8472 
8473 			new_dm_state->context = old_dm_state->context;
8474 
8475 			if (old_dm_state->context)
8476 				dc_retain_state(old_dm_state->context);
8477 		}
8478 	}
8479 
	/* Store the overall update type on each CRTC for use later in the commit. */
8481 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8482 		struct dm_crtc_state *dm_new_crtc_state =
8483 			to_dm_crtc_state(new_crtc_state);
8484 
8485 		dm_new_crtc_state->update_type = (int)overall_update_type;
8486 	}
8487 
8488 	/* Must be success */
8489 	WARN_ON(ret);
8490 	return ret;
8491 
8492 fail:
8493 	if (ret == -EDEADLK)
8494 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8495 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8496 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8497 	else
8498 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8499 
8500 	return ret;
8501 }
8502 
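/*
 * Check the sink's DPCD for whether it can ignore the MSA timing parameter
 * (DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT) - a prerequisite
 * for driving FreeSync over DP/eDP.
 */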
8503 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8504 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8505 {
8506 	uint8_t dpcd_data;
8507 	bool capable = false;
8508 
8509 	if (amdgpu_dm_connector->dc_link &&
8510 		dm_helpers_dp_read_dpcd(
8511 				NULL,
8512 				amdgpu_dm_connector->dc_link,
8513 				DP_DOWN_STREAM_PORT_COUNT,
8514 				&dpcd_data,
8515 				sizeof(dpcd_data))) {
8516 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8517 	}
8518 
8519 	return capable;
8520 }
8521 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8522 					struct edid *edid)
8523 {
8524 	int i;
8525 	bool edid_check_required;
8526 	struct detailed_timing *timing;
8527 	struct detailed_non_pixel *data;
8528 	struct detailed_data_monitor_range *range;
8529 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8530 			to_amdgpu_dm_connector(connector);
8531 	struct dm_connector_state *dm_con_state = NULL;
8532 
8533 	struct drm_device *dev = connector->dev;
8534 	struct amdgpu_device *adev = dev->dev_private;
8535 	bool freesync_capable = false;
8536 
8537 	if (!connector->state) {
8538 		DRM_ERROR("%s - Connector has no state\n", __func__);
8539 		goto update;
8540 	}
8541 
8542 	if (!edid) {
8543 		dm_con_state = to_dm_connector_state(connector->state);
8544 
8545 		amdgpu_dm_connector->min_vfreq = 0;
8546 		amdgpu_dm_connector->max_vfreq = 0;
8547 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8548 
8549 		goto update;
8550 	}
8551 
8552 	dm_con_state = to_dm_connector_state(connector->state);
8553 
8554 	edid_check_required = false;
8555 	if (!amdgpu_dm_connector->dc_sink) {
8556 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8557 		goto update;
8558 	}
8559 	if (!adev->dm.freesync_module)
8560 		goto update;
8561 	/*
8562 	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP only.
8563 	 */
8564 	if (edid) {
8565 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8566 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8567 			edid_check_required = is_dp_capable_without_timing_msa(
8568 						adev->dm.dc,
8569 						amdgpu_dm_connector);
8570 		}
8571 	}
8572 	if (edid_check_required && (edid->version > 1 ||
8573 	   (edid->version == 1 && edid->revision > 1))) {
8574 		for (i = 0; i < 4; i++) {
8575 
8576 			timing	= &edid->detailed_timings[i];
8577 			data	= &timing->data.other_data;
8578 			range	= &data->data.range;
8579 			/*
8580 			 * Check if monitor has continuous frequency mode
8581 			 */
8582 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8583 				continue;
8584 			/*
8585 			 * Check the range-limits-only flag: if flags == 1,
8586 			 * no additional timing information is provided.
8587 			 * Default GTF, GTF secondary curve and CVT are not
8588 			 * supported.
8589 			 */
8590 			if (range->flags != 1)
8591 				continue;
8592 
8593 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8594 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8595 			amdgpu_dm_connector->pixel_clock_mhz =
8596 				range->pixel_clock_mhz * 10;
8597 			break;
8598 		}
8599 
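		/*
		 * Illustrative example: a monitor range descriptor of
		 * 40-75 Hz gives a 35 Hz window, which clears the 10 Hz
		 * minimum below, so the connector is reported as FreeSync
		 * capable; a near-fixed 59-61 Hz range would not qualify.
		 */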
8600 		if (amdgpu_dm_connector->max_vfreq -
8601 		    amdgpu_dm_connector->min_vfreq > 10) {
8602 
8603 			freesync_capable = true;
8604 		}
8605 	}
8606 
8607 update:
8608 	if (dm_con_state)
8609 		dm_con_state->freesync_capable = freesync_capable;
8610 
8611 	if (connector->vrr_capable_property)
8612 		drm_connector_set_vrr_capable_property(connector,
8613 						       freesync_capable);
8614 }
8615 
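/*
 * Reads DPCD register DP_PSR_SUPPORT (0x070) on eDP links: a non-zero
 * first byte indicates the sink supports PSR (the byte encodes the PSR
 * version, e.g. 0x01 for PSR1), and psr_feature_enabled is set
 * accordingly.
 */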
8616 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8617 {
8618 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8619 
8620 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8621 		return;
8622 	if (link->type == dc_connection_none)
8623 		return;
8624 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8625 					dpcd_data, sizeof(dpcd_data))) {
8626 		link->psr_feature_enabled = dpcd_data[0] ? true : false;
8627 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8628 	}
8629 }
8630 
8631 /**
8632  * amdgpu_dm_link_setup_psr() - configure the PSR link
8633  * @stream: stream state
8634  *
8635  * Return: true on success
8636  */
8637 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8638 {
8639 	struct dc_link *link = NULL;
8640 	struct psr_config psr_config = {0};
8641 	struct psr_context psr_context = {0};
8642 	struct dc *dc = NULL;
8643 	bool ret = false;
8644 
8645 	if (stream == NULL)
8646 		return false;
8647 
8648 	link = stream->link;
8649 	dc = link->ctx->dc;
8650 
8651 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8652 
8653 	if (psr_config.psr_version > 0) {
8654 		psr_config.psr_exit_link_training_required = 0x1;
8655 		psr_config.psr_frame_capture_indication_req = 0;
8656 		psr_config.psr_rfb_setup_time = 0x37;
8657 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8658 		psr_config.allow_smu_optimizations = 0x0;
8659 
8660 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8661 
8662 	}
8663 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8664 
8665 	return ret;
8666 }
8667 
8668 /**
8669  * amdgpu_dm_psr_enable() - enable the PSR firmware
8670  * @stream: stream state
8671  *
8672  * Return: true on success
8673  */
8674 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8675 {
8676 	struct dc_link *link = stream->link;
8677 	unsigned int vsync_rate_hz = 0;
8678 	struct dc_static_screen_params params = {0};
8679 	/* Calculate the number of static frames before generating an
8680 	 * interrupt to enter PSR.
8681 	 */
8682 	/* Initialize with a fail-safe of 2 static frames. */
8683 	unsigned int num_frames_static = 2;
8684 
8685 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8686 
8687 	vsync_rate_hz = div64_u64(div64_u64((
8688 			stream->timing.pix_clk_100hz * 100),
8689 			stream->timing.v_total),
8690 			stream->timing.h_total);
8691 
8692 	/*
8693 	 * Round up: calculate the number of frames such that at least
8694 	 * 30 ms of time has passed.
8695 	 */
8696 	if (vsync_rate_hz != 0) {
8697 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8698 		num_frames_static = (30000 / frame_time_microsec) + 1;
8699 	}
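	/*
	 * Illustrative example (values assumed): a 1920x1080@60 mode with
	 * pix_clk_100hz = 1485000, v_total = 1125 and h_total = 2200 gives
	 * vsync_rate_hz = 148500000 / 1125 / 2200 = 60, so
	 * frame_time_microsec = 16666 and num_frames_static =
	 * 30000 / 16666 + 1 = 2 (about 33 ms of static screen).
	 */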
8700 
8701 	params.triggers.cursor_update = true;
8702 	params.triggers.overlay_update = true;
8703 	params.triggers.surface_update = true;
8704 	params.num_frames = num_frames_static;
8705 
8706 	dc_stream_set_static_screen_params(link->ctx->dc,
8707 					   &stream, 1,
8708 					   &params);
8709 
8710 	return dc_link_set_psr_allow_active(link, true, false);
8711 }
8712 
8713 /**
8714  * amdgpu_dm_psr_disable() - disable the PSR firmware
8715  * @stream: stream state
8716  *
8717  * Return: true on success
8718  */
8719 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8720 {
8721 
8722 	DRM_DEBUG_DRIVER("Disabling psr...\n");
8723 
8724 	return dc_link_set_psr_allow_active(stream->link, false, true);
8725 }
8726