xref: /openbsd-src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 824adb5411e4389b29bae28eba5c2c2bbd147f34)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
105 
106 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
107 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
108 
109 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
111 
112 /* Number of bytes in PSP header for firmware. */
113 #define PSP_HEADER_BYTES 0x100
114 
115 /* Number of bytes in PSP footer for firmware. */
116 #define PSP_FOOTER_BYTES 0x100
117 
118 /**
119  * DOC: overview
120  *
121  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
122  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
123  * requests into DC requests, and DC responses into DRM responses.
124  *
125  * The root control structure is &struct amdgpu_display_manager.
126  */
127 
128 /* basic init/fini API */
129 static int amdgpu_dm_init(struct amdgpu_device *adev);
130 static void amdgpu_dm_fini(struct amdgpu_device *adev);
131 
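/*
 * Map the dongle type reported in the link's DPCD caps to the
 * corresponding DRM subconnector property value.
 */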
132 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
133 {
134 	switch (link->dpcd_caps.dongle_type) {
135 	case DISPLAY_DONGLE_NONE:
136 		return DRM_MODE_SUBCONNECTOR_Native;
137 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
138 		return DRM_MODE_SUBCONNECTOR_VGA;
139 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
140 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
141 		return DRM_MODE_SUBCONNECTOR_DVID;
142 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_HDMIA;
145 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
146 	default:
147 		return DRM_MODE_SUBCONNECTOR_Unknown;
148 	}
149 }
150 
151 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
152 {
153 	struct dc_link *link = aconnector->dc_link;
154 	struct drm_connector *connector = &aconnector->base;
155 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
156 
157 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
158 		return;
159 
160 	if (aconnector->dc_sink)
161 		subconnector = get_subconnector_type(link);
162 
163 	drm_object_property_set_value(&connector->base,
164 			connector->dev->mode_config.dp_subconnector_property,
165 			subconnector);
166 }
167 
168 /*
169  * Initializes drm_device display-related structures, based on the information
170  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
171  * drm_encoder, drm_mode_config.
172  *
173  * Returns 0 on success
174  */
175 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
176 /* removes and deallocates the drm structures, created by the above function */
177 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
178 
179 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
180 				struct drm_plane *plane,
181 				unsigned long possible_crtcs,
182 				const struct dc_plane_cap *plane_cap);
183 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
184 			       struct drm_plane *plane,
185 			       uint32_t link_index);
186 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
187 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
188 				    uint32_t link_index,
189 				    struct amdgpu_encoder *amdgpu_encoder);
190 static int amdgpu_dm_encoder_init(struct drm_device *dev,
191 				  struct amdgpu_encoder *aencoder,
192 				  uint32_t link_index);
193 
194 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
195 
196 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
197 				   struct drm_atomic_state *state,
198 				   bool nonblock);
199 
200 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 
202 static int amdgpu_dm_atomic_check(struct drm_device *dev,
203 				  struct drm_atomic_state *state);
204 
205 static void handle_cursor_update(struct drm_plane *plane,
206 				 struct drm_plane_state *old_plane_state);
207 
208 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
209 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
212 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 
214 /*
215  * dm_vblank_get_counter
216  *
217  * @brief
218  * Get counter for number of vertical blanks
219  *
220  * @param
221  * struct amdgpu_device *adev - [in] desired amdgpu device
222  * int crtc - [in] which CRTC to get the counter from
223  *
224  * @return
225  * Counter for vertical blanks
226  */
227 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
228 {
229 	if (crtc >= adev->mode_info.num_crtc)
230 		return 0;
231 	else {
232 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
233 
234 		if (acrtc->dm_irq_params.stream == NULL) {
235 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
236 				  crtc);
237 			return 0;
238 		}
239 
240 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
241 	}
242 }
243 
244 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
245 				  u32 *vbl, u32 *position)
246 {
247 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
248 
249 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
250 		return -EINVAL;
251 	else {
252 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
253 
254 		if (acrtc->dm_irq_params.stream == NULL) {
255 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
256 				  crtc);
257 			return 0;
258 		}
259 
260 		/*
261 		 * TODO rework base driver to use values directly.
262 		 * for now parse it back into reg-format
263 		 */
264 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
265 					 &v_blank_start,
266 					 &v_blank_end,
267 					 &h_position,
268 					 &v_position);
269 
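		/*
		 * Pack the values the way the scanout position registers
		 * report them: vertical in the low 16 bits, horizontal
		 * (or vblank end) in the high 16 bits.
		 */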
270 		*position = v_position | (h_position << 16);
271 		*vbl = v_blank_start | (v_blank_end << 16);
272 	}
273 
274 	return 0;
275 }
276 
277 static bool dm_is_idle(void *handle)
278 {
279 	/* XXX todo */
280 	return true;
281 }
282 
283 static int dm_wait_for_idle(void *handle)
284 {
285 	/* XXX todo */
286 	return 0;
287 }
288 
289 static bool dm_check_soft_reset(void *handle)
290 {
291 	return false;
292 }
293 
294 static int dm_soft_reset(void *handle)
295 {
296 	/* XXX todo */
297 	return 0;
298 }
299 
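/*
 * Each hardware CRTC is driven by an OTG (output timing generator)
 * instance; interrupt sources report the OTG instance rather than the
 * DRM CRTC, so walk the CRTC list to map one back to the other.
 */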
300 static struct amdgpu_crtc *
301 get_crtc_by_otg_inst(struct amdgpu_device *adev,
302 		     int otg_inst)
303 {
304 	struct drm_device *dev = adev_to_drm(adev);
305 	struct drm_crtc *crtc;
306 	struct amdgpu_crtc *amdgpu_crtc;
307 
308 	if (otg_inst == -1) {
309 		WARN_ON(1);
310 		return adev->mode_info.crtcs[0];
311 	}
312 
313 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
314 		amdgpu_crtc = to_amdgpu_crtc(crtc);
315 
316 		if (amdgpu_crtc->otg_inst == otg_inst)
317 			return amdgpu_crtc;
318 	}
319 
320 	return NULL;
321 }
322 
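/*
 * VRR-active tests: the _irq variant reads the freesync state from the
 * per-CRTC dm_irq_params copy that is maintained for use in the
 * high-IRQ handlers below.
 */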
323 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
324 {
325 	return acrtc->dm_irq_params.freesync_config.state ==
326 		       VRR_STATE_ACTIVE_VARIABLE ||
327 	       acrtc->dm_irq_params.freesync_config.state ==
328 		       VRR_STATE_ACTIVE_FIXED;
329 }
330 
331 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
332 {
333 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
334 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
335 }
336 
337 /**
338  * dm_pflip_high_irq() - Handle pageflip interrupt
339  * @interrupt_params: common IRQ parameters, identifying the device and IRQ source
340  *
341  * Handles the pageflip interrupt by notifying all interested parties
342  * that the pageflip has been completed.
343  */
344 static void dm_pflip_high_irq(void *interrupt_params)
345 {
346 	struct amdgpu_crtc *amdgpu_crtc;
347 	struct common_irq_params *irq_params = interrupt_params;
348 	struct amdgpu_device *adev = irq_params->adev;
349 	unsigned long flags;
350 	struct drm_pending_vblank_event *e;
351 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
352 	bool vrr_active;
353 
354 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
355 
356 	/* IRQ could occur when in initial stage */
357 	/* TODO work and BO cleanup */
358 	if (amdgpu_crtc == NULL) {
359 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
360 		return;
361 	}
362 
363 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
364 
365 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
366 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
367 						 amdgpu_crtc->pflip_status,
368 						 AMDGPU_FLIP_SUBMITTED,
369 						 amdgpu_crtc->crtc_id,
370 						 amdgpu_crtc);
371 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
372 		return;
373 	}
374 
375 	/* page flip completed. */
376 	e = amdgpu_crtc->event;
377 	amdgpu_crtc->event = NULL;
378 
379 	if (!e)
380 		WARN_ON(1);
381 
382 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
383 
384 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
385 	if (!vrr_active ||
386 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
387 				      &v_blank_end, &hpos, &vpos) ||
388 	    (vpos < v_blank_start)) {
389 		/* Update to correct count and vblank timestamp if racing with
390 		 * vblank irq. This also updates to the correct vblank timestamp
391 		 * even in VRR mode, as scanout is past the front-porch atm.
392 		 */
393 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
394 
395 		/* Wake up userspace by sending the pageflip event with proper
396 		 * count and timestamp of vblank of flip completion.
397 		 */
398 		if (e) {
399 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
400 
401 			/* Event sent, so done with vblank for this flip */
402 			drm_crtc_vblank_put(&amdgpu_crtc->base);
403 		}
404 	} else if (e) {
405 		/* VRR active and inside front-porch: vblank count and
406 		 * timestamp for pageflip event will only be up to date after
407 		 * drm_crtc_handle_vblank() has been executed from late vblank
408 		 * irq handler after start of back-porch (vline 0). We queue the
409 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
410 		 * updated timestamp and count, once it runs after us.
411 		 *
412 		 * We need to open-code this instead of using the helper
413 		 * drm_crtc_arm_vblank_event(), as that helper would
414 		 * call drm_crtc_accurate_vblank_count(), which we must
415 		 * not call in VRR mode while we are in front-porch!
416 		 */
417 
418 		/* sequence will be replaced by real count during send-out. */
419 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
420 		e->pipe = amdgpu_crtc->crtc_id;
421 
422 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
423 		e = NULL;
424 	}
425 
426 	/* Keep track of the vblank of this flip for flip throttling. We use
427 	 * the cooked hw counter, as it incremented at the start of this
428 	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
429 	 * count for queueing new pageflips if vsync + VRR is enabled.
430 	 */
431 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
432 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
433 
434 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
435 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
436 
437 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
438 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
439 			 vrr_active, (int) !e);
440 }
441 
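/*
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: common IRQ parameters, identifying the device and IRQ source
 *
 * In VRR mode, the end of front-porch (VUPDATE) is where core vblank
 * handling and BTR processing happen, since vblank timestamps are only
 * valid once scanout has left the front-porch.
 */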
442 static void dm_vupdate_high_irq(void *interrupt_params)
443 {
444 	struct common_irq_params *irq_params = interrupt_params;
445 	struct amdgpu_device *adev = irq_params->adev;
446 	struct amdgpu_crtc *acrtc;
447 	unsigned long flags;
448 	int vrr_active;
449 
450 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
451 
452 	if (acrtc) {
453 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
454 
455 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
456 			      acrtc->crtc_id,
457 			      vrr_active);
458 
459 		/* Core vblank handling is done here after the end of front-porch
460 		 * in VRR mode, as vblank timestamping only gives valid results
461 		 * once scanout is past the front-porch. This also delivers any
462 		 * page-flip completion events that were queued to us because a
463 		 * pageflip happened inside the front-porch.
464 		 */
465 		if (vrr_active) {
466 			drm_crtc_handle_vblank(&acrtc->base);
467 
468 			/* BTR processing for pre-DCE12 ASICs */
469 			if (acrtc->dm_irq_params.stream &&
470 			    adev->family < AMDGPU_FAMILY_AI) {
471 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
472 				mod_freesync_handle_v_update(
473 				    adev->dm.freesync_module,
474 				    acrtc->dm_irq_params.stream,
475 				    &acrtc->dm_irq_params.vrr_params);
476 
477 				dc_stream_adjust_vmin_vmax(
478 				    adev->dm.dc,
479 				    acrtc->dm_irq_params.stream,
480 				    &acrtc->dm_irq_params.vrr_params.adjust);
481 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
482 			}
483 		}
484 	}
485 }
486 
487 /**
488  * dm_crtc_high_irq() - Handles CRTC interrupt
489  * @interrupt_params: used for determining the CRTC instance
490  *
491  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
492  * event handler.
493  */
494 static void dm_crtc_high_irq(void *interrupt_params)
495 {
496 	struct common_irq_params *irq_params = interrupt_params;
497 	struct amdgpu_device *adev = irq_params->adev;
498 	struct amdgpu_crtc *acrtc;
499 	unsigned long flags;
500 	int vrr_active;
501 
502 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
503 	if (!acrtc)
504 		return;
505 
506 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
507 
508 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
509 		      vrr_active, acrtc->dm_irq_params.active_planes);
510 
511 	/*
512 	 * Core vblank handling at the start of front-porch is only possible
513 	 * in non-VRR mode, as only then does vblank timestamping give valid
514 	 * results while inside the front-porch. Otherwise defer it to
515 	 * dm_vupdate_high_irq after the end of front-porch.
516 	 */
517 	if (!vrr_active)
518 		drm_crtc_handle_vblank(&acrtc->base);
519 
520 	/*
521 	 * The following must happen at the start of vblank, for CRC
522 	 * computation and below-the-range (BTR) support in VRR mode.
523 	 */
524 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
525 
526 	/* BTR updates need to happen before VUPDATE on Vega and above. */
527 	if (adev->family < AMDGPU_FAMILY_AI)
528 		return;
529 
530 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
531 
532 	if (acrtc->dm_irq_params.stream &&
533 	    acrtc->dm_irq_params.vrr_params.supported &&
534 	    acrtc->dm_irq_params.freesync_config.state ==
535 		    VRR_STATE_ACTIVE_VARIABLE) {
536 		mod_freesync_handle_v_update(adev->dm.freesync_module,
537 					     acrtc->dm_irq_params.stream,
538 					     &acrtc->dm_irq_params.vrr_params);
539 
540 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
541 					   &acrtc->dm_irq_params.vrr_params.adjust);
542 	}
543 
544 	/*
545 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
546 	 * In that case, pageflip completion interrupts won't fire and pageflip
547 	 * completion events won't get delivered. Prevent this by sending
548 	 * pending pageflip events from here if a flip is still pending.
549 	 *
550 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
551 	 * avoid race conditions between flip programming and completion,
552 	 * which could cause too early flip completion events.
553 	 */
554 	if (adev->family >= AMDGPU_FAMILY_RV &&
555 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
556 	    acrtc->dm_irq_params.active_planes == 0) {
557 		if (acrtc->event) {
558 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
559 			acrtc->event = NULL;
560 			drm_crtc_vblank_put(&acrtc->base);
561 		}
562 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
563 	}
564 
565 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
566 }
567 
568 static int dm_set_clockgating_state(void *handle,
569 		  enum amd_clockgating_state state)
570 {
571 	return 0;
572 }
573 
574 static int dm_set_powergating_state(void *handle,
575 		  enum amd_powergating_state state)
576 {
577 	return 0;
578 }
579 
580 /* Prototypes of private functions */
581 static int dm_early_init(void *handle);
582 
583 /* Allocate memory for FBC compressed data */
584 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
585 {
586 	struct drm_device *dev = connector->dev;
587 	struct amdgpu_device *adev = drm_to_adev(dev);
588 	struct dm_compressor_info *compressor = &adev->dm.compressor;
589 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
590 	struct drm_display_mode *mode;
591 	unsigned long max_size = 0;
592 
593 	if (adev->dm.dc->fbc_compressor == NULL)
594 		return;
595 
596 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
597 		return;
598 
599 	if (compressor->bo_ptr)
600 		return;
601 
602 
603 	list_for_each_entry(mode, &connector->modes, head) {
604 		if (max_size < mode->htotal * mode->vtotal)
605 			max_size = mode->htotal * mode->vtotal;
606 	}
607 
608 	if (max_size) {
609 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
610 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
611 			    &compressor->gpu_addr, &compressor->cpu_addr);
612 
613 		if (r) {
614 			DRM_ERROR("DM: Failed to initialize FBC\n");
615 		} else {
616 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
617 			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
618 		}
619 
620 	}
621 
622 }
623 
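/*
 * DRM audio component glue: lets the HD-audio driver query the ELD
 * (EDID-Like Data) for a given port so it can describe the sink's
 * audio capabilities to userspace.
 */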
624 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
625 					  int pipe, bool *enabled,
626 					  unsigned char *buf, int max_bytes)
627 {
628 	struct drm_device *dev = dev_get_drvdata(kdev);
629 	struct amdgpu_device *adev = drm_to_adev(dev);
630 	struct drm_connector *connector;
631 	struct drm_connector_list_iter conn_iter;
632 	struct amdgpu_dm_connector *aconnector;
633 	int ret = 0;
634 
635 	*enabled = false;
636 
637 	mutex_lock(&adev->dm.audio_lock);
638 
639 	drm_connector_list_iter_begin(dev, &conn_iter);
640 	drm_for_each_connector_iter(connector, &conn_iter) {
641 		aconnector = to_amdgpu_dm_connector(connector);
642 		if (aconnector->audio_inst != port)
643 			continue;
644 
645 		*enabled = true;
646 		ret = drm_eld_size(connector->eld);
647 		memcpy(buf, connector->eld, min(max_bytes, ret));
648 
649 		break;
650 	}
651 	drm_connector_list_iter_end(&conn_iter);
652 
653 	mutex_unlock(&adev->dm.audio_lock);
654 
655 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
656 
657 	return ret;
658 }
659 
660 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
661 	.get_eld = amdgpu_dm_audio_component_get_eld,
662 };
663 
664 static int amdgpu_dm_audio_component_bind(struct device *kdev,
665 				       struct device *hda_kdev, void *data)
666 {
667 	struct drm_device *dev = dev_get_drvdata(kdev);
668 	struct amdgpu_device *adev = drm_to_adev(dev);
669 	struct drm_audio_component *acomp = data;
670 
671 	acomp->ops = &amdgpu_dm_audio_component_ops;
672 	acomp->dev = kdev;
673 	adev->dm.audio_component = acomp;
674 
675 	return 0;
676 }
677 
678 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
679 					  struct device *hda_kdev, void *data)
680 {
681 	struct drm_device *dev = dev_get_drvdata(kdev);
682 	struct amdgpu_device *adev = drm_to_adev(dev);
683 	struct drm_audio_component *acomp = data;
684 
685 	acomp->ops = NULL;
686 	acomp->dev = NULL;
687 	adev->dm.audio_component = NULL;
688 }
689 
690 #ifdef notyet
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 #endif
696 
697 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
698 {
699 	int i, ret;
700 
701 	if (!amdgpu_audio)
702 		return 0;
703 
704 	adev->mode_info.audio.enabled = true;
705 
706 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707 
708 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
709 		adev->mode_info.audio.pin[i].channels = -1;
710 		adev->mode_info.audio.pin[i].rate = -1;
711 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
712 		adev->mode_info.audio.pin[i].status_bits = 0;
713 		adev->mode_info.audio.pin[i].category_code = 0;
714 		adev->mode_info.audio.pin[i].connected = false;
715 		adev->mode_info.audio.pin[i].id =
716 			adev->dm.dc->res_pool->audios[i]->inst;
717 		adev->mode_info.audio.pin[i].offset = 0;
718 	}
719 
720 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
721 	if (ret < 0)
722 		return ret;
723 
724 	adev->dm.audio_registered = true;
725 
726 	return 0;
727 }
728 
729 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
730 {
731 	if (!amdgpu_audio)
732 		return;
733 
734 	if (!adev->mode_info.audio.enabled)
735 		return;
736 
737 	if (adev->dm.audio_registered) {
738 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
739 		adev->dm.audio_registered = false;
740 	}
741 
742 	/* TODO: Disable audio? */
743 
744 	adev->mode_info.audio.enabled = false;
745 }
746 
747 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 {
749 	struct drm_audio_component *acomp = adev->dm.audio_component;
750 
751 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
752 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753 
754 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
755 						 pin, -1);
756 	}
757 }
758 
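/*
 * dm_dmub_hw_init() - Bring up the DMUB microcontroller, if present.
 *
 * Copies the firmware's instruction, data and VBIOS regions into the
 * framebuffer windows reserved by dm_dmub_sw_init(), programs the
 * hardware parameters, and waits for the firmware auto-load to finish.
 * Returns 0 when DMUB is unsupported on the ASIC.
 */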
759 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 {
761 	const struct dmcub_firmware_header_v1_0 *hdr;
762 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
763 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
764 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
765 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
766 	struct abm *abm = adev->dm.dc->res_pool->abm;
767 	struct dmub_srv_hw_params hw_params;
768 	enum dmub_status status;
769 	const unsigned char *fw_inst_const, *fw_bss_data;
770 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
771 	bool has_hw_support;
772 
773 	if (!dmub_srv)
774 		/* DMUB isn't supported on the ASIC. */
775 		return 0;
776 
777 	if (!fb_info) {
778 		DRM_ERROR("No framebuffer info for DMUB service.\n");
779 		return -EINVAL;
780 	}
781 
782 	if (!dmub_fw) {
783 		/* Firmware required for DMUB support. */
784 		DRM_ERROR("No firmware provided for DMUB.\n");
785 		return -EINVAL;
786 	}
787 
788 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
789 	if (status != DMUB_STATUS_OK) {
790 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
791 		return -EINVAL;
792 	}
793 
794 	if (!has_hw_support) {
795 		DRM_INFO("DMUB unsupported on ASIC\n");
796 		return 0;
797 	}
798 
799 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800 
801 	fw_inst_const = dmub_fw->data +
802 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
803 			PSP_HEADER_BYTES;
804 
805 	fw_bss_data = dmub_fw->data +
806 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 		      le32_to_cpu(hdr->inst_const_bytes);
808 
809 	/* Copy firmware and bios info into FB memory. */
810 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
811 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812 
813 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814 
815 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
816 	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of
817 	 * the dmub firmware to cw0; otherwise, the firmware back-door load
818 	 * is done here by dm_dmub_hw_init.
819 	 */
820 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
821 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
822 				fw_inst_const_size);
823 	}
824 
825 	if (fw_bss_data_size)
826 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
827 		       fw_bss_data, fw_bss_data_size);
828 
829 	/* Copy firmware bios info into FB memory. */
830 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
831 	       adev->bios_size);
832 
833 	/* Reset regions that need to be reset. */
834 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
835 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836 
837 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
838 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839 
840 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
841 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842 
843 	/* Initialize hardware. */
844 	memset(&hw_params, 0, sizeof(hw_params));
845 	hw_params.fb_base = adev->gmc.fb_start;
846 	hw_params.fb_offset = adev->gmc.aper_base;
847 
848 	/* backdoor load firmware and trigger dmub running */
849 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
850 		hw_params.load_inst_const = true;
851 
852 	if (dmcu)
853 		hw_params.psp_version = dmcu->psp_version;
854 
855 	for (i = 0; i < fb_info->num_fb; ++i)
856 		hw_params.fb[i] = &fb_info->fb[i];
857 
858 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
859 	if (status != DMUB_STATUS_OK) {
860 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
861 		return -EINVAL;
862 	}
863 
864 	/* Wait for firmware load to finish. */
865 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
866 	if (status != DMUB_STATUS_OK)
867 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868 
869 	/* Init DMCU and ABM if available. */
870 	if (dmcu && abm) {
871 		dmcu->funcs->dmcu_init(dmcu);
872 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
873 	}
874 
875 	if (!adev->dm.dc->ctx->dmub_srv)
876 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
877 	if (!adev->dm.dc->ctx->dmub_srv) {
878 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
879 		return -ENOMEM;
880 	}
881 
882 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
883 		 adev->dm.dmcub_fw_version);
884 
885 	return 0;
886 }
887 
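/*
 * If debugfs was used to force DSC settings on a connector, mark the
 * attached CRTC's mode as changed so the next atomic check recomputes
 * the stream with those overrides applied.
 */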
888 static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
889 							   struct drm_atomic_state *state)
890 {
891 	struct drm_connector *connector;
892 	struct drm_crtc *crtc;
893 	struct amdgpu_dm_connector *amdgpu_dm_connector;
894 	struct drm_connector_state *conn_state;
895 	struct dm_crtc_state *acrtc_state;
896 	struct drm_crtc_state *crtc_state;
897 	struct dc_stream_state *stream;
898 	struct drm_device *dev = adev_to_drm(adev);
899 
900 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
901 
902 		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
903 		conn_state = connector->state;
904 
905 		if (!(conn_state && conn_state->crtc))
906 			continue;
907 
908 		crtc = conn_state->crtc;
909 		acrtc_state = to_dm_crtc_state(crtc->state);
910 
911 		if (!(acrtc_state && acrtc_state->stream))
912 			continue;
913 
914 		stream = acrtc_state->stream;
915 
916 		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
917 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
918 		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
919 		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
920 			conn_state = drm_atomic_get_connector_state(state, connector);
921 			crtc_state = drm_atomic_get_crtc_state(state, crtc);
922 			crtc_state->mode_changed = true;
923 		}
924 	}
925 }
926 
927 static int amdgpu_dm_init(struct amdgpu_device *adev)
928 {
929 	struct dc_init_data init_data;
930 #ifdef CONFIG_DRM_AMD_DC_HDCP
931 	struct dc_callback_init init_params;
932 #endif
933 	int r;
934 
935 	adev->dm.ddev = adev_to_drm(adev);
936 	adev->dm.adev = adev;
937 
938 	/* Zero all the fields */
939 	memset(&init_data, 0, sizeof(init_data));
940 #ifdef CONFIG_DRM_AMD_DC_HDCP
941 	memset(&init_params, 0, sizeof(init_params));
942 #endif
943 
944 	rw_init(&adev->dm.dc_lock, "dmdc");
945 	rw_init(&adev->dm.audio_lock, "dmaud");
946 
947 	if (amdgpu_dm_irq_init(adev)) {
948 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
949 		goto error;
950 	}
951 
952 	init_data.asic_id.chip_family = adev->family;
953 
954 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
955 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
956 
957 	init_data.asic_id.vram_width = adev->gmc.vram_width;
958 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
959 	init_data.asic_id.atombios_base_address =
960 		adev->mode_info.atom_context->bios;
961 
962 	init_data.driver = adev;
963 
964 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
965 
966 	if (!adev->dm.cgs_device) {
967 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
968 		goto error;
969 	}
970 
971 	init_data.cgs_device = adev->dm.cgs_device;
972 
973 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
974 
975 	switch (adev->asic_type) {
976 	case CHIP_CARRIZO:
977 	case CHIP_STONEY:
978 	case CHIP_RAVEN:
979 	case CHIP_RENOIR:
980 		init_data.flags.gpu_vm_support = true;
981 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
982 			init_data.flags.disable_dmcu = true;
983 		break;
984 	default:
985 		break;
986 	}
987 
988 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
989 		init_data.flags.fbc_support = true;
990 
991 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
992 		init_data.flags.multi_mon_pp_mclk_switch = true;
993 
994 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
995 		init_data.flags.disable_fractional_pwm = true;
996 
997 	init_data.flags.power_down_display_on_boot = true;
998 
999 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1000 
1001 	/* Display Core create. */
1002 	adev->dm.dc = dc_create(&init_data);
1003 
1004 	if (adev->dm.dc) {
1005 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1006 	} else {
1007 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1008 		goto error;
1009 	}
1010 
1011 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1012 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1013 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1014 	}
1015 
1016 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1017 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1018 
1019 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1020 		adev->dm.dc->debug.disable_stutter = true;
1021 
1022 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1023 		adev->dm.dc->debug.disable_dsc = true;
1024 
1025 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1026 		adev->dm.dc->debug.disable_clock_gate = true;
1027 
1028 	r = dm_dmub_hw_init(adev);
1029 	if (r) {
1030 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1031 		goto error;
1032 	}
1033 
1034 	dc_hardware_init(adev->dm.dc);
1035 
1036 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1037 	if (!adev->dm.freesync_module) {
1038 		DRM_ERROR(
1039 		"amdgpu: failed to initialize freesync_module.\n");
1040 	} else
1041 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1042 				adev->dm.freesync_module);
1043 
1044 	amdgpu_dm_init_color_mod();
1045 
1046 #ifdef CONFIG_DRM_AMD_DC_HDCP
1047 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1048 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1049 
1050 		if (!adev->dm.hdcp_workqueue)
1051 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1052 		else
1053 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1054 
1055 		dc_init_callbacks(adev->dm.dc, &init_params);
1056 	}
1057 #endif
1058 	if (amdgpu_dm_initialize_drm_device(adev)) {
1059 		DRM_ERROR(
1060 		"amdgpu: failed to initialize sw for display support.\n");
1061 		goto error;
1062 	}
1063 
1064 	/* create fake encoders for MST */
1065 	dm_dp_create_fake_mst_encoders(adev);
1066 
1067 	/* TODO: Add_display_info? */
1068 
1069 	/* TODO use dynamic cursor width */
1070 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1071 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1072 
1073 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1074 		DRM_ERROR(
1075 		"amdgpu: failed to initialize vblank support.\n");
1076 		goto error;
1077 	}
1078 
1079 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1080 
1081 	return 0;
1082 error:
1083 	amdgpu_dm_fini(adev);
1084 
1085 	return -EINVAL;
1086 }
1087 
1088 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1089 {
1090 	int i;
1091 
1092 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1093 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1094 	}
1095 
1096 	amdgpu_dm_audio_fini(adev);
1097 
1098 	amdgpu_dm_destroy_drm_device(&adev->dm);
1099 
1100 #ifdef CONFIG_DRM_AMD_DC_HDCP
1101 	if (adev->dm.hdcp_workqueue) {
1102 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1103 		adev->dm.hdcp_workqueue = NULL;
1104 	}
1105 
1106 	if (adev->dm.dc)
1107 		dc_deinit_callbacks(adev->dm.dc);
1108 #endif
1109 	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1110 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1111 		adev->dm.dc->ctx->dmub_srv = NULL;
1112 	}
1113 
1114 	if (adev->dm.dmub_bo)
1115 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1116 				      &adev->dm.dmub_bo_gpu_addr,
1117 				      &adev->dm.dmub_bo_cpu_addr);
1118 
1119 	/* DC Destroy TODO: Replace destroy DAL */
1120 	if (adev->dm.dc)
1121 		dc_destroy(&adev->dm.dc);
1122 	/*
1123 	 * TODO: pageflip, vblank interrupt
1124 	 *
1125 	 * amdgpu_dm_irq_fini(adev);
1126 	 */
1127 
1128 	if (adev->dm.cgs_device) {
1129 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1130 		adev->dm.cgs_device = NULL;
1131 	}
1132 	if (adev->dm.freesync_module) {
1133 		mod_freesync_destroy(adev->dm.freesync_module);
1134 		adev->dm.freesync_module = NULL;
1135 	}
1136 
1137 	mutex_destroy(&adev->dm.audio_lock);
1138 	mutex_destroy(&adev->dm.dc_lock);
1139 
1140 	return;
1141 }
1142 
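/*
 * Request and validate DMCU firmware for the ASICs that need a separate
 * image (Picasso, Raven2 and Navi12); every other supported ASIC needs
 * no external DMCU image and simply returns 0 here.
 */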
1143 static int load_dmcu_fw(struct amdgpu_device *adev)
1144 {
1145 	const char *fw_name_dmcu = NULL;
1146 	int r;
1147 	const struct dmcu_firmware_header_v1_0 *hdr;
1148 
1149 	switch (adev->asic_type) {
1150 #if defined(CONFIG_DRM_AMD_DC_SI)
1151 	case CHIP_TAHITI:
1152 	case CHIP_PITCAIRN:
1153 	case CHIP_VERDE:
1154 	case CHIP_OLAND:
1155 #endif
1156 	case CHIP_BONAIRE:
1157 	case CHIP_HAWAII:
1158 	case CHIP_KAVERI:
1159 	case CHIP_KABINI:
1160 	case CHIP_MULLINS:
1161 	case CHIP_TONGA:
1162 	case CHIP_FIJI:
1163 	case CHIP_CARRIZO:
1164 	case CHIP_STONEY:
1165 	case CHIP_POLARIS11:
1166 	case CHIP_POLARIS10:
1167 	case CHIP_POLARIS12:
1168 	case CHIP_VEGAM:
1169 	case CHIP_VEGA10:
1170 	case CHIP_VEGA12:
1171 	case CHIP_VEGA20:
1172 	case CHIP_NAVI10:
1173 	case CHIP_NAVI14:
1174 	case CHIP_RENOIR:
1175 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1176 	case CHIP_SIENNA_CICHLID:
1177 	case CHIP_NAVY_FLOUNDER:
1178 #endif
1179 		return 0;
1180 	case CHIP_NAVI12:
1181 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1182 		break;
1183 	case CHIP_RAVEN:
1184 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1185 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1186 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1187 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1188 		else
1189 			return 0;
1190 		break;
1191 	default:
1192 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1193 		return -EINVAL;
1194 	}
1195 
1196 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1197 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1198 		return 0;
1199 	}
1200 
1201 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1202 	if (r == -ENOENT) {
1203 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1204 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1205 		adev->dm.fw_dmcu = NULL;
1206 		return 0;
1207 	}
1208 	if (r) {
1209 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1210 			fw_name_dmcu);
1211 		return r;
1212 	}
1213 
1214 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1215 	if (r) {
1216 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1217 			fw_name_dmcu);
1218 		release_firmware(adev->dm.fw_dmcu);
1219 		adev->dm.fw_dmcu = NULL;
1220 		return r;
1221 	}
1222 
1223 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1224 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1225 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1226 	adev->firmware.fw_size +=
1227 		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1228 
1229 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1230 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1231 	adev->firmware.fw_size +=
1232 		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1233 
1234 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1235 
1236 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1237 
1238 	return 0;
1239 }
1240 
1241 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1242 {
1243 	struct amdgpu_device *adev = ctx;
1244 
1245 	return dm_read_reg(adev->dm.dc->ctx, address);
1246 }
1247 
1248 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1249 				     uint32_t value)
1250 {
1251 	struct amdgpu_device *adev = ctx;
1252 
1253 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1254 }
1255 
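/*
 * dm_dmub_sw_init() - Software-side DMUB setup.
 *
 * Loads and validates the DMUB firmware for the ASIC, creates the DMUB
 * service, sizes its memory regions, and backs them with a VRAM buffer;
 * dm_dmub_hw_init() later copies the firmware into these regions.
 */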
1256 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1257 {
1258 	struct dmub_srv_create_params create_params;
1259 	struct dmub_srv_region_params region_params;
1260 	struct dmub_srv_region_info region_info;
1261 	struct dmub_srv_fb_params fb_params;
1262 	struct dmub_srv_fb_info *fb_info;
1263 	struct dmub_srv *dmub_srv;
1264 	const struct dmcub_firmware_header_v1_0 *hdr;
1265 	const char *fw_name_dmub;
1266 	enum dmub_asic dmub_asic;
1267 	enum dmub_status status;
1268 	int r;
1269 
1270 	switch (adev->asic_type) {
1271 	case CHIP_RENOIR:
1272 		dmub_asic = DMUB_ASIC_DCN21;
1273 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1274 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1275 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1276 		break;
1277 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1278 	case CHIP_SIENNA_CICHLID:
1279 		dmub_asic = DMUB_ASIC_DCN30;
1280 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1281 		break;
1282 	case CHIP_NAVY_FLOUNDER:
1283 		dmub_asic = DMUB_ASIC_DCN30;
1284 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1285 		break;
1286 #endif
1287 
1288 	default:
1289 		/* ASIC doesn't support DMUB. */
1290 		return 0;
1291 	}
1292 
1293 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1294 	if (r) {
1295 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1296 		return 0;
1297 	}
1298 
1299 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1300 	if (r) {
1301 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1302 		return 0;
1303 	}
1304 
1305 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1306 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1307 
1308 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1309 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1310 			AMDGPU_UCODE_ID_DMCUB;
1311 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1312 			adev->dm.dmub_fw;
1313 		adev->firmware.fw_size +=
1314 			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1315 
1316 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1317 			 adev->dm.dmcub_fw_version);
1318 	}
1319 
1320 
1321 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1322 	dmub_srv = adev->dm.dmub_srv;
1323 
1324 	if (!dmub_srv) {
1325 		DRM_ERROR("Failed to allocate DMUB service!\n");
1326 		return -ENOMEM;
1327 	}
1328 
1329 	memset(&create_params, 0, sizeof(create_params));
1330 	create_params.user_ctx = adev;
1331 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1332 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1333 	create_params.asic = dmub_asic;
1334 
1335 	/* Create the DMUB service. */
1336 	status = dmub_srv_create(dmub_srv, &create_params);
1337 	if (status != DMUB_STATUS_OK) {
1338 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1339 		return -EINVAL;
1340 	}
1341 
1342 	/* Calculate the size of all the regions for the DMUB service. */
1343 	memset(&region_params, 0, sizeof(region_params));
1344 
1345 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1346 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1347 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1348 	region_params.vbios_size = adev->bios_size;
1349 	region_params.fw_bss_data = region_params.bss_data_size ?
1350 		adev->dm.dmub_fw->data +
1351 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1352 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1353 	region_params.fw_inst_const =
1354 		adev->dm.dmub_fw->data +
1355 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1356 		PSP_HEADER_BYTES;
1357 
1358 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1359 					   &region_info);
1360 
1361 	if (status != DMUB_STATUS_OK) {
1362 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1363 		return -EINVAL;
1364 	}
1365 
1366 	/*
1367 	 * Allocate a framebuffer based on the total size of all the regions.
1368 	 * TODO: Move this into GART.
1369 	 */
1370 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1371 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1372 				    &adev->dm.dmub_bo_gpu_addr,
1373 				    &adev->dm.dmub_bo_cpu_addr);
1374 	if (r)
1375 		return r;
1376 
1377 	/* Rebase the regions on the framebuffer address. */
1378 	memset(&fb_params, 0, sizeof(fb_params));
1379 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1380 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1381 	fb_params.region_info = &region_info;
1382 
1383 	adev->dm.dmub_fb_info =
1384 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1385 	fb_info = adev->dm.dmub_fb_info;
1386 
1387 	if (!fb_info) {
1388 		DRM_ERROR(
1389 			"Failed to allocate framebuffer info for DMUB service!\n");
1390 		return -ENOMEM;
1391 	}
1392 
1393 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1394 	if (status != DMUB_STATUS_OK) {
1395 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1396 		return -EINVAL;
1397 	}
1398 
1399 	return 0;
1400 }
1401 
1402 static int dm_sw_init(void *handle)
1403 {
1404 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1405 	int r;
1406 
1407 	r = dm_dmub_sw_init(adev);
1408 	if (r)
1409 		return r;
1410 
1411 	return load_dmcu_fw(adev);
1412 }
1413 
1414 static int dm_sw_fini(void *handle)
1415 {
1416 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1417 
1418 	kfree(adev->dm.dmub_fb_info);
1419 	adev->dm.dmub_fb_info = NULL;
1420 
1421 	if (adev->dm.dmub_srv) {
1422 		dmub_srv_destroy(adev->dm.dmub_srv);
1423 		adev->dm.dmub_srv = NULL;
1424 	}
1425 
1426 	release_firmware(adev->dm.dmub_fw);
1427 	adev->dm.dmub_fw = NULL;
1428 
1429 	release_firmware(adev->dm.fw_dmcu);
1430 	adev->dm.fw_dmcu = NULL;
1431 
1432 	return 0;
1433 }
1434 
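/*
 * Walk all connectors and start MST topology management on every link
 * that detected an MST branch device; on failure, fall back to treating
 * the link as a single (SST) connection.
 */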
1435 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1436 {
1437 	struct amdgpu_dm_connector *aconnector;
1438 	struct drm_connector *connector;
1439 	struct drm_connector_list_iter iter;
1440 	int ret = 0;
1441 
1442 	drm_connector_list_iter_begin(dev, &iter);
1443 	drm_for_each_connector_iter(connector, &iter) {
1444 		aconnector = to_amdgpu_dm_connector(connector);
1445 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1446 		    aconnector->mst_mgr.aux) {
1447 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1448 					 aconnector,
1449 					 aconnector->base.base.id);
1450 
1451 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1452 			if (ret < 0) {
1453 				DRM_ERROR("DM_MST: Failed to start MST\n");
1454 				aconnector->dc_link->type =
1455 					dc_connection_single;
1456 				break;
1457 			}
1458 		}
1459 	}
1460 	drm_connector_list_iter_end(&iter);
1461 
1462 	return ret;
1463 }
1464 
1465 static int dm_late_init(void *handle)
1466 {
1467 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1468 
1469 	struct dmcu_iram_parameters params;
1470 	unsigned int linear_lut[16];
1471 	int i;
1472 	struct dmcu *dmcu = NULL;
1473 	bool ret = true;
1474 
1475 	dmcu = adev->dm.dc->res_pool->dmcu;
1476 
1477 	for (i = 0; i < 16; i++)
1478 		linear_lut[i] = 0xFFFF * i / 15;
1479 
1480 	params.set = 0;
1481 	params.backlight_ramping_start = 0xCCCC;
1482 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1483 	params.backlight_lut_array_size = 16;
1484 	params.backlight_lut_array = linear_lut;
1485 
1486 	/* Min backlight level after ABM reduction; don't allow below 1%:
1487 	 * 0xFFFF * 0.01 = 0x28F
1488 	 */
1489 	params.min_abm_backlight = 0x28F;
1490 
1491 	/* In the case where abm is implemented on dmcub,
1492 	 * dmcu object will be null.
1493 	 * ABM 2.4 and up are implemented on dmcub.
1494 	 */
1495 	if (dmcu)
1496 		ret = dmcu_load_iram(dmcu, params);
1497 	else if (adev->dm.dc->ctx->dmub_srv)
1498 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1499 
1500 	if (!ret)
1501 		return -EINVAL;
1502 
1503 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1504 }
1505 
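/*
 * Suspend or resume MST topology managers around S3. If a topology
 * fails to resume, tear down its MST state and request a hotplug event
 * so userspace re-probes the tree.
 */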
1506 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1507 {
1508 	struct amdgpu_dm_connector *aconnector;
1509 	struct drm_connector *connector;
1510 	struct drm_connector_list_iter iter;
1511 	struct drm_dp_mst_topology_mgr *mgr;
1512 	int ret;
1513 	bool need_hotplug = false;
1514 
1515 	drm_connector_list_iter_begin(dev, &iter);
1516 	drm_for_each_connector_iter(connector, &iter) {
1517 		aconnector = to_amdgpu_dm_connector(connector);
1518 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1519 		    aconnector->mst_port)
1520 			continue;
1521 
1522 		mgr = &aconnector->mst_mgr;
1523 
1524 		if (suspend) {
1525 			drm_dp_mst_topology_mgr_suspend(mgr);
1526 		} else {
1527 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1528 			if (ret < 0) {
1529 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1530 				need_hotplug = true;
1531 			}
1532 		}
1533 	}
1534 	drm_connector_list_iter_end(&iter);
1535 
1536 	if (need_hotplug)
1537 		drm_kms_helper_hotplug_event(dev);
1538 }
1539 
1540 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1541 {
1542 	struct smu_context *smu = &adev->smu;
1543 	int ret = 0;
1544 
1545 	if (!is_support_sw_smu(adev))
1546 		return 0;
1547 
1548 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1549 	 * depends on the Windows driver dc implementation.
1550 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1551 	 * should be passed to smu during boot up and resume from S3.
1552 	 * Boot up: dc calculates dcn watermark clock settings within dc_create,
1553 	 * dcn20_resource_construct
1554 	 * then calls the pplib functions below to pass the settings to smu:
1555 	 * smu_set_watermarks_for_clock_ranges
1556 	 * smu_set_watermarks_table
1557 	 * navi10_set_watermarks_table
1558 	 * smu_write_watermarks_table
1559 	 *
1560 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1561 	 * dc has implemented a different flow for the Windows driver:
1562 	 * dc_hardware_init / dc_set_power_state
1563 	 * dcn10_init_hw
1564 	 * notify_wm_ranges
1565 	 * set_wm_ranges
1566 	 * -- Linux
1567 	 * smu_set_watermarks_for_clock_ranges
1568 	 * renoir_set_watermarks_table
1569 	 * smu_write_watermarks_table
1570 	 *
1571 	 * For Linux,
1572 	 * dc_hardware_init -> amdgpu_dm_init
1573 	 * dc_set_power_state --> dm_resume
1574 	 *
1575 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
1576 	 *
1577 	 */
1578 	switch (adev->asic_type) {
1579 	case CHIP_NAVI10:
1580 	case CHIP_NAVI14:
1581 	case CHIP_NAVI12:
1582 		break;
1583 	default:
1584 		return 0;
1585 	}
1586 
1587 	ret = smu_write_watermarks_table(smu);
1588 	if (ret) {
1589 		DRM_ERROR("Failed to update WMTABLE!\n");
1590 		return ret;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * dm_hw_init() - Initialize DC device
1598  * @handle: The base driver device containing the amdgpu_dm device.
1599  *
1600  * Initialize the &struct amdgpu_display_manager device. This involves calling
1601  * the initializers of each DM component, then populating the struct with them.
1602  *
1603  * Although the function implies hardware initialization, both hardware and
1604  * software are initialized here. Splitting them out to their relevant init
1605  * hooks is a future TODO item.
1606  *
1607  * Some notable things that are initialized here:
1608  *
1609  * - Display Core, both software and hardware
1610  * - DC modules that we need (freesync and color management)
1611  * - DRM software states
1612  * - Interrupt sources and handlers
1613  * - Vblank support
1614  * - Debug FS entries, if enabled
1615  */
1616 static int dm_hw_init(void *handle)
1617 {
1618 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1619 	/* Create DAL display manager */
1620 	amdgpu_dm_init(adev);
1621 	amdgpu_dm_hpd_init(adev);
1622 
1623 	return 0;
1624 }
1625 
1626 /**
1627  * dm_hw_fini() - Teardown DC device
1628  * @handle: The base driver device containing the amdgpu_dm device.
1629  *
1630  * Teardown components within &struct amdgpu_display_manager that require
1631  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1632  * were loaded. Also flush IRQ workqueues and disable them.
1633  */
1634 static int dm_hw_fini(void *handle)
1635 {
1636 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1637 
1638 	amdgpu_dm_hpd_fini(adev);
1639 
1640 	amdgpu_dm_irq_fini(adev);
1641 	amdgpu_dm_fini(adev);
1642 	return 0;
1643 }
1644 
1645 
1646 static int dm_enable_vblank(struct drm_crtc *crtc);
1647 static void dm_disable_vblank(struct drm_crtc *crtc);
1648 
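/*
 * Enable or disable pageflip and vblank interrupts for every stream in
 * the given DC state that still has planes; used to quiesce and restore
 * display interrupts across a GPU reset.
 */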
1649 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1650 				 struct dc_state *state, bool enable)
1651 {
1652 	enum dc_irq_source irq_source;
1653 	struct amdgpu_crtc *acrtc;
1654 	int rc = -EBUSY;
1655 	int i = 0;
1656 
1657 	for (i = 0; i < state->stream_count; i++) {
1658 		acrtc = get_crtc_by_otg_inst(
1659 				adev, state->stream_status[i].primary_otg_inst);
1660 
1661 		if (acrtc && state->stream_status[i].plane_count != 0) {
1662 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1663 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1664 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1665 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1666 			if (rc)
1667 				DRM_WARN("Failed to %s pflip interrupts\n",
1668 					 enable ? "enable" : "disable");
1669 
1670 			if (enable) {
1671 				rc = dm_enable_vblank(&acrtc->base);
1672 				if (rc)
1673 					DRM_WARN("Failed to enable vblank interrupts\n");
1674 			} else {
1675 				dm_disable_vblank(&acrtc->base);
1676 			}
1677 
1678 		}
1679 	}
1680 
1681 }
1682 
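/*
 * Commit a DC state with every stream (and its planes) removed, used
 * during suspend/GPU reset to take the hardware down to zero streams.
 */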
1683 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1684 {
1685 	struct dc_state *context = NULL;
1686 	enum dc_status res = DC_ERROR_UNEXPECTED;
1687 	int i;
1688 	struct dc_stream_state *del_streams[MAX_PIPES];
1689 	int del_streams_count = 0;
1690 
1691 	memset(del_streams, 0, sizeof(del_streams));
1692 
1693 	context = dc_create_state(dc);
1694 	if (context == NULL)
1695 		goto context_alloc_fail;
1696 
1697 	dc_resource_state_copy_construct_current(dc, context);
1698 
1699 	/* First remove from context all streams */
1700 	for (i = 0; i < context->stream_count; i++) {
1701 		struct dc_stream_state *stream = context->streams[i];
1702 
1703 		del_streams[del_streams_count++] = stream;
1704 	}
1705 
1706 	/* Remove all planes for removed streams and then remove the streams */
1707 	for (i = 0; i < del_streams_count; i++) {
1708 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1709 			res = DC_FAIL_DETACH_SURFACES;
1710 			goto fail;
1711 		}
1712 
1713 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1714 		if (res != DC_OK)
1715 			goto fail;
1716 	}
1717 
1718 
1719 	res = dc_validate_global_state(dc, context, false);
1720 
1721 	if (res != DC_OK) {
1722 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1723 		goto fail;
1724 	}
1725 
1726 	res = dc_commit_state(dc, context);
1727 
1728 fail:
1729 	dc_release_state(context);
1730 
1731 context_alloc_fail:
1732 	return res;
1733 }
1734 
1735 static int dm_suspend(void *handle)
1736 {
1737 	struct amdgpu_device *adev = handle;
1738 	struct amdgpu_display_manager *dm = &adev->dm;
1739 	int ret = 0;
1740 
1741 	if (amdgpu_in_reset(adev)) {
1742 		mutex_lock(&dm->dc_lock);
1743 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1744 
1745 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1746 
1747 		amdgpu_dm_commit_zero_streams(dm->dc);
1748 
1749 		amdgpu_dm_irq_suspend(adev);
1750 
1751 		return ret;
1752 	}
1753 
1754 	WARN_ON(adev->dm.cached_state);
1755 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1756 
1757 	s3_handle_mst(adev_to_drm(adev), true);
1758 
1759 	amdgpu_dm_irq_suspend(adev);
1760 
1761 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1762 
1763 	return 0;
1764 }
1765 
1766 static struct amdgpu_dm_connector *
1767 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1768 					     struct drm_crtc *crtc)
1769 {
1770 	uint32_t i;
1771 	struct drm_connector_state *new_con_state;
1772 	struct drm_connector *connector;
1773 	struct drm_crtc *crtc_from_state;
1774 
1775 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1776 		crtc_from_state = new_con_state->crtc;
1777 
1778 		if (crtc_from_state == crtc)
1779 			return to_amdgpu_dm_connector(connector);
1780 	}
1781 
1782 	return NULL;
1783 }
1784 
1785 static void emulated_link_detect(struct dc_link *link)
1786 {
1787 	struct dc_sink_init_data sink_init_data = { 0 };
1788 	struct display_sink_capability sink_caps = { 0 };
1789 	enum dc_edid_status edid_status;
1790 	struct dc_context *dc_ctx = link->ctx;
1791 	struct dc_sink *sink = NULL;
1792 	struct dc_sink *prev_sink = NULL;
1793 
1794 	link->type = dc_connection_none;
1795 	prev_sink = link->local_sink;
1796 
1797 	if (prev_sink)
1798 		dc_sink_release(prev_sink);
1799 
1800 	switch (link->connector_signal) {
1801 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1802 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1803 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1804 		break;
1805 	}
1806 
1807 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1808 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1809 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1810 		break;
1811 	}
1812 
1813 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1814 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1815 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1816 		break;
1817 	}
1818 
1819 	case SIGNAL_TYPE_LVDS: {
1820 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1821 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1822 		break;
1823 	}
1824 
1825 	case SIGNAL_TYPE_EDP: {
1826 		sink_caps.transaction_type =
1827 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1828 		sink_caps.signal = SIGNAL_TYPE_EDP;
1829 		break;
1830 	}
1831 
1832 	case SIGNAL_TYPE_DISPLAY_PORT: {
1833 		sink_caps.transaction_type =
1834 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1835 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1836 		break;
1837 	}
1838 
1839 	default:
1840 		DC_ERROR("Invalid connector type! signal:%d\n",
1841 			link->connector_signal);
1842 		return;
1843 	}
1844 
1845 	sink_init_data.link = link;
1846 	sink_init_data.sink_signal = sink_caps.signal;
1847 
1848 	sink = dc_sink_create(&sink_init_data);
1849 	if (!sink) {
1850 		DC_ERROR("Failed to create sink!\n");
1851 		return;
1852 	}
1853 
1854 	/* dc_sink_create returns a new reference */
1855 	link->local_sink = sink;
1856 
1857 	edid_status = dm_helpers_read_local_edid(
1858 			link->ctx,
1859 			link,
1860 			sink);
1861 
1862 	if (edid_status != EDID_OK)
1863 		DC_ERROR("Failed to read EDID");
1864 
1865 }
1866 
1867 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1868 				     struct amdgpu_display_manager *dm)
1869 {
1870 	struct {
1871 		struct dc_surface_update surface_updates[MAX_SURFACES];
1872 		struct dc_plane_info plane_infos[MAX_SURFACES];
1873 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1874 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1875 		struct dc_stream_update stream_update;
1876 	} *bundle;
1877 	int k, m;
1878 
1879 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1880 
1881 	if (!bundle) {
1882 		dm_error("Failed to allocate update bundle\n");
1883 		goto cleanup;
1884 	}
1885 
1886 	for (k = 0; k < dc_state->stream_count; k++) {
1887 		bundle->stream_update.stream = dc_state->streams[k];
1888 
1889 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1890 			bundle->surface_updates[m].surface =
1891 				dc_state->stream_status->plane_states[m];
1892 			bundle->surface_updates[m].surface->force_full_update =
1893 				true;
1894 		}
1895 		dc_commit_updates_for_stream(
1896 			dm->dc, bundle->surface_updates,
1897 			dc_state->stream_status->plane_count,
1898 			dc_state->streams[k], &bundle->stream_update, dc_state);
1899 	}
1900 
1901 cleanup:
1902 	kfree(bundle);
1903 
1904 	return;
1905 }
1906 
1907 static void dm_set_dpms_off(struct dc_link *link)
1908 {
1909 	struct dc_stream_state *stream_state;
1910 	struct amdgpu_dm_connector *aconnector = link->priv;
1911 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1912 	struct dc_stream_update stream_update;
1913 	bool dpms_off = true;
1914 
1915 	memset(&stream_update, 0, sizeof(stream_update));
1916 	stream_update.dpms_off = &dpms_off;
1917 
1918 	mutex_lock(&adev->dm.dc_lock);
1919 	stream_state = dc_stream_find_from_link(link);
1920 
1921 	if (stream_state == NULL) {
1922 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1923 		mutex_unlock(&adev->dm.dc_lock);
1924 		return;
1925 	}
1926 
1927 	stream_update.stream = stream_state;
1928 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1929 				     stream_state, &stream_update,
1930 				     stream_state->ctx->dc->current_state);
1931 	mutex_unlock(&adev->dm.dc_lock);
1932 }
1933 
1934 static int dm_resume(void *handle)
1935 {
1936 	struct amdgpu_device *adev = handle;
1937 	struct drm_device *ddev = adev_to_drm(adev);
1938 	struct amdgpu_display_manager *dm = &adev->dm;
1939 	struct amdgpu_dm_connector *aconnector;
1940 	struct drm_connector *connector;
1941 	struct drm_connector_list_iter iter;
1942 	struct drm_crtc *crtc;
1943 	struct drm_crtc_state *new_crtc_state;
1944 	struct dm_crtc_state *dm_new_crtc_state;
1945 	struct drm_plane *plane;
1946 	struct drm_plane_state *new_plane_state;
1947 	struct dm_plane_state *dm_new_plane_state;
1948 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1949 	enum dc_connection_type new_connection_type = dc_connection_none;
1950 	struct dc_state *dc_state;
1951 	int i, r, j;
1952 
1953 	if (amdgpu_in_reset(adev)) {
1954 		dc_state = dm->cached_dc_state;
1955 
1956 		r = dm_dmub_hw_init(adev);
1957 		if (r)
1958 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1959 
1960 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1961 		dc_resume(dm->dc);
1962 
1963 		amdgpu_dm_irq_resume_early(adev);
1964 
1965 		for (i = 0; i < dc_state->stream_count; i++) {
1966 			dc_state->streams[i]->mode_changed = true;
1967 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1968 				dc_state->stream_status->plane_states[j]->update_flags.raw
1969 					= 0xffffffff;
1970 			}
1971 		}
1972 
1973 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1974 
1975 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1976 
1977 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1978 
1979 		dc_release_state(dm->cached_dc_state);
1980 		dm->cached_dc_state = NULL;
1981 
1982 		amdgpu_dm_irq_resume_late(adev);
1983 
1984 		mutex_unlock(&dm->dc_lock);
1985 
1986 		return 0;
1987 	}
1988 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1989 	dc_release_state(dm_state->context);
1990 	dm_state->context = dc_create_state(dm->dc);
1991 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1992 	dc_resource_state_construct(dm->dc, dm_state->context);
1993 
1994 	/* Before powering on DC we need to re-initialize DMUB. */
1995 	r = dm_dmub_hw_init(adev);
1996 	if (r)
1997 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1998 
1999 	/* power on hardware */
2000 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2001 
2002 	/* program HPD filter */
2003 	dc_resume(dm->dc);
2004 
2005 	/*
2006 	 * Enable HPD Rx IRQ early; this must be done before the mode is set,
2007 	 * as short-pulse interrupts are used for MST
2008 	 */
2009 	amdgpu_dm_irq_resume_early(adev);
2010 
2011 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2012 	s3_handle_mst(ddev, false);
2013 
2014 	/* Do detection */
2015 	drm_connector_list_iter_begin(ddev, &iter);
2016 	drm_for_each_connector_iter(connector, &iter) {
2017 		aconnector = to_amdgpu_dm_connector(connector);
2018 
2019 		/*
2020 		 * This is the case when traversing through already created MST
2021 		 * connectors; they are managed by the MST framework and skipped here.
2022 		 */
2023 		if (aconnector->mst_port)
2024 			continue;
2025 
2026 		mutex_lock(&aconnector->hpd_lock);
2027 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2028 			DRM_ERROR("KMS: Failed to detect connector\n");
2029 
2030 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2031 			emulated_link_detect(aconnector->dc_link);
2032 		else
2033 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2034 
2035 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2036 			aconnector->fake_enable = false;
2037 
2038 		if (aconnector->dc_sink)
2039 			dc_sink_release(aconnector->dc_sink);
2040 		aconnector->dc_sink = NULL;
2041 		amdgpu_dm_update_connector_after_detect(aconnector);
2042 		mutex_unlock(&aconnector->hpd_lock);
2043 	}
2044 	drm_connector_list_iter_end(&iter);
2045 
2046 	/* Force mode set in atomic commit */
2047 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2048 		new_crtc_state->active_changed = true;
2049 
2050 	/*
2051 	 * atomic_check is expected to create the dc states. We need to release
2052 	 * them here, since they were duplicated as part of the suspend
2053 	 * procedure.
2054 	 */
2055 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2056 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2057 		if (dm_new_crtc_state->stream) {
2058 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2059 			dc_stream_release(dm_new_crtc_state->stream);
2060 			dm_new_crtc_state->stream = NULL;
2061 		}
2062 	}
2063 
2064 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2065 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2066 		if (dm_new_plane_state->dc_state) {
2067 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2068 			dc_plane_state_release(dm_new_plane_state->dc_state);
2069 			dm_new_plane_state->dc_state = NULL;
2070 		}
2071 	}
2072 
2073 	drm_atomic_helper_resume(ddev, dm->cached_state);
2074 
2075 	dm->cached_state = NULL;
2076 
2077 	amdgpu_dm_irq_resume_late(adev);
2078 
2079 	amdgpu_dm_smu_write_watermarks_table(adev);
2080 
2081 	return 0;
2082 }
2083 
2084 /**
2085  * DOC: DM Lifecycle
2086  *
2087  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2088  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2089  * the base driver's device list to be initialized and torn down accordingly.
2090  *
2091  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2092  */
2093 
2094 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2095 	.name = "dm",
2096 	.early_init = dm_early_init,
2097 	.late_init = dm_late_init,
2098 	.sw_init = dm_sw_init,
2099 	.sw_fini = dm_sw_fini,
2100 	.hw_init = dm_hw_init,
2101 	.hw_fini = dm_hw_fini,
2102 	.suspend = dm_suspend,
2103 	.resume = dm_resume,
2104 	.is_idle = dm_is_idle,
2105 	.wait_for_idle = dm_wait_for_idle,
2106 	.check_soft_reset = dm_check_soft_reset,
2107 	.soft_reset = dm_soft_reset,
2108 	.set_clockgating_state = dm_set_clockgating_state,
2109 	.set_powergating_state = dm_set_powergating_state,
2110 };
2111 
2112 const struct amdgpu_ip_block_version dm_ip_block =
2113 {
2114 	.type = AMD_IP_BLOCK_TYPE_DCE,
2115 	.major = 1,
2116 	.minor = 0,
2117 	.rev = 0,
2118 	.funcs = &amdgpu_dm_funcs,
2119 };
2120 
2121 
2122 /**
2123  * DOC: atomic
2124  *
2125  * *WIP*
2126  */
2127 
2128 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2129 	.fb_create = amdgpu_display_user_framebuffer_create,
2130 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2131 	.atomic_check = amdgpu_dm_atomic_check,
2132 	.atomic_commit = amdgpu_dm_atomic_commit,
2133 };
2134 
2135 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2136 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2137 };
2138 
2139 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2140 {
2141 	u32 max_cll, min_cll, max, min, q, r;
2142 	struct amdgpu_dm_backlight_caps *caps;
2143 	struct amdgpu_display_manager *dm;
2144 	struct drm_connector *conn_base;
2145 	struct amdgpu_device *adev;
2146 	struct dc_link *link = NULL;
2147 	static const u8 pre_computed_values[] = {
2148 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2149 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2150 
2151 	if (!aconnector || !aconnector->dc_link)
2152 		return;
2153 
2154 	link = aconnector->dc_link;
2155 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2156 		return;
2157 
2158 	conn_base = &aconnector->base;
2159 	adev = drm_to_adev(conn_base->dev);
2160 	dm = &adev->dm;
2161 	caps = &dm->backlight_caps;
2162 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2163 	caps->aux_support = false;
2164 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2165 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2166 
2167 	if (caps->ext_caps->bits.oled == 1 /*||
2168 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2169 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2170 		caps->aux_support = true;
2171 
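	/*
	 * The amdgpu_backlight module parameter overrides the OLED-based
	 * default above: 0 forces PWM control, 1 forces AUX control, and
	 * -1 (auto) keeps the detected value.
	 */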
2172 	if (amdgpu_backlight == 0)
2173 		caps->aux_support = false;
2174 	else if (amdgpu_backlight == 1)
2175 		caps->aux_support = true;
2176 
2177 	/* From the specification (CTA-861-G), the maximum luminance is
2178 	 * calculated as:
2179 	 *	Luminance = 50*2**(CV/32)
2180 	 * where CV is a one-byte value.
2181 	 * Evaluating this expression directly would need floating-point
2182 	 * precision; to avoid that complexity, we take advantage of the fact
2183 	 * that CV is divided by a constant. By Euclid's division algorithm,
2184 	 * CV can be written as CV = 32*q + r. Substituting this into the
2185 	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2186 	 * to pre-compute the values of 50*2**(r/32). The values were
2187 	 * generated with the following Ruby line:
2188 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2189 	 * The results of the above expression are stored in
2190 	 * pre_computed_values.
2191 	 */
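	/*
	 * Worked example: for max_cll = 70, q = 70 >> 5 = 2 and
	 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6]
	 * = 4 * 57 = 228, matching round(50 * 2 ** (70 / 32.0)) = 228.
	 */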
2192 	q = max_cll >> 5;
2193 	r = max_cll % 32;
2194 	max = (1 << q) * pre_computed_values[r];
2195 
2196 	// min luminance: maxLum * (CV/255)^2 / 100
2197 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2198 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
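	/*
	 * Note that with integer arithmetic DIV_ROUND_CLOSEST(min_cll, 255)
	 * is 0 or 1, and DIV_ROUND_CLOSEST(q * q, 100) then rounds to 0, so
	 * min evaluates to 0 for every valid min_cll value.
	 */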
2199 
2200 	caps->aux_max_input_signal = max;
2201 	caps->aux_min_input_signal = min;
2202 }
2203 
2204 void amdgpu_dm_update_connector_after_detect(
2205 		struct amdgpu_dm_connector *aconnector)
2206 {
2207 	struct drm_connector *connector = &aconnector->base;
2208 	struct drm_device *dev = connector->dev;
2209 	struct dc_sink *sink;
2210 
2211 	/* MST handled by drm_mst framework */
2212 	if (aconnector->mst_mgr.mst_state)
2213 		return;
2214 
2215 	sink = aconnector->dc_link->local_sink;
2216 	if (sink)
2217 		dc_sink_retain(sink);
2218 
2219 	/*
2220 	 * An EDID-managed connector gets its first update only in the mode_valid hook;
2221 	 * the connector sink is then set to a fake or physical sink depending on link status.
2222 	 * Skip if already done during boot.
2223 	 */
2224 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2225 			&& aconnector->dc_em_sink) {
2226 
2227 		/*
2228 		 * For S3 resume with a headless setup, use the emulated sink to fake
2229 		 * a stream, because connector->sink is set to NULL on resume.
2230 		 */
2231 		mutex_lock(&dev->mode_config.mutex);
2232 
2233 		if (sink) {
2234 			if (aconnector->dc_sink) {
2235 				amdgpu_dm_update_freesync_caps(connector, NULL);
2236 				/*
2237 				 * The retain and release below bump up the
2238 				 * sink's refcount because the link no longer points
2239 				 * to it after disconnect; otherwise, the next CRTC-to-connector
2240 				 * reshuffle by the UMD would trigger an unwanted dc_sink release.
2241 				 */
2242 				dc_sink_release(aconnector->dc_sink);
2243 			}
2244 			aconnector->dc_sink = sink;
2245 			dc_sink_retain(aconnector->dc_sink);
2246 			amdgpu_dm_update_freesync_caps(connector,
2247 					aconnector->edid);
2248 		} else {
2249 			amdgpu_dm_update_freesync_caps(connector, NULL);
2250 			if (!aconnector->dc_sink) {
2251 				aconnector->dc_sink = aconnector->dc_em_sink;
2252 				dc_sink_retain(aconnector->dc_sink);
2253 			}
2254 		}
2255 
2256 		mutex_unlock(&dev->mode_config.mutex);
2257 
2258 		if (sink)
2259 			dc_sink_release(sink);
2260 		return;
2261 	}
2262 
2263 	/*
2264 	 * TODO: temporary guard until a proper fix is found.
2265 	 * If this sink is an MST sink, we should not do anything.
2266 	 */
2267 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2268 		dc_sink_release(sink);
2269 		return;
2270 	}
2271 
2272 	if (aconnector->dc_sink == sink) {
2273 		/*
2274 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2275 		 * Do nothing!!
2276 		 */
2277 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2278 				aconnector->connector_id);
2279 		if (sink)
2280 			dc_sink_release(sink);
2281 		return;
2282 	}
2283 
2284 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2285 		aconnector->connector_id, aconnector->dc_sink, sink);
2286 
2287 	mutex_lock(&dev->mode_config.mutex);
2288 
2289 	/*
2290 	 * 1. Update status of the drm connector
2291 	 * 2. Send an event and let userspace tell us what to do
2292 	 */
2293 	if (sink) {
2294 		/*
2295 		 * TODO: check if we still need the S3 mode update workaround.
2296 		 * If yes, put it here.
2297 		 */
2298 		if (aconnector->dc_sink) {
2299 			amdgpu_dm_update_freesync_caps(connector, NULL);
2300 			dc_sink_release(aconnector->dc_sink);
2301 		}
2302 
2303 		aconnector->dc_sink = sink;
2304 		dc_sink_retain(aconnector->dc_sink);
2305 		if (sink->dc_edid.length == 0) {
2306 			aconnector->edid = NULL;
2307 			if (aconnector->dc_link->aux_mode) {
2308 				drm_dp_cec_unset_edid(
2309 					&aconnector->dm_dp_aux.aux);
2310 			}
2311 		} else {
2312 			aconnector->edid =
2313 				(struct edid *)sink->dc_edid.raw_edid;
2314 
2315 			drm_connector_update_edid_property(connector,
2316 							   aconnector->edid);
2317 			if (aconnector->dc_link->aux_mode)
2318 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2319 						    aconnector->edid);
2320 		}
2321 
2322 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2323 		update_connector_ext_caps(aconnector);
2324 	} else {
2325 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2326 		amdgpu_dm_update_freesync_caps(connector, NULL);
2327 		drm_connector_update_edid_property(connector, NULL);
2328 		aconnector->num_modes = 0;
2329 		dc_sink_release(aconnector->dc_sink);
2330 		aconnector->dc_sink = NULL;
2331 		aconnector->edid = NULL;
2332 #ifdef CONFIG_DRM_AMD_DC_HDCP
2333 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2334 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2335 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2336 #endif
2337 	}
2338 
2339 	mutex_unlock(&dev->mode_config.mutex);
2340 
2341 	update_subconnector_property(aconnector);
2342 
2343 	if (sink)
2344 		dc_sink_release(sink);
2345 }
2346 
2347 static void handle_hpd_irq(void *param)
2348 {
2349 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2350 	struct drm_connector *connector = &aconnector->base;
2351 	struct drm_device *dev = connector->dev;
2352 	enum dc_connection_type new_connection_type = dc_connection_none;
2353 #ifdef CONFIG_DRM_AMD_DC_HDCP
2354 	struct amdgpu_device *adev = drm_to_adev(dev);
2355 #endif
2356 
2357 	/*
2358 	 * On failure, or in the MST case, there is no need to update the connector
2359 	 * status or notify the OS, since MST handles this in its own context.
2360 	 */
2361 	mutex_lock(&aconnector->hpd_lock);
2362 
2363 #ifdef CONFIG_DRM_AMD_DC_HDCP
2364 	if (adev->dm.hdcp_workqueue)
2365 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2366 #endif
2367 	if (aconnector->fake_enable)
2368 		aconnector->fake_enable = false;
2369 
2370 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2371 		DRM_ERROR("KMS: Failed to detect connector\n");
2372 
2373 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2374 		emulated_link_detect(aconnector->dc_link);
2375 
2376 
2377 		drm_modeset_lock_all(dev);
2378 		dm_restore_drm_connector_state(dev, connector);
2379 		drm_modeset_unlock_all(dev);
2380 
2381 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2382 			drm_kms_helper_hotplug_event(dev);
2383 
2384 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2385 		if (new_connection_type == dc_connection_none &&
2386 		    aconnector->dc_link->type == dc_connection_none)
2387 			dm_set_dpms_off(aconnector->dc_link);
2388 
2389 		amdgpu_dm_update_connector_after_detect(aconnector);
2390 
2391 		drm_modeset_lock_all(dev);
2392 		dm_restore_drm_connector_state(dev, connector);
2393 		drm_modeset_unlock_all(dev);
2394 
2395 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2396 			drm_kms_helper_hotplug_event(dev);
2397 	}
2398 	mutex_unlock(&aconnector->hpd_lock);
2399 
2400 }
2401 
2402 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2403 {
2404 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2405 	uint8_t dret;
2406 	bool new_irq_handled = false;
2407 	int dpcd_addr;
2408 	int dpcd_bytes_to_read;
2409 
2410 	const int max_process_count = 30;
2411 	int process_count = 0;
2412 
2413 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2414 
2415 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2416 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2417 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2418 		dpcd_addr = DP_SINK_COUNT;
2419 	} else {
2420 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2421 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2422 		dpcd_addr = DP_SINK_COUNT_ESI;
2423 	}
2424 
2425 	dret = drm_dp_dpcd_read(
2426 		&aconnector->dm_dp_aux.aux,
2427 		dpcd_addr,
2428 		esi,
2429 		dpcd_bytes_to_read);
2430 
2431 	while (dret == dpcd_bytes_to_read &&
2432 		process_count < max_process_count) {
2433 		uint8_t retry;
2434 		dret = 0;
2435 
2436 		process_count++;
2437 
2438 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2439 		/* handle HPD short pulse irq */
2440 		if (aconnector->mst_mgr.mst_state)
2441 			drm_dp_mst_hpd_irq(
2442 				&aconnector->mst_mgr,
2443 				esi,
2444 				&new_irq_handled);
2445 
2446 		if (new_irq_handled) {
2447 			/* ACK at DPCD to notify downstream */
2448 			const int ack_dpcd_bytes_to_write =
2449 				dpcd_bytes_to_read - 1;
2450 
2451 			for (retry = 0; retry < 3; retry++) {
2452 				uint8_t wret;
2453 
2454 				wret = drm_dp_dpcd_write(
2455 					&aconnector->dm_dp_aux.aux,
2456 					dpcd_addr + 1,
2457 					&esi[1],
2458 					ack_dpcd_bytes_to_write);
2459 				if (wret == ack_dpcd_bytes_to_write)
2460 					break;
2461 			}
2462 
2463 			/* Check if there is a new irq to be handled */
2464 			dret = drm_dp_dpcd_read(
2465 				&aconnector->dm_dp_aux.aux,
2466 				dpcd_addr,
2467 				esi,
2468 				dpcd_bytes_to_read);
2469 
2470 			new_irq_handled = false;
2471 		} else {
2472 			break;
2473 		}
2474 	}
2475 
2476 	if (process_count == max_process_count)
2477 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2478 }
2479 
2480 static void handle_hpd_rx_irq(void *param)
2481 {
2482 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2483 	struct drm_connector *connector = &aconnector->base;
2484 	struct drm_device *dev = connector->dev;
2485 	struct dc_link *dc_link = aconnector->dc_link;
2486 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2487 	enum dc_connection_type new_connection_type = dc_connection_none;
2488 #ifdef CONFIG_DRM_AMD_DC_HDCP
2489 	union hpd_irq_data hpd_irq_data;
2490 	struct amdgpu_device *adev = drm_to_adev(dev);
2491 
2492 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2493 #endif
2494 
2495 	/*
2496 	 * TODO: This mutex is a temporary workaround so the HPD interrupt does
2497 	 * not hit a GPIO conflict; once an i2c helper is implemented, this
2498 	 * mutex should be retired.
2499 	 */
2500 	if (dc_link->type != dc_connection_mst_branch)
2501 		mutex_lock(&aconnector->hpd_lock);
2502 
2503 
2504 #ifdef CONFIG_DRM_AMD_DC_HDCP
2505 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2506 #else
2507 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2508 #endif
2509 			!is_mst_root_connector) {
2510 		/* Downstream Port status changed. */
2511 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2512 			DRM_ERROR("KMS: Failed to detect connector\n");
2513 
2514 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2515 			emulated_link_detect(dc_link);
2516 
2517 			if (aconnector->fake_enable)
2518 				aconnector->fake_enable = false;
2519 
2520 			amdgpu_dm_update_connector_after_detect(aconnector);
2521 
2522 
2523 			drm_modeset_lock_all(dev);
2524 			dm_restore_drm_connector_state(dev, connector);
2525 			drm_modeset_unlock_all(dev);
2526 
2527 			drm_kms_helper_hotplug_event(dev);
2528 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2529 
2530 			if (aconnector->fake_enable)
2531 				aconnector->fake_enable = false;
2532 
2533 			amdgpu_dm_update_connector_after_detect(aconnector);
2534 
2535 
2536 			drm_modeset_lock_all(dev);
2537 			dm_restore_drm_connector_state(dev, connector);
2538 			drm_modeset_unlock_all(dev);
2539 
2540 			drm_kms_helper_hotplug_event(dev);
2541 		}
2542 	}
2543 #ifdef CONFIG_DRM_AMD_DC_HDCP
2544 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2545 		if (adev->dm.hdcp_workqueue)
2546 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2547 	}
2548 #endif
2549 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2550 	    (dc_link->type == dc_connection_mst_branch))
2551 		dm_handle_hpd_rx_irq(aconnector);
2552 
2553 	if (dc_link->type != dc_connection_mst_branch) {
2554 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2555 		mutex_unlock(&aconnector->hpd_lock);
2556 	}
2557 }
2558 
2559 static void register_hpd_handlers(struct amdgpu_device *adev)
2560 {
2561 	struct drm_device *dev = adev_to_drm(adev);
2562 	struct drm_connector *connector;
2563 	struct amdgpu_dm_connector *aconnector;
2564 	const struct dc_link *dc_link;
2565 	struct dc_interrupt_params int_params = {0};
2566 
2567 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2568 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2569 
2570 	list_for_each_entry(connector,
2571 			&dev->mode_config.connector_list, head) {
2572 
2573 		aconnector = to_amdgpu_dm_connector(connector);
2574 		dc_link = aconnector->dc_link;
2575 
2576 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2577 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2578 			int_params.irq_source = dc_link->irq_source_hpd;
2579 
2580 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2581 					handle_hpd_irq,
2582 					(void *) aconnector);
2583 		}
2584 
2585 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2586 
2587 			/* Also register for DP short pulse (hpd_rx). */
2588 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2589 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2590 
2591 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2592 					handle_hpd_rx_irq,
2593 					(void *) aconnector);
2594 		}
2595 	}
2596 }
2597 
2598 #if defined(CONFIG_DRM_AMD_DC_SI)
2599 /* Register IRQ sources and initialize IRQ callbacks */
2600 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2601 {
2602 	struct dc *dc = adev->dm.dc;
2603 	struct common_irq_params *c_irq_params;
2604 	struct dc_interrupt_params int_params = {0};
2605 	int r;
2606 	int i;
2607 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2608 
2609 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2610 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2611 
2612 	/*
2613 	 * Actions of amdgpu_irq_add_id():
2614 	 * 1. Register a set() function with base driver.
2615 	 *    Base driver will call set() function to enable/disable an
2616 	 *    interrupt in DC hardware.
2617 	 * 2. Register amdgpu_dm_irq_handler().
2618 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2619 	 *    coming from DC hardware.
2620 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2621 	 *    for acknowledging and handling. */
2622 
2623 	/* Use VBLANK interrupt */
2624 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2625 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2626 		if (r) {
2627 			DRM_ERROR("Failed to add crtc irq id!\n");
2628 			return r;
2629 		}
2630 
2631 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2632 		int_params.irq_source =
2633 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2634 
2635 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2636 
2637 		c_irq_params->adev = adev;
2638 		c_irq_params->irq_src = int_params.irq_source;
2639 
2640 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2641 				dm_crtc_high_irq, c_irq_params);
2642 	}
2643 
2644 	/* Use GRPH_PFLIP interrupt */
2645 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2646 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2647 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2648 		if (r) {
2649 			DRM_ERROR("Failed to add page flip irq id!\n");
2650 			return r;
2651 		}
2652 
2653 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2654 		int_params.irq_source =
2655 			dc_interrupt_to_irq_source(dc, i, 0);
2656 
2657 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2658 
2659 		c_irq_params->adev = adev;
2660 		c_irq_params->irq_src = int_params.irq_source;
2661 
2662 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2663 				dm_pflip_high_irq, c_irq_params);
2664 
2665 	}
2666 
2667 	/* HPD */
2668 	r = amdgpu_irq_add_id(adev, client_id,
2669 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2670 	if (r) {
2671 		DRM_ERROR("Failed to add hpd irq id!\n");
2672 		return r;
2673 	}
2674 
2675 	register_hpd_handlers(adev);
2676 
2677 	return 0;
2678 }
2679 #endif
2680 
2681 /* Register IRQ sources and initialize IRQ callbacks */
2682 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2683 {
2684 	struct dc *dc = adev->dm.dc;
2685 	struct common_irq_params *c_irq_params;
2686 	struct dc_interrupt_params int_params = {0};
2687 	int r;
2688 	int i;
2689 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2690 
2691 	if (adev->asic_type >= CHIP_VEGA10)
2692 		client_id = SOC15_IH_CLIENTID_DCE;
2693 
2694 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2695 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2696 
2697 	/*
2698 	 * Actions of amdgpu_irq_add_id():
2699 	 * 1. Register a set() function with base driver.
2700 	 *    Base driver will call set() function to enable/disable an
2701 	 *    interrupt in DC hardware.
2702 	 * 2. Register amdgpu_dm_irq_handler().
2703 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2704 	 *    coming from DC hardware.
2705 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2706 	 *    for acknowledging and handling. */
2707 
2708 	/* Use VBLANK interrupt */
2709 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2710 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2711 		if (r) {
2712 			DRM_ERROR("Failed to add crtc irq id!\n");
2713 			return r;
2714 		}
2715 
2716 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2717 		int_params.irq_source =
2718 			dc_interrupt_to_irq_source(dc, i, 0);
2719 
2720 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2721 
2722 		c_irq_params->adev = adev;
2723 		c_irq_params->irq_src = int_params.irq_source;
2724 
2725 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2726 				dm_crtc_high_irq, c_irq_params);
2727 	}
2728 
2729 	/* Use VUPDATE interrupt */
2730 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2731 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2732 		if (r) {
2733 			DRM_ERROR("Failed to add vupdate irq id!\n");
2734 			return r;
2735 		}
2736 
2737 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2738 		int_params.irq_source =
2739 			dc_interrupt_to_irq_source(dc, i, 0);
2740 
2741 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2742 
2743 		c_irq_params->adev = adev;
2744 		c_irq_params->irq_src = int_params.irq_source;
2745 
2746 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2747 				dm_vupdate_high_irq, c_irq_params);
2748 	}
2749 
2750 	/* Use GRPH_PFLIP interrupt */
2751 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2752 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2753 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2754 		if (r) {
2755 			DRM_ERROR("Failed to add page flip irq id!\n");
2756 			return r;
2757 		}
2758 
2759 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2760 		int_params.irq_source =
2761 			dc_interrupt_to_irq_source(dc, i, 0);
2762 
2763 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2764 
2765 		c_irq_params->adev = adev;
2766 		c_irq_params->irq_src = int_params.irq_source;
2767 
2768 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2769 				dm_pflip_high_irq, c_irq_params);
2770 
2771 	}
2772 
2773 	/* HPD */
2774 	r = amdgpu_irq_add_id(adev, client_id,
2775 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2776 	if (r) {
2777 		DRM_ERROR("Failed to add hpd irq id!\n");
2778 		return r;
2779 	}
2780 
2781 	register_hpd_handlers(adev);
2782 
2783 	return 0;
2784 }
2785 
2786 #if defined(CONFIG_DRM_AMD_DC_DCN)
2787 /* Register IRQ sources and initialize IRQ callbacks */
2788 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2789 {
2790 	struct dc *dc = adev->dm.dc;
2791 	struct common_irq_params *c_irq_params;
2792 	struct dc_interrupt_params int_params = {0};
2793 	int r;
2794 	int i;
2795 
2796 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2797 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2798 
2799 	/*
2800 	 * Actions of amdgpu_irq_add_id():
2801 	 * 1. Register a set() function with base driver.
2802 	 *    Base driver will call set() function to enable/disable an
2803 	 *    interrupt in DC hardware.
2804 	 * 2. Register amdgpu_dm_irq_handler().
2805 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2806 	 *    coming from DC hardware.
2807 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2808 	 *    for acknowledging and handling.
2809 	 */
2810 
2811 	/* Use VSTARTUP interrupt */
2812 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2813 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2814 			i++) {
2815 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2816 
2817 		if (r) {
2818 			DRM_ERROR("Failed to add crtc irq id!\n");
2819 			return r;
2820 		}
2821 
2822 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2823 		int_params.irq_source =
2824 			dc_interrupt_to_irq_source(dc, i, 0);
2825 
2826 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2827 
2828 		c_irq_params->adev = adev;
2829 		c_irq_params->irq_src = int_params.irq_source;
2830 
2831 		amdgpu_dm_irq_register_interrupt(
2832 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2833 	}
2834 
2835 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2836 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2837 	 * to trigger at end of each vblank, regardless of state of the lock,
2838 	 * matching DCE behaviour.
2839 	 */
2840 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2841 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2842 	     i++) {
2843 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2844 
2845 		if (r) {
2846 			DRM_ERROR("Failed to add vupdate irq id!\n");
2847 			return r;
2848 		}
2849 
2850 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 		int_params.irq_source =
2852 			dc_interrupt_to_irq_source(dc, i, 0);
2853 
2854 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2855 
2856 		c_irq_params->adev = adev;
2857 		c_irq_params->irq_src = int_params.irq_source;
2858 
2859 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 				dm_vupdate_high_irq, c_irq_params);
2861 	}
2862 
2863 	/* Use GRPH_PFLIP interrupt */
2864 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2865 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2866 			i++) {
2867 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2868 		if (r) {
2869 			DRM_ERROR("Failed to add page flip irq id!\n");
2870 			return r;
2871 		}
2872 
2873 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2874 		int_params.irq_source =
2875 			dc_interrupt_to_irq_source(dc, i, 0);
2876 
2877 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2878 
2879 		c_irq_params->adev = adev;
2880 		c_irq_params->irq_src = int_params.irq_source;
2881 
2882 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2883 				dm_pflip_high_irq, c_irq_params);
2884 
2885 	}
2886 
2887 	/* HPD */
2888 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2889 			&adev->hpd_irq);
2890 	if (r) {
2891 		DRM_ERROR("Failed to add hpd irq id!\n");
2892 		return r;
2893 	}
2894 
2895 	register_hpd_handlers(adev);
2896 
2897 	return 0;
2898 }
2899 #endif
2900 
2901 /*
2902  * Acquires the lock for the atomic state object and returns
2903  * the new atomic state.
2904  *
2905  * This should only be called during atomic check.
2906  */
2907 static int dm_atomic_get_state(struct drm_atomic_state *state,
2908 			       struct dm_atomic_state **dm_state)
2909 {
2910 	struct drm_device *dev = state->dev;
2911 	struct amdgpu_device *adev = drm_to_adev(dev);
2912 	struct amdgpu_display_manager *dm = &adev->dm;
2913 	struct drm_private_state *priv_state;
2914 
2915 	if (*dm_state)
2916 		return 0;
2917 
2918 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2919 	if (IS_ERR(priv_state))
2920 		return PTR_ERR(priv_state);
2921 
2922 	*dm_state = to_dm_atomic_state(priv_state);
2923 
2924 	return 0;
2925 }
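
/*
 * A minimal caller sketch (hypothetical locals) showing the lazy-acquire
 * pattern above, from within an atomic check callback:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context is now locked and safe to inspect or modify
 */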
2926 
2927 static struct dm_atomic_state *
2928 dm_atomic_get_new_state(struct drm_atomic_state *state)
2929 {
2930 	struct drm_device *dev = state->dev;
2931 	struct amdgpu_device *adev = drm_to_adev(dev);
2932 	struct amdgpu_display_manager *dm = &adev->dm;
2933 	struct drm_private_obj *obj;
2934 	struct drm_private_state *new_obj_state;
2935 	int i;
2936 
2937 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2938 		if (obj->funcs == dm->atomic_obj.funcs)
2939 			return to_dm_atomic_state(new_obj_state);
2940 	}
2941 
2942 	return NULL;
2943 }
2944 
2945 static struct drm_private_state *
2946 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2947 {
2948 	struct dm_atomic_state *old_state, *new_state;
2949 
2950 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2951 	if (!new_state)
2952 		return NULL;
2953 
2954 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2955 
2956 	old_state = to_dm_atomic_state(obj->state);
2957 
2958 	if (old_state && old_state->context)
2959 		new_state->context = dc_copy_state(old_state->context);
2960 
2961 	if (!new_state->context) {
2962 		kfree(new_state);
2963 		return NULL;
2964 	}
2965 
2966 	return &new_state->base;
2967 }
2968 
2969 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2970 				    struct drm_private_state *state)
2971 {
2972 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2973 
2974 	if (dm_state && dm_state->context)
2975 		dc_release_state(dm_state->context);
2976 
2977 	kfree(dm_state);
2978 }
2979 
2980 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2981 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2982 	.atomic_destroy_state = dm_atomic_destroy_state,
2983 };
2984 
2985 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2986 {
2987 	struct dm_atomic_state *state;
2988 	int r;
2989 
2990 	adev->mode_info.mode_config_initialized = true;
2991 
2992 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2993 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2994 
2995 	adev_to_drm(adev)->mode_config.max_width = 16384;
2996 	adev_to_drm(adev)->mode_config.max_height = 16384;
2997 
2998 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2999 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3000 	/* indicates support for immediate flip */
3001 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3002 
3003 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3004 
3005 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3006 	if (!state)
3007 		return -ENOMEM;
3008 
3009 	state->context = dc_create_state(adev->dm.dc);
3010 	if (!state->context) {
3011 		kfree(state);
3012 		return -ENOMEM;
3013 	}
3014 
3015 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3016 
3017 	drm_atomic_private_obj_init(adev_to_drm(adev),
3018 				    &adev->dm.atomic_obj,
3019 				    &state->base,
3020 				    &dm_atomic_state_funcs);
3021 
3022 	r = amdgpu_display_modeset_create_props(adev);
3023 	if (r) {
3024 		dc_release_state(state->context);
3025 		kfree(state);
3026 		return r;
3027 	}
3028 
3029 	r = amdgpu_dm_audio_init(adev);
3030 	if (r) {
3031 		dc_release_state(state->context);
3032 		kfree(state);
3033 		return r;
3034 	}
3035 
3036 	return 0;
3037 }
3038 
3039 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3040 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3041 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3042 
3043 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3044 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3045 
3046 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3047 {
3048 #if defined(CONFIG_ACPI)
3049 	struct amdgpu_dm_backlight_caps caps;
3050 
3051 	memset(&caps, 0, sizeof(caps));
3052 
3053 	if (dm->backlight_caps.caps_valid)
3054 		return;
3055 
3056 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3057 	if (caps.caps_valid) {
3058 		dm->backlight_caps.caps_valid = true;
3059 		if (caps.aux_support)
3060 			return;
3061 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3062 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3063 	} else {
3064 		dm->backlight_caps.min_input_signal =
3065 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3066 		dm->backlight_caps.max_input_signal =
3067 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3068 	}
3069 #else
3070 	if (dm->backlight_caps.aux_support)
3071 		return;
3072 
3073 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3074 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3075 #endif
3076 }
3077 
3078 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3079 				unsigned *min, unsigned *max)
3080 {
3081 	if (!caps)
3082 		return 0;
3083 
3084 	if (caps->aux_support) {
3085 		// Firmware limits are in nits, DC API wants millinits.
3086 		*max = 1000 * caps->aux_max_input_signal;
3087 		*min = 1000 * caps->aux_min_input_signal;
3088 	} else {
3089 		// Firmware limits are 8-bit, PWM control is 16-bit.
3090 		*max = 0x101 * caps->max_input_signal;
3091 		*min = 0x101 * caps->min_input_signal;
3092 	}
3093 	return 1;
3094 }
3095 
3096 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3097 					uint32_t brightness)
3098 {
3099 	unsigned min, max;
3100 
3101 	if (!get_brightness_range(caps, &min, &max))
3102 		return brightness;
3103 
3104 	// Rescale 0..255 to min..max
3105 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3106 				       AMDGPU_MAX_BL_LEVEL);
3107 }
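
/*
 * For example, with the default PWM limits (min_input_signal = 12,
 * max_input_signal = 255), get_brightness_range() yields
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 0 maps to 3084 and 255 maps to 65535.
 */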
3108 
3109 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3110 				      uint32_t brightness)
3111 {
3112 	unsigned min, max;
3113 
3114 	if (!get_brightness_range(caps, &min, &max))
3115 		return brightness;
3116 
3117 	if (brightness < min)
3118 		return 0;
3119 	// Rescale min..max to 0..255
3120 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3121 				 max - min);
3122 }
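
/*
 * This is the inverse mapping: with the same PWM range, 65535 converts
 * back to 255, and anything at or below min (3084) reports 0.
 */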
3123 
3124 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3125 {
3126 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3127 	struct amdgpu_dm_backlight_caps caps;
3128 	struct dc_link *link = NULL;
3129 	u32 brightness;
3130 	bool rc;
3131 
3132 	amdgpu_dm_update_backlight_caps(dm);
3133 	caps = dm->backlight_caps;
3134 
3135 	link = (struct dc_link *)dm->backlight_link;
3136 
3137 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3138 	// Change brightness based on AUX property
3139 	if (caps.aux_support)
3140 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3141 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3142 	else
3143 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3144 
3145 	return rc ? 0 : 1;
3146 }
3147 
3148 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3149 {
3150 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3151 	struct amdgpu_dm_backlight_caps caps;
3152 
3153 	amdgpu_dm_update_backlight_caps(dm);
3154 	caps = dm->backlight_caps;
3155 
3156 	if (caps.aux_support) {
3157 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3158 		u32 avg, peak;
3159 		bool rc;
3160 
3161 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3162 		if (!rc)
3163 			return bd->props.brightness;
3164 		return convert_brightness_to_user(&caps, avg);
3165 	} else {
3166 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3167 
3168 		if (ret == DC_ERROR_UNEXPECTED)
3169 			return bd->props.brightness;
3170 		return convert_brightness_to_user(&caps, ret);
3171 	}
3172 }
3173 
3174 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3175 	.options = BL_CORE_SUSPENDRESUME,
3176 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3177 	.update_status	= amdgpu_dm_backlight_update_status,
3178 };
3179 
3180 static void
3181 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3182 {
3183 	char bl_name[16];
3184 	struct backlight_properties props = { 0 };
3185 
3186 	amdgpu_dm_update_backlight_caps(dm);
3187 
3188 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3189 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3190 	props.type = BACKLIGHT_RAW;
3191 
3192 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3193 		 adev_to_drm(dm->adev)->primary->index);
3194 
3195 	dm->backlight_dev = backlight_device_register(bl_name,
3196 						      adev_to_drm(dm->adev)->dev,
3197 						      dm,
3198 						      &amdgpu_dm_backlight_ops,
3199 						      &props);
3200 
3201 	if (IS_ERR(dm->backlight_dev))
3202 		DRM_ERROR("DM: Backlight registration failed!\n");
3203 	else
3204 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3205 }
3206 
3207 #endif
3208 
3209 static int initialize_plane(struct amdgpu_display_manager *dm,
3210 			    struct amdgpu_mode_info *mode_info, int plane_id,
3211 			    enum drm_plane_type plane_type,
3212 			    const struct dc_plane_cap *plane_cap)
3213 {
3214 	struct drm_plane *plane;
3215 	unsigned long possible_crtcs;
3216 	int ret = 0;
3217 
3218 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3219 	if (!plane) {
3220 		DRM_ERROR("KMS: Failed to allocate plane\n");
3221 		return -ENOMEM;
3222 	}
3223 	plane->type = plane_type;
3224 
3225 	/*
3226 	 * HACK: IGT tests expect that the primary plane for a CRTC
3227 	 * can only have one possible CRTC. Only expose support for
3228 	 * any CRTC on planes that will not be used as a primary
3229 	 * plane for a CRTC - i.e., overlay or underlay planes.
3230 	 */
3231 	possible_crtcs = 1 << plane_id;
3232 	if (plane_id >= dm->dc->caps.max_streams)
3233 		possible_crtcs = 0xff;
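	/*
	 * e.g. primary plane 2 is restricted to possible_crtcs = 0x4 (CRTC 2
	 * only), while an overlay plane (plane_id >= max_streams) gets 0xff.
	 */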
3234 
3235 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3236 
3237 	if (ret) {
3238 		DRM_ERROR("KMS: Failed to initialize plane\n");
3239 		kfree(plane);
3240 		return ret;
3241 	}
3242 
3243 	if (mode_info)
3244 		mode_info->planes[plane_id] = plane;
3245 
3246 	return ret;
3247 }
3248 
3249 
3250 static void register_backlight_device(struct amdgpu_display_manager *dm,
3251 				      struct dc_link *link)
3252 {
3253 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3254 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3255 
3256 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3257 	    link->type != dc_connection_none) {
3258 		/*
3259 		 * Even if registration fails, we should continue with
3260 		 * DM initialization, because not having backlight control
3261 		 * is better than a black screen.
3262 		 */
3263 		amdgpu_dm_register_backlight_device(dm);
3264 
3265 		if (dm->backlight_dev)
3266 			dm->backlight_link = link;
3267 	}
3268 #endif
3269 }
3270 
3271 
3272 /*
3273  * In this architecture, the association
3274  * connector -> encoder -> crtc
3275  * is not really required. The crtc and connector will hold the
3276  * display_index as an abstraction to use with the DAL component.
3277  *
3278  * Returns 0 on success
3279  */
3280 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3281 {
3282 	struct amdgpu_display_manager *dm = &adev->dm;
3283 	int32_t i;
3284 	struct amdgpu_dm_connector *aconnector = NULL;
3285 	struct amdgpu_encoder *aencoder = NULL;
3286 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3287 	uint32_t link_cnt;
3288 	int32_t primary_planes;
3289 	enum dc_connection_type new_connection_type = dc_connection_none;
3290 	const struct dc_plane_cap *plane;
3291 
3292 	dm->display_indexes_num = dm->dc->caps.max_streams;
3293 	/* Update the actual number of CRTCs in use */
3294 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3295 
3296 	link_cnt = dm->dc->caps.max_links;
3297 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3298 		DRM_ERROR("DM: Failed to initialize mode config\n");
3299 		return -EINVAL;
3300 	}
3301 
3302 	/* There is one primary plane per CRTC */
3303 	primary_planes = dm->dc->caps.max_streams;
3304 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3305 
3306 	/*
3307 	 * Initialize primary planes, the implicit planes used by legacy IOCTLs.
3308 	 * Order is reversed to match iteration order in atomic check.
3309 	 */
3310 	for (i = (primary_planes - 1); i >= 0; i--) {
3311 		plane = &dm->dc->caps.planes[i];
3312 
3313 		if (initialize_plane(dm, mode_info, i,
3314 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3315 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3316 			goto fail;
3317 		}
3318 	}
3319 
3320 	/*
3321 	 * Initialize overlay planes, index starting after primary planes.
3322 	 * These planes have a higher DRM index than the primary planes since
3323 	 * they should be considered as having a higher z-order.
3324 	 * Order is reversed to match iteration order in atomic check.
3325 	 *
3326 	 * Only support DCN for now, and only expose one so we don't encourage
3327 	 * userspace to use up all the pipes.
3328 	 */
3329 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3330 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3331 
3332 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3333 			continue;
3334 
3335 		if (!plane->blends_with_above || !plane->blends_with_below)
3336 			continue;
3337 
3338 		if (!plane->pixel_format_support.argb8888)
3339 			continue;
3340 
3341 		if (initialize_plane(dm, NULL, primary_planes + i,
3342 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3343 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3344 			goto fail;
3345 		}
3346 
3347 		/* Only create one overlay plane. */
3348 		break;
3349 	}
3350 
3351 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3352 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3353 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3354 			goto fail;
3355 		}
3356 
3357 	/* Loop over all connectors on the board */
3358 	for (i = 0; i < link_cnt; i++) {
3359 		struct dc_link *link = NULL;
3360 
3361 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3362 			DRM_ERROR(
3363 				"KMS: Cannot support more than %d display indexes\n",
3364 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3365 			continue;
3366 		}
3367 
3368 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3369 		if (!aconnector)
3370 			goto fail;
3371 
3372 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3373 		if (!aencoder)
3374 			goto fail;
3375 
3376 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3377 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3378 			goto fail;
3379 		}
3380 
3381 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3382 			DRM_ERROR("KMS: Failed to initialize connector\n");
3383 			goto fail;
3384 		}
3385 
3386 		link = dc_get_link_at_index(dm->dc, i);
3387 
3388 		if (!dc_link_detect_sink(link, &new_connection_type))
3389 			DRM_ERROR("KMS: Failed to detect connector\n");
3390 
3391 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3392 			emulated_link_detect(link);
3393 			amdgpu_dm_update_connector_after_detect(aconnector);
3394 
3395 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3396 			amdgpu_dm_update_connector_after_detect(aconnector);
3397 			register_backlight_device(dm, link);
3398 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3399 				amdgpu_dm_set_psr_caps(link);
3400 		}
3401 
3402 
3403 	}
3404 
3405 	/* Software is initialized. Now we can register interrupt handlers. */
3406 	switch (adev->asic_type) {
3407 #if defined(CONFIG_DRM_AMD_DC_SI)
3408 	case CHIP_TAHITI:
3409 	case CHIP_PITCAIRN:
3410 	case CHIP_VERDE:
3411 	case CHIP_OLAND:
3412 		if (dce60_register_irq_handlers(dm->adev)) {
3413 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3414 			goto fail;
3415 		}
3416 		break;
3417 #endif
3418 	case CHIP_BONAIRE:
3419 	case CHIP_HAWAII:
3420 	case CHIP_KAVERI:
3421 	case CHIP_KABINI:
3422 	case CHIP_MULLINS:
3423 	case CHIP_TONGA:
3424 	case CHIP_FIJI:
3425 	case CHIP_CARRIZO:
3426 	case CHIP_STONEY:
3427 	case CHIP_POLARIS11:
3428 	case CHIP_POLARIS10:
3429 	case CHIP_POLARIS12:
3430 	case CHIP_VEGAM:
3431 	case CHIP_VEGA10:
3432 	case CHIP_VEGA12:
3433 	case CHIP_VEGA20:
3434 		if (dce110_register_irq_handlers(dm->adev)) {
3435 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3436 			goto fail;
3437 		}
3438 		break;
3439 #if defined(CONFIG_DRM_AMD_DC_DCN)
3440 	case CHIP_RAVEN:
3441 	case CHIP_NAVI12:
3442 	case CHIP_NAVI10:
3443 	case CHIP_NAVI14:
3444 	case CHIP_RENOIR:
3445 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3446 	case CHIP_SIENNA_CICHLID:
3447 	case CHIP_NAVY_FLOUNDER:
3448 #endif
3449 		if (dcn10_register_irq_handlers(dm->adev)) {
3450 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3451 			goto fail;
3452 		}
3453 		break;
3454 #endif
3455 	default:
3456 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3457 		goto fail;
3458 	}
3459 
3460 	return 0;
3461 fail:
3462 	kfree(aencoder);
3463 	kfree(aconnector);
3464 
3465 	return -EINVAL;
3466 }
3467 
3468 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3469 {
3470 	drm_mode_config_cleanup(dm->ddev);
3471 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3472 	return;
3473 }
3474 
3475 /******************************************************************************
3476  * amdgpu_display_funcs functions
3477  *****************************************************************************/
3478 
3479 /*
3480  * dm_bandwidth_update - program display watermarks
3481  *
3482  * @adev: amdgpu_device pointer
3483  *
3484  * Calculate and program the display watermarks and line buffer allocation.
3485  */
3486 static void dm_bandwidth_update(struct amdgpu_device *adev)
3487 {
3488 	/* TODO: implement later */
3489 }
3490 
3491 static const struct amdgpu_display_funcs dm_display_funcs = {
3492 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3493 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3494 	.backlight_set_level = NULL, /* never called for DC */
3495 	.backlight_get_level = NULL, /* never called for DC */
3496 	.hpd_sense = NULL, /* called unconditionally */
3497 	.hpd_set_polarity = NULL, /* called unconditionally */
3498 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3499 	.page_flip_get_scanoutpos =
3500 		dm_crtc_get_scanoutpos,/* called unconditionally */
3501 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3502 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3503 };
3504 
3505 #if defined(CONFIG_DEBUG_KERNEL_DC)
3506 
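/*
 * Debug-only sysfs hook: writing a non-zero value simulates a resume
 * and fires a hotplug event; writing zero simulates a suspend, e.g.
 * "echo 0 > s3_debug".
 */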
3507 static ssize_t s3_debug_store(struct device *device,
3508 			      struct device_attribute *attr,
3509 			      const char *buf,
3510 			      size_t count)
3511 {
3512 	int ret;
3513 	int s3_state;
3514 	struct drm_device *drm_dev = dev_get_drvdata(device);
3515 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3516 
3517 	ret = kstrtoint(buf, 0, &s3_state);
3518 
3519 	if (ret == 0) {
3520 		if (s3_state) {
3521 			dm_resume(adev);
3522 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3523 		} else {
3524 			dm_suspend(adev);
		}
3525 	}
3526 
3527 	return ret == 0 ? count : 0;
3528 }
3529 
3530 DEVICE_ATTR_WO(s3_debug);
3531 
3532 #endif
3533 
3534 static int dm_early_init(void *handle)
3535 {
3536 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3537 
3538 	switch (adev->asic_type) {
3539 #if defined(CONFIG_DRM_AMD_DC_SI)
3540 	case CHIP_TAHITI:
3541 	case CHIP_PITCAIRN:
3542 	case CHIP_VERDE:
3543 		adev->mode_info.num_crtc = 6;
3544 		adev->mode_info.num_hpd = 6;
3545 		adev->mode_info.num_dig = 6;
3546 		break;
3547 	case CHIP_OLAND:
3548 		adev->mode_info.num_crtc = 2;
3549 		adev->mode_info.num_hpd = 2;
3550 		adev->mode_info.num_dig = 2;
3551 		break;
3552 #endif
3553 	case CHIP_BONAIRE:
3554 	case CHIP_HAWAII:
3555 		adev->mode_info.num_crtc = 6;
3556 		adev->mode_info.num_hpd = 6;
3557 		adev->mode_info.num_dig = 6;
3558 		break;
3559 	case CHIP_KAVERI:
3560 		adev->mode_info.num_crtc = 4;
3561 		adev->mode_info.num_hpd = 6;
3562 		adev->mode_info.num_dig = 7;
3563 		break;
3564 	case CHIP_KABINI:
3565 	case CHIP_MULLINS:
3566 		adev->mode_info.num_crtc = 2;
3567 		adev->mode_info.num_hpd = 6;
3568 		adev->mode_info.num_dig = 6;
3569 		break;
3570 	case CHIP_FIJI:
3571 	case CHIP_TONGA:
3572 		adev->mode_info.num_crtc = 6;
3573 		adev->mode_info.num_hpd = 6;
3574 		adev->mode_info.num_dig = 7;
3575 		break;
3576 	case CHIP_CARRIZO:
3577 		adev->mode_info.num_crtc = 3;
3578 		adev->mode_info.num_hpd = 6;
3579 		adev->mode_info.num_dig = 9;
3580 		break;
3581 	case CHIP_STONEY:
3582 		adev->mode_info.num_crtc = 2;
3583 		adev->mode_info.num_hpd = 6;
3584 		adev->mode_info.num_dig = 9;
3585 		break;
3586 	case CHIP_POLARIS11:
3587 	case CHIP_POLARIS12:
3588 		adev->mode_info.num_crtc = 5;
3589 		adev->mode_info.num_hpd = 5;
3590 		adev->mode_info.num_dig = 5;
3591 		break;
3592 	case CHIP_POLARIS10:
3593 	case CHIP_VEGAM:
3594 		adev->mode_info.num_crtc = 6;
3595 		adev->mode_info.num_hpd = 6;
3596 		adev->mode_info.num_dig = 6;
3597 		break;
3598 	case CHIP_VEGA10:
3599 	case CHIP_VEGA12:
3600 	case CHIP_VEGA20:
3601 		adev->mode_info.num_crtc = 6;
3602 		adev->mode_info.num_hpd = 6;
3603 		adev->mode_info.num_dig = 6;
3604 		break;
3605 #if defined(CONFIG_DRM_AMD_DC_DCN)
3606 	case CHIP_RAVEN:
3607 		adev->mode_info.num_crtc = 4;
3608 		adev->mode_info.num_hpd = 4;
3609 		adev->mode_info.num_dig = 4;
3610 		break;
3611 #endif
3612 	case CHIP_NAVI10:
3613 	case CHIP_NAVI12:
3614 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3615 	case CHIP_SIENNA_CICHLID:
3616 	case CHIP_NAVY_FLOUNDER:
3617 #endif
3618 		adev->mode_info.num_crtc = 6;
3619 		adev->mode_info.num_hpd = 6;
3620 		adev->mode_info.num_dig = 6;
3621 		break;
3622 	case CHIP_NAVI14:
3623 		adev->mode_info.num_crtc = 5;
3624 		adev->mode_info.num_hpd = 5;
3625 		adev->mode_info.num_dig = 5;
3626 		break;
3627 	case CHIP_RENOIR:
3628 		adev->mode_info.num_crtc = 4;
3629 		adev->mode_info.num_hpd = 4;
3630 		adev->mode_info.num_dig = 4;
3631 		break;
3632 	default:
3633 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3634 		return -EINVAL;
3635 	}
3636 
3637 	amdgpu_dm_set_irq_funcs(adev);
3638 
3639 	if (adev->mode_info.funcs == NULL)
3640 		adev->mode_info.funcs = &dm_display_funcs;
3641 
3642 	/*
3643 	 * Note: Do NOT change adev->audio_endpt_rreg and
3644 	 * adev->audio_endpt_wreg because they are initialised in
3645 	 * amdgpu_device_init()
3646 	 */
3647 #if defined(CONFIG_DEBUG_KERNEL_DC)
3648 	device_create_file(
3649 		adev_to_drm(adev)->dev,
3650 		&dev_attr_s3_debug);
3651 #endif
3652 
3653 	return 0;
3654 }
3655 
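/*
 * Note: the new_stream/old_stream arguments are currently unused; only
 * the CRTC state decides whether a full modeset is required.
 */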
3656 static bool modeset_required(struct drm_crtc_state *crtc_state,
3657 			     struct dc_stream_state *new_stream,
3658 			     struct dc_stream_state *old_stream)
3659 {
3660 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3661 }
3662 
3663 static bool modereset_required(struct drm_crtc_state *crtc_state)
3664 {
3665 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3666 }
3667 
3668 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3669 {
3670 	drm_encoder_cleanup(encoder);
3671 	kfree(encoder);
3672 }
3673 
3674 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3675 	.destroy = amdgpu_dm_encoder_destroy,
3676 };
3677 
3679 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3680 				struct dc_scaling_info *scaling_info)
3681 {
3682 	int scale_w, scale_h;
3683 
3684 	memset(scaling_info, 0, sizeof(*scaling_info));
3685 
3686 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3687 	scaling_info->src_rect.x = state->src_x >> 16;
3688 	scaling_info->src_rect.y = state->src_y >> 16;
3689 
3690 	/*
3691 	 * For reasons we don't (yet) fully understand, a non-zero
3692 	 * src_y coordinate into an NV12 buffer can cause a
3693 	 * system hang. To avoid hangs (and maybe be overly cautious)
3694 	 * let's reject both non-zero src_x and src_y.
3695 	 *
3696 	 * We currently know of only one use-case to reproduce a
3697 	 * scenario with non-zero src_x and src_y for NV12, which
3698 	 * is to gesture the YouTube Android app into full screen
3699 	 * on ChromeOS.
3700 	 */
3701 	if (state->fb &&
3702 	    state->fb->format->format == DRM_FORMAT_NV12 &&
3703 	    (scaling_info->src_rect.x != 0 ||
3704 	     scaling_info->src_rect.y != 0))
3705 		return -EINVAL;
3723 
3724 	scaling_info->src_rect.width = state->src_w >> 16;
3725 	if (scaling_info->src_rect.width == 0)
3726 		return -EINVAL;
3727 
3728 	scaling_info->src_rect.height = state->src_h >> 16;
3729 	if (scaling_info->src_rect.height == 0)
3730 		return -EINVAL;
3731 
3732 	scaling_info->dst_rect.x = state->crtc_x;
3733 	scaling_info->dst_rect.y = state->crtc_y;
3734 
3735 	if (state->crtc_w == 0)
3736 		return -EINVAL;
3737 
3738 	scaling_info->dst_rect.width = state->crtc_w;
3739 
3740 	if (state->crtc_h == 0)
3741 		return -EINVAL;
3742 
3743 	scaling_info->dst_rect.height = state->crtc_h;
3744 
3745 	/* DRM doesn't specify clipping on destination output. */
3746 	scaling_info->clip_rect = scaling_info->dst_rect;
3747 
3748 	/* TODO: Validate scaling per-format with DC plane caps */
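	/*
	 * The scale factors below are in units of 1/1000, so 1000 means
	 * 1:1; the 250..16000 window corresponds to the roughly
	 * 0.25x..16x scaling range assumed here.
	 */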
3749 	scale_w = scaling_info->dst_rect.width * 1000 /
3750 		  scaling_info->src_rect.width;
3751 
3752 	if (scale_w < 250 || scale_w > 16000)
3753 		return -EINVAL;
3754 
3755 	scale_h = scaling_info->dst_rect.height * 1000 /
3756 		  scaling_info->src_rect.height;
3757 
3758 	if (scale_h < 250 || scale_h > 16000)
3759 		return -EINVAL;
3760 
3761 	/*
3762 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3763 	 * assume reasonable defaults based on the format.
3764 	 */
3765 
3766 	return 0;
3767 }
3768 
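/*
 * Fetch the tiling flags and TMZ (encrypted) state of a framebuffer's
 * backing BO. The BO has to be reserved while it is queried, hence the
 * reserve/unreserve pair below.
 */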
3769 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3770 		       uint64_t *tiling_flags, bool *tmz_surface)
3771 {
3772 	struct amdgpu_bo *rbo;
3773 	int r;
3774 
3775 	if (!amdgpu_fb) {
3776 		*tiling_flags = 0;
3777 		*tmz_surface = false;
3778 		return 0;
3779 	}
3780 
3781 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3782 	r = amdgpu_bo_reserve(rbo, false);
3783 
3784 	if (unlikely(r)) {
3785 		/* Don't show error message when returning -ERESTARTSYS */
3786 		if (r != -ERESTARTSYS)
3787 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3788 		return r;
3789 	}
3790 
3791 	if (tiling_flags)
3792 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3793 
3794 	if (tmz_surface)
3795 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3796 
3797 	amdgpu_bo_unreserve(rbo);
3798 
3799 	return r;
3800 }
3801 
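/*
 * The DCC metadata offset is stored in the tiling flags in units of
 * 256 bytes; an offset of zero means the buffer carries no DCC metadata.
 */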
3802 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3803 {
3804 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3805 
3806 	return offset ? (address + offset * 256) : 0;
3807 }
3808 
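/*
 * Derive DC's DCC parameters for a plane from the BO tiling flags.
 * Returns 0 with dcc->enable left clear when DCC is simply not in use
 * (forced off, no metadata offset, or a video format), and -EINVAL when
 * DCC is requested but the surface cannot support it.
 */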
3809 static int
3810 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3811 			  const struct amdgpu_framebuffer *afb,
3812 			  const enum surface_pixel_format format,
3813 			  const enum dc_rotation_angle rotation,
3814 			  const struct plane_size *plane_size,
3815 			  const union dc_tiling_info *tiling_info,
3816 			  const uint64_t info,
3817 			  struct dc_plane_dcc_param *dcc,
3818 			  struct dc_plane_address *address,
3819 			  bool force_disable_dcc)
3820 {
3821 	struct dc *dc = adev->dm.dc;
3822 	struct dc_dcc_surface_param input;
3823 	struct dc_surface_dcc_cap output;
3824 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3825 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3826 	uint64_t dcc_address;
3827 
3828 	memset(&input, 0, sizeof(input));
3829 	memset(&output, 0, sizeof(output));
3830 
3831 	if (force_disable_dcc)
3832 		return 0;
3833 
3834 	if (!offset)
3835 		return 0;
3836 
3837 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3838 		return 0;
3839 
3840 	if (!dc->cap_funcs.get_dcc_compression_cap)
3841 		return -EINVAL;
3842 
3843 	input.format = format;
3844 	input.surface_size.width = plane_size->surface_size.width;
3845 	input.surface_size.height = plane_size->surface_size.height;
3846 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3847 
3848 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3849 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3850 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3851 		input.scan = SCAN_DIRECTION_VERTICAL;
3852 
3853 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3854 		return -EINVAL;
3855 
3856 	if (!output.capable)
3857 		return -EINVAL;
3858 
3859 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3860 		return -EINVAL;
3861 
3862 	dcc->enable = 1;
3863 	dcc->meta_pitch =
3864 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3865 	dcc->independent_64b_blks = i64b;
3866 
3867 	dcc_address = get_dcc_address(afb->address, info);
3868 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3869 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3870 
3871 	return 0;
3872 }
3873 
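/*
 * Translate framebuffer and tiling state into DC's buffer description:
 * surface/chroma sizes, GFX8 or GFX9 tiling parameters, DCC, and the
 * physical addresses that DC programs into the hardware.
 */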
3874 static int
3875 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3876 			     const struct amdgpu_framebuffer *afb,
3877 			     const enum surface_pixel_format format,
3878 			     const enum dc_rotation_angle rotation,
3879 			     const uint64_t tiling_flags,
3880 			     union dc_tiling_info *tiling_info,
3881 			     struct plane_size *plane_size,
3882 			     struct dc_plane_dcc_param *dcc,
3883 			     struct dc_plane_address *address,
3884 			     bool tmz_surface,
3885 			     bool force_disable_dcc)
3886 {
3887 	const struct drm_framebuffer *fb = &afb->base;
3888 	int ret;
3889 
3890 	memset(tiling_info, 0, sizeof(*tiling_info));
3891 	memset(plane_size, 0, sizeof(*plane_size));
3892 	memset(dcc, 0, sizeof(*dcc));
3893 	memset(address, 0, sizeof(*address));
3894 
3895 	address->tmz_surface = tmz_surface;
3896 
3897 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3898 		plane_size->surface_size.x = 0;
3899 		plane_size->surface_size.y = 0;
3900 		plane_size->surface_size.width = fb->width;
3901 		plane_size->surface_size.height = fb->height;
3902 		plane_size->surface_pitch =
3903 			fb->pitches[0] / fb->format->cpp[0];
3904 
3905 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3906 		address->grph.addr.low_part = lower_32_bits(afb->address);
3907 		address->grph.addr.high_part = upper_32_bits(afb->address);
3908 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3909 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3910 
3911 		plane_size->surface_size.x = 0;
3912 		plane_size->surface_size.y = 0;
3913 		plane_size->surface_size.width = fb->width;
3914 		plane_size->surface_size.height = fb->height;
3915 		plane_size->surface_pitch =
3916 			fb->pitches[0] / fb->format->cpp[0];
3917 
3918 		plane_size->chroma_size.x = 0;
3919 		plane_size->chroma_size.y = 0;
3920 		/* TODO: set these based on surface format */
3921 		plane_size->chroma_size.width = fb->width / 2;
3922 		plane_size->chroma_size.height = fb->height / 2;
3923 
3924 		plane_size->chroma_pitch =
3925 			fb->pitches[1] / fb->format->cpp[1];
3926 
3927 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3928 		address->video_progressive.luma_addr.low_part =
3929 			lower_32_bits(afb->address);
3930 		address->video_progressive.luma_addr.high_part =
3931 			upper_32_bits(afb->address);
3932 		address->video_progressive.chroma_addr.low_part =
3933 			lower_32_bits(chroma_addr);
3934 		address->video_progressive.chroma_addr.high_part =
3935 			upper_32_bits(chroma_addr);
3936 	}
3937 
3938 	/* Fill GFX8 params */
3939 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3940 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3941 
3942 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3943 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3944 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3945 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3946 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3947 
3948 		/* XXX fix me for VI */
3949 		tiling_info->gfx8.num_banks = num_banks;
3950 		tiling_info->gfx8.array_mode =
3951 				DC_ARRAY_2D_TILED_THIN1;
3952 		tiling_info->gfx8.tile_split = tile_split;
3953 		tiling_info->gfx8.bank_width = bankw;
3954 		tiling_info->gfx8.bank_height = bankh;
3955 		tiling_info->gfx8.tile_aspect = mtaspect;
3956 		tiling_info->gfx8.tile_mode =
3957 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3958 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3959 			== DC_ARRAY_1D_TILED_THIN1) {
3960 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3961 	}
3962 
3963 	tiling_info->gfx8.pipe_config =
3964 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3965 
3966 	if (adev->asic_type == CHIP_VEGA10 ||
3967 	    adev->asic_type == CHIP_VEGA12 ||
3968 	    adev->asic_type == CHIP_VEGA20 ||
3969 	    adev->asic_type == CHIP_NAVI10 ||
3970 	    adev->asic_type == CHIP_NAVI14 ||
3971 	    adev->asic_type == CHIP_NAVI12 ||
3972 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3973 	    adev->asic_type == CHIP_SIENNA_CICHLID ||
3974 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3975 #endif
3976 	    adev->asic_type == CHIP_RENOIR ||
3977 	    adev->asic_type == CHIP_RAVEN) {
3978 		/* Fill GFX9 params */
3979 		tiling_info->gfx9.num_pipes =
3980 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3981 		tiling_info->gfx9.num_banks =
3982 			adev->gfx.config.gb_addr_config_fields.num_banks;
3983 		tiling_info->gfx9.pipe_interleave =
3984 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3985 		tiling_info->gfx9.num_shader_engines =
3986 			adev->gfx.config.gb_addr_config_fields.num_se;
3987 		tiling_info->gfx9.max_compressed_frags =
3988 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3989 		tiling_info->gfx9.num_rb_per_se =
3990 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3991 		tiling_info->gfx9.swizzle =
3992 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3993 		tiling_info->gfx9.shaderEnable = 1;
3994 
3995 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3996 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3997 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3998 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3999 #endif
4000 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
4001 						plane_size, tiling_info,
4002 						tiling_flags, dcc, address,
4003 						force_disable_dcc);
4004 		if (ret)
4005 			return ret;
4006 	}
4007 
4008 	return 0;
4009 }
4010 
4011 static void
4012 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4013 			       bool *per_pixel_alpha, bool *global_alpha,
4014 			       int *global_alpha_value)
4015 {
4016 	*per_pixel_alpha = false;
4017 	*global_alpha = false;
4018 	*global_alpha_value = 0xff;
4019 
4020 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4021 		return;
4022 
4023 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4024 		static const uint32_t alpha_formats[] = {
4025 			DRM_FORMAT_ARGB8888,
4026 			DRM_FORMAT_RGBA8888,
4027 			DRM_FORMAT_ABGR8888,
4028 		};
4029 		uint32_t format = plane_state->fb->format->format;
4030 		unsigned int i;
4031 
4032 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4033 			if (format == alpha_formats[i]) {
4034 				*per_pixel_alpha = true;
4035 				break;
4036 			}
4037 		}
4038 	}
4039 
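	/*
	 * DRM stores plane alpha as a 16-bit value (0xffff is fully
	 * opaque) while DC takes an 8-bit value, hence the shift below.
	 */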
4040 	if (plane_state->alpha < 0xffff) {
4041 		*global_alpha = true;
4042 		*global_alpha_value = plane_state->alpha >> 8;
4043 	}
4044 }
4045 
4046 static int
4047 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4048 			    const enum surface_pixel_format format,
4049 			    enum dc_color_space *color_space)
4050 {
4051 	bool full_range;
4052 
4053 	*color_space = COLOR_SPACE_SRGB;
4054 
4055 	/* DRM color properties only affect non-RGB formats. */
4056 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4057 		return 0;
4058 
4059 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4060 
4061 	switch (plane_state->color_encoding) {
4062 	case DRM_COLOR_YCBCR_BT601:
4063 		if (full_range)
4064 			*color_space = COLOR_SPACE_YCBCR601;
4065 		else
4066 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4067 		break;
4068 
4069 	case DRM_COLOR_YCBCR_BT709:
4070 		if (full_range)
4071 			*color_space = COLOR_SPACE_YCBCR709;
4072 		else
4073 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4074 		break;
4075 
4076 	case DRM_COLOR_YCBCR_BT2020:
4077 		if (full_range)
4078 			*color_space = COLOR_SPACE_2020_YCBCR;
4079 		else
4080 			return -EINVAL;
4081 		break;
4082 
4083 	default:
4084 		return -EINVAL;
4085 	}
4086 
4087 	return 0;
4088 }
4089 
4090 static int
4091 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4092 			    const struct drm_plane_state *plane_state,
4093 			    const uint64_t tiling_flags,
4094 			    struct dc_plane_info *plane_info,
4095 			    struct dc_plane_address *address,
4096 			    bool tmz_surface,
4097 			    bool force_disable_dcc)
4098 {
4099 	const struct drm_framebuffer *fb = plane_state->fb;
4100 	const struct amdgpu_framebuffer *afb =
4101 		to_amdgpu_framebuffer(plane_state->fb);
4102 	struct drm_format_name_buf format_name;
4103 	int ret;
4104 
4105 	memset(plane_info, 0, sizeof(*plane_info));
4106 
4107 	switch (fb->format->format) {
4108 	case DRM_FORMAT_C8:
4109 		plane_info->format =
4110 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4111 		break;
4112 	case DRM_FORMAT_RGB565:
4113 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4114 		break;
4115 	case DRM_FORMAT_XRGB8888:
4116 	case DRM_FORMAT_ARGB8888:
4117 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4118 		break;
4119 	case DRM_FORMAT_XRGB2101010:
4120 	case DRM_FORMAT_ARGB2101010:
4121 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4122 		break;
4123 	case DRM_FORMAT_XBGR2101010:
4124 	case DRM_FORMAT_ABGR2101010:
4125 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4126 		break;
4127 	case DRM_FORMAT_XBGR8888:
4128 	case DRM_FORMAT_ABGR8888:
4129 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4130 		break;
4131 	case DRM_FORMAT_NV21:
4132 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4133 		break;
4134 	case DRM_FORMAT_NV12:
4135 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4136 		break;
4137 	case DRM_FORMAT_P010:
4138 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4139 		break;
4140 	case DRM_FORMAT_XRGB16161616F:
4141 	case DRM_FORMAT_ARGB16161616F:
4142 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4143 		break;
4144 	case DRM_FORMAT_XBGR16161616F:
4145 	case DRM_FORMAT_ABGR16161616F:
4146 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4147 		break;
4148 	default:
4149 		DRM_ERROR(
4150 			"Unsupported screen format %s\n",
4151 			drm_get_format_name(fb->format->format, &format_name));
4152 		return -EINVAL;
4153 	}
4154 
4155 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4156 	case DRM_MODE_ROTATE_0:
4157 		plane_info->rotation = ROTATION_ANGLE_0;
4158 		break;
4159 	case DRM_MODE_ROTATE_90:
4160 		plane_info->rotation = ROTATION_ANGLE_90;
4161 		break;
4162 	case DRM_MODE_ROTATE_180:
4163 		plane_info->rotation = ROTATION_ANGLE_180;
4164 		break;
4165 	case DRM_MODE_ROTATE_270:
4166 		plane_info->rotation = ROTATION_ANGLE_270;
4167 		break;
4168 	default:
4169 		plane_info->rotation = ROTATION_ANGLE_0;
4170 		break;
4171 	}
4172 
4173 	plane_info->visible = true;
4174 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4175 
4176 	plane_info->layer_index = 0;
4177 
4178 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4179 					  &plane_info->color_space);
4180 	if (ret)
4181 		return ret;
4182 
4183 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4184 					   plane_info->rotation, tiling_flags,
4185 					   &plane_info->tiling_info,
4186 					   &plane_info->plane_size,
4187 					   &plane_info->dcc, address, tmz_surface,
4188 					   force_disable_dcc);
4189 	if (ret)
4190 		return ret;
4191 
4192 	fill_blending_from_plane_state(
4193 		plane_state, &plane_info->per_pixel_alpha,
4194 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4195 
4196 	return 0;
4197 }
4198 
4199 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4200 				    struct dc_plane_state *dc_plane_state,
4201 				    struct drm_plane_state *plane_state,
4202 				    struct drm_crtc_state *crtc_state)
4203 {
4204 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4205 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4206 	struct dc_scaling_info scaling_info;
4207 	struct dc_plane_info plane_info;
4208 	int ret;
4209 	bool force_disable_dcc = false;
4210 
4211 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4212 	if (ret)
4213 		return ret;
4214 
4215 	dc_plane_state->src_rect = scaling_info.src_rect;
4216 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4217 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4218 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4219 
4220 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4221 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4222 					  dm_plane_state->tiling_flags,
4223 					  &plane_info,
4224 					  &dc_plane_state->address,
4225 					  dm_plane_state->tmz_surface,
4226 					  force_disable_dcc);
4227 	if (ret)
4228 		return ret;
4229 
4230 	dc_plane_state->format = plane_info.format;
4231 	dc_plane_state->color_space = plane_info.color_space;
4233 	dc_plane_state->plane_size = plane_info.plane_size;
4234 	dc_plane_state->rotation = plane_info.rotation;
4235 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4236 	dc_plane_state->stereo_format = plane_info.stereo_format;
4237 	dc_plane_state->tiling_info = plane_info.tiling_info;
4238 	dc_plane_state->visible = plane_info.visible;
4239 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4240 	dc_plane_state->global_alpha = plane_info.global_alpha;
4241 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4242 	dc_plane_state->dcc = plane_info.dcc;
4243 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4244 
4245 	/*
4246 	 * Always set input transfer function, since plane state is refreshed
4247 	 * every time.
4248 	 */
4249 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4250 	if (ret)
4251 		return ret;
4252 
4253 	return 0;
4254 }
4255 
4256 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4257 					   const struct dm_connector_state *dm_state,
4258 					   struct dc_stream_state *stream)
4259 {
4260 	enum amdgpu_rmx_type rmx_type;
4261 
4262 	struct rect src = { 0 }; /* viewport in composition space */
4263 	struct rect dst = { 0 }; /* stream addressable area */
4264 
4265 	/* no mode. nothing to be done */
4266 	if (!mode)
4267 		return;
4268 
4269 	/* Full screen scaling by default */
4270 	src.width = mode->hdisplay;
4271 	src.height = mode->vdisplay;
4272 	dst.width = stream->timing.h_addressable;
4273 	dst.height = stream->timing.v_addressable;
4274 
4275 	if (dm_state) {
4276 		rmx_type = dm_state->scaling;
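		/*
		 * The source and destination aspect ratios are compared by
		 * cross-multiplication to avoid integer division.
		 */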
4277 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4278 			if (src.width * dst.height <
4279 					src.height * dst.width) {
4280 				/* height needs less upscaling/more downscaling */
4281 				dst.width = src.width *
4282 						dst.height / src.height;
4283 			} else {
4284 				/* width needs less upscaling/more downscaling */
4285 				dst.height = src.height *
4286 						dst.width / src.width;
4287 			}
4288 		} else if (rmx_type == RMX_CENTER) {
4289 			dst = src;
4290 		}
4291 
4292 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4293 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4294 
4295 		if (dm_state->underscan_enable) {
4296 			dst.x += dm_state->underscan_hborder / 2;
4297 			dst.y += dm_state->underscan_vborder / 2;
4298 			dst.width -= dm_state->underscan_hborder;
4299 			dst.height -= dm_state->underscan_vborder;
4300 		}
4301 	}
4302 
4303 	stream->src = src;
4304 	stream->dst = dst;
4305 
4306 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4307 			dst.x, dst.y, dst.width, dst.height);
4309 }
4310 
4311 static enum dc_color_depth
4312 convert_color_depth_from_display_info(const struct drm_connector *connector,
4313 				      bool is_y420, int requested_bpc)
4314 {
4315 	uint8_t bpc;
4316 
4317 	if (is_y420) {
4318 		bpc = 8;
4319 
4320 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4321 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4322 			bpc = 16;
4323 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4324 			bpc = 12;
4325 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4326 			bpc = 10;
4327 	} else {
4328 		bpc = (uint8_t)connector->display_info.bpc;
4329 		/* Assume 8 bpc by default if no bpc is specified. */
4330 		bpc = bpc ? bpc : 8;
4331 	}
4332 
4333 	if (requested_bpc > 0) {
4334 		/*
4335 		 * Cap display bpc based on the user requested value.
4336 		 *
4337 		 * The value for state->max_bpc may not be correctly updated
4338 		 * depending on when the connector gets added to the state
4339 		 * or if this was called outside of atomic check, so it
4340 		 * can't be used directly.
4341 		 */
4342 		bpc = min_t(u8, bpc, requested_bpc);
4343 
4344 		/* Round down to the nearest even number. */
4345 		bpc = bpc - (bpc & 1);
4346 	}
4347 
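	/*
	 * Example: a 12 bpc panel with a requested_bpc of 10 is capped at
	 * 10; a requested_bpc of 11 also ends up at 10 after rounding.
	 */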
4348 	switch (bpc) {
4349 	case 0:
4350 		/*
4351 		 * Temporary workaround: DRM doesn't parse color depth for
4352 		 * EDID revisions before 1.4.
4353 		 * TODO: Fix EDID parsing
4354 		 */
4355 		return COLOR_DEPTH_888;
4356 	case 6:
4357 		return COLOR_DEPTH_666;
4358 	case 8:
4359 		return COLOR_DEPTH_888;
4360 	case 10:
4361 		return COLOR_DEPTH_101010;
4362 	case 12:
4363 		return COLOR_DEPTH_121212;
4364 	case 14:
4365 		return COLOR_DEPTH_141414;
4366 	case 16:
4367 		return COLOR_DEPTH_161616;
4368 	default:
4369 		return COLOR_DEPTH_UNDEFINED;
4370 	}
4371 }
4372 
4373 static enum dc_aspect_ratio
4374 get_aspect_ratio(const struct drm_display_mode *mode_in)
4375 {
4376 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4377 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4378 }
4379 
4380 static enum dc_color_space
4381 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4382 {
4383 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4384 
4385 	switch (dc_crtc_timing->pixel_encoding)	{
4386 	case PIXEL_ENCODING_YCBCR422:
4387 	case PIXEL_ENCODING_YCBCR444:
4388 	case PIXEL_ENCODING_YCBCR420:
4389 	{
4390 		/*
4391 		 * 27030 kHz is the separation point between HDTV and SDTV;
4392 		 * per the HDMI spec we use YCbCr709 above it and YCbCr601
4393 		 * below it.
4394 		 */
4395 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4396 			if (dc_crtc_timing->flags.Y_ONLY)
4397 				color_space =
4398 					COLOR_SPACE_YCBCR709_LIMITED;
4399 			else
4400 				color_space = COLOR_SPACE_YCBCR709;
4401 		} else {
4402 			if (dc_crtc_timing->flags.Y_ONLY)
4403 				color_space =
4404 					COLOR_SPACE_YCBCR601_LIMITED;
4405 			else
4406 				color_space = COLOR_SPACE_YCBCR601;
4407 		}
4408 
4409 	}
4410 	break;
4411 	case PIXEL_ENCODING_RGB:
4412 		color_space = COLOR_SPACE_SRGB;
4413 		break;
4414 
4415 	default:
4416 		WARN_ON(1);
4417 		break;
4418 	}
4419 
4420 	return color_space;
4421 }
4422 
4423 static bool adjust_colour_depth_from_display_info(
4424 	struct dc_crtc_timing *timing_out,
4425 	const struct drm_display_info *info)
4426 {
4427 	enum dc_color_depth depth = timing_out->display_color_depth;
4428 	int normalized_clk;
4429 	do {
4430 		normalized_clk = timing_out->pix_clk_100hz / 10;
4431 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4432 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4433 			normalized_clk /= 2;
4434 		/* Adjust the pixel clock per the HDMI spec for the given colour depth */
4435 		switch (depth) {
4436 		case COLOR_DEPTH_888:
4437 			break;
4438 		case COLOR_DEPTH_101010:
4439 			normalized_clk = (normalized_clk * 30) / 24;
4440 			break;
4441 		case COLOR_DEPTH_121212:
4442 			normalized_clk = (normalized_clk * 36) / 24;
4443 			break;
4444 		case COLOR_DEPTH_161616:
4445 			normalized_clk = (normalized_clk * 48) / 24;
4446 			break;
4447 		default:
4448 			/* The above depths are the only ones valid for HDMI. */
4449 			return false;
4450 		}
4451 		if (normalized_clk <= info->max_tmds_clock) {
4452 			timing_out->display_color_depth = depth;
4453 			return true;
4454 		}
4455 	} while (--depth > COLOR_DEPTH_666);
4456 	return false;
4457 }
4458 
4459 static void fill_stream_properties_from_drm_display_mode(
4460 	struct dc_stream_state *stream,
4461 	const struct drm_display_mode *mode_in,
4462 	const struct drm_connector *connector,
4463 	const struct drm_connector_state *connector_state,
4464 	const struct dc_stream_state *old_stream,
4465 	int requested_bpc)
4466 {
4467 	struct dc_crtc_timing *timing_out = &stream->timing;
4468 	const struct drm_display_info *info = &connector->display_info;
4469 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4470 	struct hdmi_vendor_infoframe hv_frame;
4471 	struct hdmi_avi_infoframe avi_frame;
4472 
4473 	memset(&hv_frame, 0, sizeof(hv_frame));
4474 	memset(&avi_frame, 0, sizeof(avi_frame));
4475 
4476 	timing_out->h_border_left = 0;
4477 	timing_out->h_border_right = 0;
4478 	timing_out->v_border_top = 0;
4479 	timing_out->v_border_bottom = 0;
4480 	/* TODO: un-hardcode */
4481 	if (drm_mode_is_420_only(info, mode_in)
4482 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4483 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4484 	else if (drm_mode_is_420_also(info, mode_in)
4485 			&& aconnector->force_yuv420_output)
4486 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4487 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4488 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4489 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4490 	else
4491 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4492 
4493 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4494 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4495 		connector,
4496 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4497 		requested_bpc);
4498 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4499 	timing_out->hdmi_vic = 0;
4500 
4501 	if (old_stream) {
4502 		timing_out->vic = old_stream->timing.vic;
4503 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4504 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4505 	} else {
4506 		timing_out->vic = drm_match_cea_mode(mode_in);
4507 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4508 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4509 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4510 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4511 	}
4512 
4513 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4514 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4515 		timing_out->vic = avi_frame.video_code;
4516 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4517 		timing_out->hdmi_vic = hv_frame.vic;
4518 	}
4519 
4520 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4521 	timing_out->h_total = mode_in->crtc_htotal;
4522 	timing_out->h_sync_width =
4523 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4524 	timing_out->h_front_porch =
4525 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4526 	timing_out->v_total = mode_in->crtc_vtotal;
4527 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4528 	timing_out->v_front_porch =
4529 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4530 	timing_out->v_sync_width =
4531 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4532 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4533 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4534 
4535 	stream->output_color_space = get_output_color_space(timing_out);
4536 
4537 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4538 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4539 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4540 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4541 		    drm_mode_is_420_also(info, mode_in) &&
4542 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4543 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4544 			adjust_colour_depth_from_display_info(timing_out, info);
4545 		}
4546 	}
4547 }
4548 
4549 static void fill_audio_info(struct audio_info *audio_info,
4550 			    const struct drm_connector *drm_connector,
4551 			    const struct dc_sink *dc_sink)
4552 {
4553 	int i = 0;
4554 	int cea_revision = 0;
4555 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4556 
4557 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4558 	audio_info->product_id = edid_caps->product_id;
4559 
4560 	cea_revision = drm_connector->display_info.cea_rev;
4561 
4562 #ifdef __linux__
4563 	strscpy(audio_info->display_name,
4564 		edid_caps->display_name,
4565 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4566 #else
4567 	strncpy(audio_info->display_name,
4568 		edid_caps->display_name,
4569 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
4570 #endif
4571 
4572 	if (cea_revision >= 3) {
4573 		audio_info->mode_count = edid_caps->audio_mode_count;
4574 
4575 		for (i = 0; i < audio_info->mode_count; ++i) {
4576 			audio_info->modes[i].format_code =
4577 					(enum audio_format_code)
4578 					(edid_caps->audio_modes[i].format_code);
4579 			audio_info->modes[i].channel_count =
4580 					edid_caps->audio_modes[i].channel_count;
4581 			audio_info->modes[i].sample_rates.all =
4582 					edid_caps->audio_modes[i].sample_rate;
4583 			audio_info->modes[i].sample_size =
4584 					edid_caps->audio_modes[i].sample_size;
4585 		}
4586 	}
4587 
4588 	audio_info->flags.all = edid_caps->speaker_flags;
4589 
4590 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4591 	if (drm_connector->latency_present[0]) {
4592 		audio_info->video_latency = drm_connector->video_latency[0];
4593 		audio_info->audio_latency = drm_connector->audio_latency[0];
4594 	}
4595 
4596 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4598 }
4599 
4600 static void
4601 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4602 				      struct drm_display_mode *dst_mode)
4603 {
4604 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4605 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4606 	dst_mode->crtc_clock = src_mode->crtc_clock;
4607 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4608 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4609 	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4610 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4611 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4612 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4613 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4614 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4615 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4616 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4617 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4618 }
4619 
4620 static void
4621 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4622 					const struct drm_display_mode *native_mode,
4623 					bool scale_enabled)
4624 {
4625 	if (scale_enabled) {
4626 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4627 	} else if (native_mode->clock == drm_mode->clock &&
4628 			native_mode->htotal == drm_mode->htotal &&
4629 			native_mode->vtotal == drm_mode->vtotal) {
4630 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4631 	} else {
4632 		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
4633 	}
4634 }
4635 
4636 static struct dc_sink *
4637 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4638 {
4639 	struct dc_sink_init_data sink_init_data = { 0 };
4640 	struct dc_sink *sink = NULL;

4641 	sink_init_data.link = aconnector->dc_link;
4642 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4643 
4644 	sink = dc_sink_create(&sink_init_data);
4645 	if (!sink) {
4646 		DRM_ERROR("Failed to create sink!\n");
4647 		return NULL;
4648 	}
4649 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4650 
4651 	return sink;
4652 }
4653 
4654 static void set_multisync_trigger_params(
4655 		struct dc_stream_state *stream)
4656 {
4657 	if (stream->triggered_crtc_reset.enabled) {
4658 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4659 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4660 	}
4661 }
4662 
4663 static void set_master_stream(struct dc_stream_state *stream_set[],
4664 			      int stream_count)
4665 {
4666 	int j, highest_rfr = 0, master_stream = 0;
4667 
4668 	for (j = 0;  j < stream_count; j++) {
4669 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4670 			int refresh_rate = 0;
4671 
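			/*
			 * pix_clk_100hz is in units of 100Hz, so multiply
			 * by 100 to get Hz before dividing by the pixel
			 * count of one frame (h_total * v_total).
			 */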
4672 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4673 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4674 			if (refresh_rate > highest_rfr) {
4675 				highest_rfr = refresh_rate;
4676 				master_stream = j;
4677 			}
4678 		}
4679 	}
4680 	for (j = 0;  j < stream_count; j++) {
4681 		if (stream_set[j])
4682 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4683 	}
4684 }
4685 
4686 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4687 {
4688 	int i = 0;
4689 
4690 	if (context->stream_count < 2)
4691 		return;
4692 	for (i = 0; i < context->stream_count ; i++) {
4693 		if (!context->streams[i])
4694 			continue;
4695 		/*
4696 		 * TODO: add a function to read AMD VSDB bits and set
4697 		 * crtc_sync_master.multi_sync_enabled flag
4698 		 * For now it's set to false
4699 		 */
4700 		set_multisync_trigger_params(context->streams[i]);
4701 	}
4702 	set_master_stream(context->streams, context->stream_count);
4703 }
4704 
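/*
 * Build a dc_stream_state for a connector and display mode. If no sink
 * is attached, a fake (virtual) sink is created so a stream can still
 * be constructed, e.g. for forced-on connectors.
 */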
4705 static struct dc_stream_state *
4706 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4707 		       const struct drm_display_mode *drm_mode,
4708 		       const struct dm_connector_state *dm_state,
4709 		       const struct dc_stream_state *old_stream,
4710 		       int requested_bpc)
4711 {
4712 	struct drm_display_mode *preferred_mode = NULL;
4713 	struct drm_connector *drm_connector;
4714 	const struct drm_connector_state *con_state =
4715 		dm_state ? &dm_state->base : NULL;
4716 	struct dc_stream_state *stream = NULL;
4717 	struct drm_display_mode mode = *drm_mode;
4718 	bool native_mode_found = false;
4719 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4720 	int mode_refresh;
4721 	int preferred_refresh = 0;
4722 #if defined(CONFIG_DRM_AMD_DC_DCN)
4723 	struct dsc_dec_dpcd_caps dsc_caps;
4724 #endif
4725 	uint32_t link_bandwidth_kbps;
4727 	struct dc_sink *sink = NULL;

4728 	if (aconnector == NULL) {
4729 		DRM_ERROR("aconnector is NULL!\n");
4730 		return stream;
4731 	}
4732 
4733 	drm_connector = &aconnector->base;
4734 
4735 	if (!aconnector->dc_sink) {
4736 		sink = create_fake_sink(aconnector);
4737 		if (!sink)
4738 			return stream;
4739 	} else {
4740 		sink = aconnector->dc_sink;
4741 		dc_sink_retain(sink);
4742 	}
4743 
4744 	stream = dc_create_stream_for_sink(sink);
4745 
4746 	if (stream == NULL) {
4747 		DRM_ERROR("Failed to create stream for sink!\n");
4748 		goto finish;
4749 	}
4750 
4751 	stream->dm_stream_context = aconnector;
4752 
4753 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4754 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4755 
4756 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4757 		/* Search for preferred mode */
4758 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4759 			native_mode_found = true;
4760 			break;
4761 		}
4762 	}
4763 	if (!native_mode_found)
4764 		preferred_mode = list_first_entry_or_null(
4765 				&aconnector->base.modes,
4766 				struct drm_display_mode,
4767 				head);
4768 
4769 	mode_refresh = drm_mode_vrefresh(&mode);
4770 
4771 	if (preferred_mode == NULL) {
4772 		/*
4773 		 * This may not be an error, the use case is when we have no
4774 		 * usermode calls to reset and set mode upon hotplug. In this
4775 		 * case, we call set mode ourselves to restore the previous mode
4776 		 * and the modelist may not be filled in in time.
4777 		 */
4778 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4779 	} else {
4780 		decide_crtc_timing_for_drm_display_mode(
4781 				&mode, preferred_mode,
4782 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4783 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4784 	}
4785 
4786 	if (!dm_state)
4787 		drm_mode_set_crtcinfo(&mode, 0);
4788 
4789 	/*
4790 	 * If scaling is enabled and the refresh rate didn't change,
4791 	 * we copy the vic and polarities of the old timings.
4792 	 */
4793 	if (!scale || mode_refresh != preferred_refresh)
4794 		fill_stream_properties_from_drm_display_mode(stream,
4795 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4796 	else
4797 		fill_stream_properties_from_drm_display_mode(stream,
4798 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4799 
4800 	stream->timing.flags.DSC = 0;
4801 
4802 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4803 #if defined(CONFIG_DRM_AMD_DC_DCN)
4804 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4805 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4806 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4807 				      &dsc_caps);
4808 #endif
4809 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4810 							     dc_link_get_link_cap(aconnector->dc_link));
4811 
4812 #if defined(CONFIG_DRM_AMD_DC_DCN)
4813 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
4814 			/* Set DSC policy according to dsc_clock_en */
4815 			dc_dsc_policy_set_enable_dsc_when_not_needed(
4816 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
4817 
4818 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4819 						  &dsc_caps,
4820 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4821 						  link_bandwidth_kbps,
4822 						  &stream->timing,
4823 						  &stream->timing.dsc_cfg))
4824 				stream->timing.flags.DSC = 1;
4825 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4826 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
4827 				stream->timing.flags.DSC = 1;
4828 
4829 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
4830 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
4831 
4832 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
4833 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
4834 
4835 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4836 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4837 		}
4838 #endif
4839 	}
4840 
4841 	update_stream_scaling_settings(&mode, dm_state, stream);
4842 
4843 	fill_audio_info(
4844 		&stream->audio_info,
4845 		drm_connector,
4846 		sink);
4847 
4848 	update_stream_signal(stream, sink);
4849 
4850 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4851 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4852 
4853 	if (stream->link->psr_settings.psr_feature_enabled) {
4854 		/*
4855 		 * Decide whether the stream supports VSC SDP colorimetry
4856 		 * before building the VSC info packet.
4857 		 */
4858 		stream->use_vsc_sdp_for_colorimetry = false;
4859 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4860 			stream->use_vsc_sdp_for_colorimetry =
4861 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4862 		} else {
4863 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4864 				stream->use_vsc_sdp_for_colorimetry = true;
4865 		}
4866 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4867 	}
4868 finish:
4869 	dc_sink_release(sink);
4870 
4871 	return stream;
4872 }
4873 
4874 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4875 {
4876 	drm_crtc_cleanup(crtc);
4877 	kfree(crtc);
4878 }
4879 
4880 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4881 				  struct drm_crtc_state *state)
4882 {
4883 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4884 
4885 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4886 	if (cur->stream)
4887 		dc_stream_release(cur->stream);
4888 
4890 	__drm_atomic_helper_crtc_destroy_state(state);
4891 
4893 	kfree(state);
4894 }
4895 
4896 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4897 {
4898 	struct dm_crtc_state *state;
4899 
4900 	if (crtc->state)
4901 		dm_crtc_destroy_state(crtc, crtc->state);
4902 
4903 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4904 	if (WARN_ON(!state))
4905 		return;
4906 
4907 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4908 }
4909 
4910 static struct drm_crtc_state *
4911 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4912 {
4913 	struct dm_crtc_state *state, *cur;
4914 
4915 	if (WARN_ON(!crtc->state))
4916 		return NULL;
4917 
4918 	cur = to_dm_crtc_state(crtc->state);
4919 
4920 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4921 	if (!state)
4922 		return NULL;
4923 
4924 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4925 
4926 	if (cur->stream) {
4927 		state->stream = cur->stream;
4928 		dc_stream_retain(state->stream);
4929 	}
4930 
4931 	state->active_planes = cur->active_planes;
4932 	state->vrr_infopacket = cur->vrr_infopacket;
4933 	state->abm_level = cur->abm_level;
4934 	state->vrr_supported = cur->vrr_supported;
4935 	state->freesync_config = cur->freesync_config;
4936 	state->crc_src = cur->crc_src;
4937 	state->cm_has_degamma = cur->cm_has_degamma;
4938 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4939 
4940 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4941 
4942 	return &state->base;
4943 }
4944 
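/*
 * Enable or disable the VUPDATE interrupt for a CRTC. The vblank hooks
 * below only need VUPDATE while VRR is active.
 */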
4945 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4946 {
4947 	enum dc_irq_source irq_source;
4948 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4949 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4950 	int rc;
4951 
4952 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4953 
4954 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4955 
4956 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4957 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4958 	return rc;
4959 }
4960 
4961 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4962 {
4963 	enum dc_irq_source irq_source;
4964 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4965 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4966 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4967 	int rc = 0;
4968 
4969 	if (enable) {
4970 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4971 		if (amdgpu_dm_vrr_active(acrtc_state))
4972 			rc = dm_set_vupdate_irq(crtc, true);
4973 	} else {
4974 		/* vblank irq off -> vupdate irq off */
4975 		rc = dm_set_vupdate_irq(crtc, false);
4976 	}
4977 
4978 	if (rc)
4979 		return rc;
4980 
4981 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4982 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4983 }
4984 
4985 static int dm_enable_vblank(struct drm_crtc *crtc)
4986 {
4987 	return dm_set_vblank(crtc, true);
4988 }
4989 
4990 static void dm_disable_vblank(struct drm_crtc *crtc)
4991 {
4992 	dm_set_vblank(crtc, false);
4993 }
4994 
4995 /* Only the options currently available for the driver are implemented */
4996 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4997 	.reset = dm_crtc_reset_state,
4998 	.destroy = amdgpu_dm_crtc_destroy,
4999 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5000 	.set_config = drm_atomic_helper_set_config,
5001 	.page_flip = drm_atomic_helper_page_flip,
5002 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5003 	.atomic_destroy_state = dm_crtc_destroy_state,
5004 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5005 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5006 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5007 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5008 	.enable_vblank = dm_enable_vblank,
5009 	.disable_vblank = dm_disable_vblank,
5010 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5011 };
5012 
5013 static enum drm_connector_status
5014 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5015 {
5016 	bool connected;
5017 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5018 
5019 	/*
5020 	 * Notes:
5021 	 * 1. This interface is NOT called in context of HPD irq.
5022 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5023 	 * makes it a bad place for *any* MST-related activity.
5024 	 */
5025 
5026 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5027 	    !aconnector->fake_enable)
5028 		connected = (aconnector->dc_sink != NULL);
5029 	else
5030 		connected = (aconnector->base.force == DRM_FORCE_ON);
5031 
5032 	update_subconnector_property(aconnector);
5033 
5034 	return (connected ? connector_status_connected :
5035 			connector_status_disconnected);
5036 }
5037 
5038 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5039 					    struct drm_connector_state *connector_state,
5040 					    struct drm_property *property,
5041 					    uint64_t val)
5042 {
5043 	struct drm_device *dev = connector->dev;
5044 	struct amdgpu_device *adev = drm_to_adev(dev);
5045 	struct dm_connector_state *dm_old_state =
5046 		to_dm_connector_state(connector->state);
5047 	struct dm_connector_state *dm_new_state =
5048 		to_dm_connector_state(connector_state);
5049 
5050 	int ret = -EINVAL;
5051 
5052 	if (property == dev->mode_config.scaling_mode_property) {
5053 		enum amdgpu_rmx_type rmx_type;
5054 
5055 		switch (val) {
5056 		case DRM_MODE_SCALE_CENTER:
5057 			rmx_type = RMX_CENTER;
5058 			break;
5059 		case DRM_MODE_SCALE_ASPECT:
5060 			rmx_type = RMX_ASPECT;
5061 			break;
5062 		case DRM_MODE_SCALE_FULLSCREEN:
5063 			rmx_type = RMX_FULL;
5064 			break;
5065 		case DRM_MODE_SCALE_NONE:
5066 		default:
5067 			rmx_type = RMX_OFF;
5068 			break;
5069 		}
5070 
5071 		if (dm_old_state->scaling == rmx_type)
5072 			return 0;
5073 
5074 		dm_new_state->scaling = rmx_type;
5075 		ret = 0;
5076 	} else if (property == adev->mode_info.underscan_hborder_property) {
5077 		dm_new_state->underscan_hborder = val;
5078 		ret = 0;
5079 	} else if (property == adev->mode_info.underscan_vborder_property) {
5080 		dm_new_state->underscan_vborder = val;
5081 		ret = 0;
5082 	} else if (property == adev->mode_info.underscan_property) {
5083 		dm_new_state->underscan_enable = val;
5084 		ret = 0;
5085 	} else if (property == adev->mode_info.abm_level_property) {
5086 		dm_new_state->abm_level = val;
5087 		ret = 0;
5088 	}
5089 
5090 	return ret;
5091 }
5092 
5093 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5094 					    const struct drm_connector_state *state,
5095 					    struct drm_property *property,
5096 					    uint64_t *val)
5097 {
5098 	struct drm_device *dev = connector->dev;
5099 	struct amdgpu_device *adev = drm_to_adev(dev);
5100 	struct dm_connector_state *dm_state =
5101 		to_dm_connector_state(state);
5102 	int ret = -EINVAL;
5103 
5104 	if (property == dev->mode_config.scaling_mode_property) {
5105 		switch (dm_state->scaling) {
5106 		case RMX_CENTER:
5107 			*val = DRM_MODE_SCALE_CENTER;
5108 			break;
5109 		case RMX_ASPECT:
5110 			*val = DRM_MODE_SCALE_ASPECT;
5111 			break;
5112 		case RMX_FULL:
5113 			*val = DRM_MODE_SCALE_FULLSCREEN;
5114 			break;
5115 		case RMX_OFF:
5116 		default:
5117 			*val = DRM_MODE_SCALE_NONE;
5118 			break;
5119 		}
5120 		ret = 0;
5121 	} else if (property == adev->mode_info.underscan_hborder_property) {
5122 		*val = dm_state->underscan_hborder;
5123 		ret = 0;
5124 	} else if (property == adev->mode_info.underscan_vborder_property) {
5125 		*val = dm_state->underscan_vborder;
5126 		ret = 0;
5127 	} else if (property == adev->mode_info.underscan_property) {
5128 		*val = dm_state->underscan_enable;
5129 		ret = 0;
5130 	} else if (property == adev->mode_info.abm_level_property) {
5131 		*val = dm_state->abm_level;
5132 		ret = 0;
5133 	}
5134 
5135 	return ret;
5136 }
5137 
5138 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5139 {
5140 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5141 
5142 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5143 }
5144 
5145 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5146 {
5147 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5148 	const struct dc_link *link = aconnector->dc_link;
5149 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5150 	struct amdgpu_display_manager *dm = &adev->dm;
5151 
5152 	/*
5153 	 * Call only if mst_mgr was initialized before, since it's not done
5154 	 * for all connector types.
5155 	 */
5156 	if (aconnector->mst_mgr.dev)
5157 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5158 
5159 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5160 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5161 
5162 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5163 	    link->type != dc_connection_none &&
5164 	    dm->backlight_dev) {
5165 		backlight_device_unregister(dm->backlight_dev);
5166 		dm->backlight_dev = NULL;
5167 	}
5168 #endif
5169 
5170 	if (aconnector->dc_em_sink)
5171 		dc_sink_release(aconnector->dc_em_sink);
5172 	aconnector->dc_em_sink = NULL;
5173 	if (aconnector->dc_sink)
5174 		dc_sink_release(aconnector->dc_sink);
5175 	aconnector->dc_sink = NULL;
5176 
5177 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5178 	drm_connector_unregister(connector);
5179 	drm_connector_cleanup(connector);
5180 	if (aconnector->i2c) {
5181 		i2c_del_adapter(&aconnector->i2c->base);
5182 		kfree(aconnector->i2c);
5183 	}
5184 	kfree(aconnector->dm_dp_aux.aux.name);
5185 
5186 	kfree(connector);
5187 }
5188 
5189 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5190 {
5191 	struct dm_connector_state *state =
5192 		to_dm_connector_state(connector->state);
5193 
5194 	if (connector->state)
5195 		__drm_atomic_helper_connector_destroy_state(connector->state);
5196 
5197 	kfree(state);
5198 
5199 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5200 
5201 	if (state) {
5202 		state->scaling = RMX_OFF;
5203 		state->underscan_enable = false;
5204 		state->underscan_hborder = 0;
5205 		state->underscan_vborder = 0;
5206 		state->base.max_requested_bpc = 8;
5207 		state->vcpi_slots = 0;
5208 		state->pbn = 0;
5209 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5210 			state->abm_level = amdgpu_dm_abm_level;
5211 
5212 		__drm_atomic_helper_connector_reset(connector, &state->base);
5213 	}
5214 }
5215 
5216 struct drm_connector_state *
5217 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5218 {
5219 	struct dm_connector_state *state =
5220 		to_dm_connector_state(connector->state);
5221 
5222 	struct dm_connector_state *new_state =
5223 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5224 
5225 	if (!new_state)
5226 		return NULL;
5227 
5228 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5229 
5230 	new_state->freesync_capable = state->freesync_capable;
5231 	new_state->abm_level = state->abm_level;
5232 	new_state->scaling = state->scaling;
5233 	new_state->underscan_enable = state->underscan_enable;
5234 	new_state->underscan_hborder = state->underscan_hborder;
5235 	new_state->underscan_vborder = state->underscan_vborder;
5236 	new_state->vcpi_slots = state->vcpi_slots;
5237 	new_state->pbn = state->pbn;
5238 	return &new_state->base;
5239 }
5240 
5241 static int
5242 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5243 {
5244 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5245 		to_amdgpu_dm_connector(connector);
5246 	int r;
5247 
5248 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5249 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5250 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5251 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5252 		if (r)
5253 			return r;
5254 	}
5255 
5256 #if defined(CONFIG_DEBUG_FS)
5257 	connector_debugfs_init(amdgpu_dm_connector);
5258 #endif
5259 
5260 	return 0;
5261 }
5262 
5263 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5264 	.reset = amdgpu_dm_connector_funcs_reset,
5265 	.detect = amdgpu_dm_connector_detect,
5266 	.fill_modes = drm_helper_probe_single_connector_modes,
5267 	.destroy = amdgpu_dm_connector_destroy,
5268 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5269 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5270 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5271 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5272 	.late_register = amdgpu_dm_connector_late_register,
5273 	.early_unregister = amdgpu_dm_connector_unregister
5274 };
5275 
5276 static int get_modes(struct drm_connector *connector)
5277 {
5278 	return amdgpu_dm_connector_get_modes(connector);
5279 }
5280 
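/*
 * Create an emulated (virtual) DC sink from the EDID blob that userspace
 * forced on the connector, so that a forced-on connector still has a sink
 * to validate and create streams against.
 */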
5281 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5282 {
5283 	struct dc_sink_init_data init_params = {
5284 			.link = aconnector->dc_link,
5285 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5286 	};
5287 	struct edid *edid;
5288 
5289 	if (!aconnector->base.edid_blob_ptr) {
5290 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5291 				aconnector->base.name);
5292 
5293 		aconnector->base.force = DRM_FORCE_OFF;
5294 		aconnector->base.override_edid = false;
5295 		return;
5296 	}
5297 
5298 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5299 
5300 	aconnector->edid = edid;
5301 
5302 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5303 		aconnector->dc_link,
5304 		(uint8_t *)edid,
5305 		(edid->extensions + 1) * EDID_LENGTH,
5306 		&init_params);
5307 
5308 	if (aconnector->base.force == DRM_FORCE_ON) {
5309 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5310 		aconnector->dc_link->local_sink :
5311 		aconnector->dc_em_sink;
5312 		dc_sink_retain(aconnector->dc_sink);
5313 	}
5314 }
5315 
5316 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5317 {
5318 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5319 
5320 	/*
5321 	 * In case of a headless boot with force on for a DP-managed connector,
5322 	 * these settings have to be != 0 to get an initial modeset.
5323 	 */
5324 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5325 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5326 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5327 	}
5328 
5329 
5330 	aconnector->base.override_edid = true;
5331 	create_eml_sink(aconnector);
5332 }
5333 
5334 static struct dc_stream_state *
5335 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5336 				const struct drm_display_mode *drm_mode,
5337 				const struct dm_connector_state *dm_state,
5338 				const struct dc_stream_state *old_stream)
5339 {
5340 	struct drm_connector *connector = &aconnector->base;
5341 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5342 	struct dc_stream_state *stream;
5343 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5344 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5345 	enum dc_status dc_result = DC_OK;
5346 
5347 	do {
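	/*
	 * Validate the stream at the requested bpc and, if DC rejects it,
	 * step the bpc down by 2 until validation succeeds or we hit 6 bpc.
	 * If encoder validation still fails, retry once with YCbCr420 forced.
	 */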
5348 		stream = create_stream_for_sink(aconnector, drm_mode,
5349 						dm_state, old_stream,
5350 						requested_bpc);
5351 		if (stream == NULL) {
5352 			DRM_ERROR("Failed to create stream for sink!\n");
5353 			break;
5354 		}
5355 
5356 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5357 
5358 		if (dc_result != DC_OK) {
5359 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5360 				      drm_mode->hdisplay,
5361 				      drm_mode->vdisplay,
5362 				      drm_mode->clock,
5363 				      dc_result,
5364 				      dc_status_to_str(dc_result));
5365 
5366 			dc_stream_release(stream);
5367 			stream = NULL;
5368 			requested_bpc -= 2; /* lower bpc to retry validation */
5369 		}
5370 
5371 	} while (stream == NULL && requested_bpc >= 6);
5372 
5373 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
5374 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
5375 
5376 		aconnector->force_yuv420_output = true;
5377 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
5378 						dm_state, old_stream);
5379 		aconnector->force_yuv420_output = false;
5380 	}
5381 
5382 	return stream;
5383 }
5384 
5385 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5386 				   struct drm_display_mode *mode)
5387 {
5388 	int result = MODE_ERROR;
5389 	struct dc_sink *dc_sink;
5390 	/* TODO: Unhardcode stream count */
5391 	struct dc_stream_state *stream;
5392 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5393 
5394 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5395 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5396 		return result;
5397 
5398 	/*
5399 	 * Only run this the first time mode_valid is called, to initialize
5400 	 * EDID mgmt.
5401 	 */
5402 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5403 		!aconnector->dc_em_sink)
5404 		handle_edid_mgmt(aconnector);
5405 
5406 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5407 
5408 	if (dc_sink == NULL) {
5409 		DRM_ERROR("dc_sink is NULL!\n");
5410 		goto fail;
5411 	}
5412 
5413 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5414 	if (stream) {
5415 		dc_stream_release(stream);
5416 		result = MODE_OK;
5417 	}
5418 
5419 fail:
5420 	/* TODO: error handling */
5421 	return result;
5422 }
5423 
5424 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5425 				struct dc_info_packet *out)
5426 {
5427 	struct hdmi_drm_infoframe frame;
5428 	unsigned char buf[30]; /* 26 + 4 */
5429 	ssize_t len;
5430 	int ret, i;
5431 
5432 	memset(out, 0, sizeof(*out));
5433 
5434 	if (!state->hdr_output_metadata)
5435 		return 0;
5436 
5437 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5438 	if (ret)
5439 		return ret;
5440 
5441 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5442 	if (len < 0)
5443 		return (int)len;
5444 
5445 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5446 	if (len != 30)
5447 		return -EINVAL;
5448 
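	/*
	 * The 4-byte infoframe header is consumed here; only the 26-byte
	 * static metadata payload is copied into the packet body. HDMI uses
	 * the DRM infoframe header (type/version/length) directly, while
	 * DP/eDP wraps the same payload in an SDP header.
	 */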
5449 	/* Prepare the infopacket for DC. */
5450 	switch (state->connector->connector_type) {
5451 	case DRM_MODE_CONNECTOR_HDMIA:
5452 		out->hb0 = 0x87; /* type */
5453 		out->hb1 = 0x01; /* version */
5454 		out->hb2 = 0x1A; /* length */
5455 		out->sb[0] = buf[3]; /* checksum */
5456 		i = 1;
5457 		break;
5458 
5459 	case DRM_MODE_CONNECTOR_DisplayPort:
5460 	case DRM_MODE_CONNECTOR_eDP:
5461 		out->hb0 = 0x00; /* sdp id, zero */
5462 		out->hb1 = 0x87; /* type */
5463 		out->hb2 = 0x1D; /* payload len - 1 */
5464 		out->hb3 = (0x13 << 2); /* sdp version */
5465 		out->sb[0] = 0x01; /* version */
5466 		out->sb[1] = 0x1A; /* length */
5467 		i = 2;
5468 		break;
5469 
5470 	default:
5471 		return -EINVAL;
5472 	}
5473 
5474 	memcpy(&out->sb[i], &buf[4], 26);
5475 	out->valid = true;
5476 
5477 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5478 		       sizeof(out->sb), false);
5479 
5480 	return 0;
5481 }
5482 
5483 static bool
5484 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5485 			  const struct drm_connector_state *new_state)
5486 {
5487 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5488 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5489 
5490 	if (old_blob != new_blob) {
5491 		if (old_blob && new_blob &&
5492 		    old_blob->length == new_blob->length)
5493 			return memcmp(old_blob->data, new_blob->data,
5494 				      old_blob->length);
5495 
5496 		return true;
5497 	}
5498 
5499 	return false;
5500 }
5501 
5502 static int
5503 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5504 				 struct drm_atomic_state *state)
5505 {
5506 	struct drm_connector_state *new_con_state =
5507 		drm_atomic_get_new_connector_state(state, conn);
5508 	struct drm_connector_state *old_con_state =
5509 		drm_atomic_get_old_connector_state(state, conn);
5510 	struct drm_crtc *crtc = new_con_state->crtc;
5511 	struct drm_crtc_state *new_crtc_state;
5512 	int ret;
5513 
5514 	if (!crtc)
5515 		return 0;
5516 
5517 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5518 		struct dc_info_packet hdr_infopacket;
5519 
5520 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5521 		if (ret)
5522 			return ret;
5523 
5524 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5525 		if (IS_ERR(new_crtc_state))
5526 			return PTR_ERR(new_crtc_state);
5527 
5528 		/*
5529 		 * DC considers the stream backends changed if the
5530 		 * static metadata changes. Forcing the modeset also
5531 		 * gives a simple way for userspace to switch from
5532 		 * 8bpc to 10bpc when setting the metadata to enter
5533 		 * or exit HDR.
5534 		 *
5535 		 * Changing the static metadata after it's been
5536 		 * set is permissible, however. So only force a
5537 		 * modeset if we're entering or exiting HDR.
5538 		 */
5539 		new_crtc_state->mode_changed =
5540 			!old_con_state->hdr_output_metadata ||
5541 			!new_con_state->hdr_output_metadata;
5542 	}
5543 
5544 	return 0;
5545 }
5546 
5547 static const struct drm_connector_helper_funcs
5548 amdgpu_dm_connector_helper_funcs = {
5549 	/*
5550 	 * When hotplugging a second, bigger display in FB console mode, the
5551 	 * bigger resolution modes are filtered out by drm_mode_validate_size()
5552 	 * and are missing after the user starts lightdm. So we need to rebuild
5553 	 * the mode list in the get_modes callback, not just return the mode count.
5554 	 */
5555 	.get_modes = get_modes,
5556 	.mode_valid = amdgpu_dm_connector_mode_valid,
5557 	.atomic_check = amdgpu_dm_connector_atomic_check,
5558 };
5559 
5560 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5561 {
5562 }
5563 
5564 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5565 {
5566 	struct drm_atomic_state *state = new_crtc_state->state;
5567 	struct drm_plane *plane;
5568 	int num_active = 0;
5569 
5570 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5571 		struct drm_plane_state *new_plane_state;
5572 
5573 		/* Cursor planes are "fake". */
5574 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5575 			continue;
5576 
5577 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5578 
5579 		if (!new_plane_state) {
5580 			/*
5581 			 * The plane is enabled on the CRTC and hasn't changed
5582 			 * state. This means that it previously passed
5583 			 * validation and is therefore enabled.
5584 			 */
5585 			num_active += 1;
5586 			continue;
5587 		}
5588 
5589 		/* We need a framebuffer to be considered enabled. */
5590 		num_active += (new_plane_state->fb != NULL);
5591 	}
5592 
5593 	return num_active;
5594 }
5595 
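/* Recompute and cache the number of active planes for the new CRTC state. */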
5596 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5597 					 struct drm_crtc_state *new_crtc_state)
5598 {
5599 	struct dm_crtc_state *dm_new_crtc_state =
5600 		to_dm_crtc_state(new_crtc_state);
5601 
5602 	dm_new_crtc_state->active_planes = 0;
5603 
5604 	if (!dm_new_crtc_state->stream)
5605 		return;
5606 
5607 	dm_new_crtc_state->active_planes =
5608 		count_crtc_active_planes(new_crtc_state);
5609 }
5610 
5611 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5612 				       struct drm_crtc_state *state)
5613 {
5614 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5615 	struct dc *dc = adev->dm.dc;
5616 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5617 	int ret = -EINVAL;
5618 
5619 	dm_update_crtc_active_planes(crtc, state);
5620 
5621 	if (unlikely(!dm_crtc_state->stream &&
5622 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5623 		WARN_ON(1);
5624 		return ret;
5625 	}
5626 
5627 	/*
5628 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5629 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5630 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5631 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5632 	 */
5633 	if (state->enable &&
5634 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5635 		return -EINVAL;
5636 
5637 	/* In some use cases, like reset, no stream is attached */
5638 	if (!dm_crtc_state->stream)
5639 		return 0;
5640 
5641 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5642 		return 0;
5643 
5644 	return ret;
5645 }
5646 
5647 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5648 				      const struct drm_display_mode *mode,
5649 				      struct drm_display_mode *adjusted_mode)
5650 {
5651 	return true;
5652 }
5653 
5654 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5655 	.disable = dm_crtc_helper_disable,
5656 	.atomic_check = dm_crtc_helper_atomic_check,
5657 	.mode_fixup = dm_crtc_helper_mode_fixup,
5658 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5659 };
5660 
5661 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5662 {
5663 
5664 }
5665 
5666 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5667 {
5668 	switch (display_color_depth) {
5669 	case COLOR_DEPTH_666:
5670 		return 6;
5671 	case COLOR_DEPTH_888:
5672 		return 8;
5673 	case COLOR_DEPTH_101010:
5674 		return 10;
5675 	case COLOR_DEPTH_121212:
5676 		return 12;
5677 	case COLOR_DEPTH_141414:
5678 		return 14;
5679 	case COLOR_DEPTH_161616:
5680 		return 16;
5681 	default:
5682 		break;
5683 	}
5684 	return 0;
5685 }
5686 
5687 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5688 					  struct drm_crtc_state *crtc_state,
5689 					  struct drm_connector_state *conn_state)
5690 {
5691 	struct drm_atomic_state *state = crtc_state->state;
5692 	struct drm_connector *connector = conn_state->connector;
5693 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5694 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5695 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5696 	struct drm_dp_mst_topology_mgr *mst_mgr;
5697 	struct drm_dp_mst_port *mst_port;
5698 	enum dc_color_depth color_depth;
5699 	int clock, bpp = 0;
5700 	bool is_y420 = false;
5701 
5702 	if (!aconnector->port || !aconnector->dc_sink)
5703 		return 0;
5704 
5705 	mst_port = aconnector->port;
5706 	mst_mgr = &aconnector->mst_port->mst_mgr;
5707 
5708 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5709 		return 0;
5710 
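	/*
	 * Compute the PBN (payload bandwidth number) for this mode/bpp and
	 * ask the MST manager for VCPI slots; on a duplicated state the PBN
	 * computed previously is reused.
	 */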
5711 	if (!state->duplicated) {
5712 		int max_bpc = conn_state->max_requested_bpc;
5713 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5714 				aconnector->force_yuv420_output;
5715 		color_depth = convert_color_depth_from_display_info(connector,
5716 								    is_y420,
5717 								    max_bpc);
5718 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5719 		clock = adjusted_mode->clock;
5720 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5721 	}
5722 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5723 									   mst_mgr,
5724 									   mst_port,
5725 									   dm_new_connector_state->pbn,
5726 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5727 	if (dm_new_connector_state->vcpi_slots < 0) {
5728 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5729 		return dm_new_connector_state->vcpi_slots;
5730 	}
5731 	return 0;
5732 }
5733 
5734 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5735 	.disable = dm_encoder_helper_disable,
5736 	.atomic_check = dm_encoder_helper_atomic_check
5737 };
5738 
5739 #if defined(CONFIG_DRM_AMD_DC_DCN)
5740 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5741 					    struct dc_state *dc_state)
5742 {
5743 	struct dc_stream_state *stream = NULL;
5744 	struct drm_connector *connector;
5745 	struct drm_connector_state *new_con_state, *old_con_state;
5746 	struct amdgpu_dm_connector *aconnector;
5747 	struct dm_connector_state *dm_conn_state;
5748 	int i, j, clock, bpp;
5749 	int vcpi, pbn_div, pbn = 0;
5750 
5751 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5752 
5753 		aconnector = to_amdgpu_dm_connector(connector);
5754 
5755 		if (!aconnector->port)
5756 			continue;
5757 
5758 		if (!new_con_state || !new_con_state->crtc)
5759 			continue;
5760 
5761 		dm_conn_state = to_dm_connector_state(new_con_state);
5762 
5763 		for (j = 0; j < dc_state->stream_count; j++) {
5764 			stream = dc_state->streams[j];
5765 			if (!stream)
5766 				continue;
5767 
5768 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5769 				break;
5770 
5771 			stream = NULL;
5772 		}
5773 
5774 		if (!stream)
5775 			continue;
5776 
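		/*
		 * For non-DSC streams, keep the PBN computed during atomic
		 * check and leave DSC disabled on the port; for DSC streams,
		 * recompute the PBN from the DSC target bpp and reallocate
		 * VCPI slots accordingly.
		 */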
5777 		if (stream->timing.flags.DSC != 1) {
5778 			drm_dp_mst_atomic_enable_dsc(state,
5779 						     aconnector->port,
5780 						     dm_conn_state->pbn,
5781 						     0,
5782 						     false);
5783 			continue;
5784 		}
5785 
5786 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5787 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5788 		clock = stream->timing.pix_clk_100hz / 10;
5789 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5790 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5791 						    aconnector->port,
5792 						    pbn, pbn_div,
5793 						    true);
5794 		if (vcpi < 0)
5795 			return vcpi;
5796 
5797 		dm_conn_state->pbn = pbn;
5798 		dm_conn_state->vcpi_slots = vcpi;
5799 	}
5800 	return 0;
5801 }
5802 #endif
5803 
5804 static void dm_drm_plane_reset(struct drm_plane *plane)
5805 {
5806 	struct dm_plane_state *amdgpu_state = NULL;
5807 
5808 	if (plane->state)
5809 		plane->funcs->atomic_destroy_state(plane, plane->state);
5810 
5811 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5812 	WARN_ON(amdgpu_state == NULL);
5813 
5814 	if (amdgpu_state)
5815 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5816 }
5817 
5818 static struct drm_plane_state *
5819 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5820 {
5821 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5822 
5823 	old_dm_plane_state = to_dm_plane_state(plane->state);
5824 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5825 	if (!dm_plane_state)
5826 		return NULL;
5827 
5828 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5829 
5830 	if (old_dm_plane_state->dc_state) {
5831 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5832 		dc_plane_state_retain(dm_plane_state->dc_state);
5833 	}
5834 
5835 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5836 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5837 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5838 
5839 	return &dm_plane_state->base;
5840 }
5841 
5842 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5843 				struct drm_plane_state *state)
5844 {
5845 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5846 
5847 	if (dm_plane_state->dc_state)
5848 		dc_plane_state_release(dm_plane_state->dc_state);
5849 
5850 	drm_atomic_helper_plane_destroy_state(plane, state);
5851 }
5852 
5853 static const struct drm_plane_funcs dm_plane_funcs = {
5854 	.update_plane	= drm_atomic_helper_update_plane,
5855 	.disable_plane	= drm_atomic_helper_disable_plane,
5856 	.destroy	= drm_primary_helper_destroy,
5857 	.reset = dm_drm_plane_reset,
5858 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5859 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5860 };
5861 
5862 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5863 				      struct drm_plane_state *new_state)
5864 {
5865 	struct amdgpu_framebuffer *afb;
5866 	struct drm_gem_object *obj;
5867 	struct amdgpu_device *adev;
5868 	struct amdgpu_bo *rbo;
5869 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5870 	struct list_head list;
5871 	struct ttm_validate_buffer tv;
5872 	struct ww_acquire_ctx ticket;
5873 	uint32_t domain;
5874 	int r;
5875 
5876 	if (!new_state->fb) {
5877 		DRM_DEBUG_DRIVER("No FB bound\n");
5878 		return 0;
5879 	}
5880 
5881 	afb = to_amdgpu_framebuffer(new_state->fb);
5882 	obj = new_state->fb->obj[0];
5883 	rbo = gem_to_amdgpu_bo(obj);
5884 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5885 	INIT_LIST_HEAD(&list);
5886 
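	/*
	 * Reserve the BO with the TTM execbuf utils, pin it to a domain the
	 * display hardware can scan out from, and bind it into the GART so
	 * a valid GPU address is available before taking its offset below.
	 */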
5887 	tv.bo = &rbo->tbo;
5888 	tv.num_shared = 1;
5889 	list_add(&tv.head, &list);
5890 
5891 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5892 	if (r) {
5893 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5894 		return r;
5895 	}
5896 
5897 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5898 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5899 	else
5900 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5901 
5902 	r = amdgpu_bo_pin(rbo, domain);
5903 	if (unlikely(r != 0)) {
5904 		if (r != -ERESTARTSYS)
5905 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5906 		ttm_eu_backoff_reservation(&ticket, &list);
5907 		return r;
5908 	}
5909 
5910 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5911 	if (unlikely(r != 0)) {
5912 		amdgpu_bo_unpin(rbo);
5913 		ttm_eu_backoff_reservation(&ticket, &list);
5914 		DRM_ERROR("%p bind failed\n", rbo);
5915 		return r;
5916 	}
5917 
5918 	ttm_eu_backoff_reservation(&ticket, &list);
5919 
5920 	afb->address = amdgpu_bo_gpu_offset(rbo);
5921 
5922 	amdgpu_bo_ref(rbo);
5923 
5924 	/*
5925 	 * We don't do surface updates on planes that have been newly created,
5926 	 * but we also don't have the afb->address during atomic check.
5927 	 *
5928 	 * Fill in buffer attributes depending on the address here, but only on
5929 	 * newly created planes since they're not being used by DC yet and this
5930 	 * won't modify global state.
5931 	 */
5932 	dm_plane_state_old = to_dm_plane_state(plane->state);
5933 	dm_plane_state_new = to_dm_plane_state(new_state);
5934 
5935 	if (dm_plane_state_new->dc_state &&
5936 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5937 		struct dc_plane_state *plane_state =
5938 			dm_plane_state_new->dc_state;
5939 		bool force_disable_dcc = !plane_state->dcc.enable;
5940 
5941 		fill_plane_buffer_attributes(
5942 			adev, afb, plane_state->format, plane_state->rotation,
5943 			dm_plane_state_new->tiling_flags,
5944 			&plane_state->tiling_info, &plane_state->plane_size,
5945 			&plane_state->dcc, &plane_state->address,
5946 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5947 	}
5948 
5949 	return 0;
5950 }
5951 
5952 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5953 				       struct drm_plane_state *old_state)
5954 {
5955 	struct amdgpu_bo *rbo;
5956 	int r;
5957 
5958 	if (!old_state->fb)
5959 		return;
5960 
5961 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5962 	r = amdgpu_bo_reserve(rbo, false);
5963 	if (unlikely(r)) {
5964 		DRM_ERROR("failed to reserve rbo before unpin\n");
5965 		return;
5966 	}
5967 
5968 	amdgpu_bo_unpin(rbo);
5969 	amdgpu_bo_unreserve(rbo);
5970 	amdgpu_bo_unref(&rbo);
5971 }
5972 
5973 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5974 				       struct drm_crtc_state *new_crtc_state)
5975 {
5976 	int max_downscale = 0;
5977 	int max_upscale = INT_MAX;
5978 
5979 	/* TODO: These should be checked against DC plane caps */
5980 	return drm_atomic_helper_check_plane_state(
5981 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5982 }
5983 
5984 static int dm_plane_atomic_check(struct drm_plane *plane,
5985 				 struct drm_plane_state *state)
5986 {
5987 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5988 	struct dc *dc = adev->dm.dc;
5989 	struct dm_plane_state *dm_plane_state;
5990 	struct dc_scaling_info scaling_info;
5991 	struct drm_crtc_state *new_crtc_state;
5992 	int ret;
5993 
5994 	dm_plane_state = to_dm_plane_state(state);
5995 
5996 	if (!dm_plane_state->dc_state)
5997 		return 0;
5998 
5999 	new_crtc_state =
6000 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6001 	if (!new_crtc_state)
6002 		return -EINVAL;
6003 
6004 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6005 	if (ret)
6006 		return ret;
6007 
6008 	ret = fill_dc_scaling_info(state, &scaling_info);
6009 	if (ret)
6010 		return ret;
6011 
6012 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6013 		return 0;
6014 
6015 	return -EINVAL;
6016 }
6017 
6018 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6019 				       struct drm_plane_state *new_plane_state)
6020 {
6021 	/* Only support async updates on cursor planes. */
6022 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6023 		return -EINVAL;
6024 
6025 	return 0;
6026 }
6027 
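/*
 * Async (fast-path) cursor update: copy the new geometry into the current
 * plane state and program the cursor directly, bypassing a full commit.
 */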
6028 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6029 					 struct drm_plane_state *new_state)
6030 {
6031 	struct drm_plane_state *old_state =
6032 		drm_atomic_get_old_plane_state(new_state->state, plane);
6033 
6034 	swap(plane->state->fb, new_state->fb);
6035 
6036 	plane->state->src_x = new_state->src_x;
6037 	plane->state->src_y = new_state->src_y;
6038 	plane->state->src_w = new_state->src_w;
6039 	plane->state->src_h = new_state->src_h;
6040 	plane->state->crtc_x = new_state->crtc_x;
6041 	plane->state->crtc_y = new_state->crtc_y;
6042 	plane->state->crtc_w = new_state->crtc_w;
6043 	plane->state->crtc_h = new_state->crtc_h;
6044 
6045 	handle_cursor_update(plane, old_state);
6046 }
6047 
6048 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6049 	.prepare_fb = dm_plane_helper_prepare_fb,
6050 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6051 	.atomic_check = dm_plane_atomic_check,
6052 	.atomic_async_check = dm_plane_atomic_async_check,
6053 	.atomic_async_update = dm_plane_atomic_async_update
6054 };
6055 
6056 /*
6057  * TODO: these are currently initialized to rgb formats only.
6058  * For future use cases we should either initialize them dynamically based on
6059  * plane capabilities, or initialize this array to all formats, so the internal
6060  * drm check will succeed, and let DC implement the proper check.
6061  */
6062 static const uint32_t rgb_formats[] = {
6063 	DRM_FORMAT_XRGB8888,
6064 	DRM_FORMAT_ARGB8888,
6065 	DRM_FORMAT_RGBA8888,
6066 	DRM_FORMAT_XRGB2101010,
6067 	DRM_FORMAT_XBGR2101010,
6068 	DRM_FORMAT_ARGB2101010,
6069 	DRM_FORMAT_ABGR2101010,
6070 	DRM_FORMAT_XBGR8888,
6071 	DRM_FORMAT_ABGR8888,
6072 	DRM_FORMAT_RGB565,
6073 };
6074 
6075 static const uint32_t overlay_formats[] = {
6076 	DRM_FORMAT_XRGB8888,
6077 	DRM_FORMAT_ARGB8888,
6078 	DRM_FORMAT_RGBA8888,
6079 	DRM_FORMAT_XBGR8888,
6080 	DRM_FORMAT_ABGR8888,
6081 	DRM_FORMAT_RGB565
6082 };
6083 
6084 static const u32 cursor_formats[] = {
6085 	DRM_FORMAT_ARGB8888
6086 };
6087 
6088 static int get_plane_formats(const struct drm_plane *plane,
6089 			     const struct dc_plane_cap *plane_cap,
6090 			     uint32_t *formats, int max_formats)
6091 {
6092 	int i, num_formats = 0;
6093 
6094 	/*
6095 	 * TODO: Query support for each group of formats directly from
6096 	 * DC plane caps. This will require adding more formats to the
6097 	 * caps list.
6098 	 */
6099 
6100 	switch (plane->type) {
6101 	case DRM_PLANE_TYPE_PRIMARY:
6102 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6103 			if (num_formats >= max_formats)
6104 				break;
6105 
6106 			formats[num_formats++] = rgb_formats[i];
6107 		}
6108 
6109 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6110 			formats[num_formats++] = DRM_FORMAT_NV12;
6111 		if (plane_cap && plane_cap->pixel_format_support.p010)
6112 			formats[num_formats++] = DRM_FORMAT_P010;
6113 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6114 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6115 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6116 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6117 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6118 		}
6119 		break;
6120 
6121 	case DRM_PLANE_TYPE_OVERLAY:
6122 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6123 			if (num_formats >= max_formats)
6124 				break;
6125 
6126 			formats[num_formats++] = overlay_formats[i];
6127 		}
6128 		break;
6129 
6130 	case DRM_PLANE_TYPE_CURSOR:
6131 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6132 			if (num_formats >= max_formats)
6133 				break;
6134 
6135 			formats[num_formats++] = cursor_formats[i];
6136 		}
6137 		break;
6138 	}
6139 
6140 	return num_formats;
6141 }
6142 
6143 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6144 				struct drm_plane *plane,
6145 				unsigned long possible_crtcs,
6146 				const struct dc_plane_cap *plane_cap)
6147 {
6148 	uint32_t formats[32];
6149 	int num_formats;
6150 	int res = -EPERM;
6151 	unsigned int supported_rotations;
6152 
6153 	num_formats = get_plane_formats(plane, plane_cap, formats,
6154 					ARRAY_SIZE(formats));
6155 
6156 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6157 				       &dm_plane_funcs, formats, num_formats,
6158 				       NULL, plane->type, NULL);
6159 	if (res)
6160 		return res;
6161 
6162 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6163 	    plane_cap && plane_cap->per_pixel_alpha) {
6164 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6165 					  BIT(DRM_MODE_BLEND_PREMULTI);
6166 
6167 		drm_plane_create_alpha_property(plane);
6168 		drm_plane_create_blend_mode_property(plane, blend_caps);
6169 	}
6170 
6171 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6172 	    plane_cap &&
6173 	    (plane_cap->pixel_format_support.nv12 ||
6174 	     plane_cap->pixel_format_support.p010)) {
6175 		/* This only affects YUV formats. */
6176 		drm_plane_create_color_properties(
6177 			plane,
6178 			BIT(DRM_COLOR_YCBCR_BT601) |
6179 			BIT(DRM_COLOR_YCBCR_BT709) |
6180 			BIT(DRM_COLOR_YCBCR_BT2020),
6181 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6182 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6183 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6184 	}
6185 
6186 	supported_rotations =
6187 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6188 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6189 
6190 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6191 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6192 						   supported_rotations);
6193 
6194 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6195 
6196 	/* Create (reset) the plane state */
6197 	if (plane->funcs->reset)
6198 		plane->funcs->reset(plane);
6199 
6200 	return 0;
6201 }
6202 
6203 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6204 			       struct drm_plane *plane,
6205 			       uint32_t crtc_index)
6206 {
6207 	struct amdgpu_crtc *acrtc = NULL;
6208 	struct drm_plane *cursor_plane;
6209 
6210 	int res = -ENOMEM;
6211 
6212 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6213 	if (!cursor_plane)
6214 		goto fail;
6215 
6216 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6217 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6218 
6219 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6220 	if (!acrtc)
6221 		goto fail;
6222 
6223 	res = drm_crtc_init_with_planes(
6224 			dm->ddev,
6225 			&acrtc->base,
6226 			plane,
6227 			cursor_plane,
6228 			&amdgpu_dm_crtc_funcs, NULL);
6229 
6230 	if (res)
6231 		goto fail;
6232 
6233 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6234 
6235 	/* Create (reset) the CRTC state */
6236 	if (acrtc->base.funcs->reset)
6237 		acrtc->base.funcs->reset(&acrtc->base);
6238 
6239 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6240 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6241 
6242 	acrtc->crtc_id = crtc_index;
6243 	acrtc->base.enabled = false;
6244 	acrtc->otg_inst = -1;
6245 
6246 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6247 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6248 				   true, MAX_COLOR_LUT_ENTRIES);
6249 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6250 
6251 	return 0;
6252 
6253 fail:
6254 	kfree(acrtc);
6255 	kfree(cursor_plane);
6256 	return res;
6257 }
6258 
6259 
6260 static int to_drm_connector_type(enum amd_signal_type st)
6261 {
6262 	switch (st) {
6263 	case SIGNAL_TYPE_HDMI_TYPE_A:
6264 		return DRM_MODE_CONNECTOR_HDMIA;
6265 	case SIGNAL_TYPE_EDP:
6266 		return DRM_MODE_CONNECTOR_eDP;
6267 	case SIGNAL_TYPE_LVDS:
6268 		return DRM_MODE_CONNECTOR_LVDS;
6269 	case SIGNAL_TYPE_RGB:
6270 		return DRM_MODE_CONNECTOR_VGA;
6271 	case SIGNAL_TYPE_DISPLAY_PORT:
6272 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6273 		return DRM_MODE_CONNECTOR_DisplayPort;
6274 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6275 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6276 		return DRM_MODE_CONNECTOR_DVID;
6277 	case SIGNAL_TYPE_VIRTUAL:
6278 		return DRM_MODE_CONNECTOR_VIRTUAL;
6279 
6280 	default:
6281 		return DRM_MODE_CONNECTOR_Unknown;
6282 	}
6283 }
6284 
6285 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6286 {
6287 	struct drm_encoder *encoder;
6288 
6289 	/* There is only one encoder per connector */
6290 	drm_connector_for_each_possible_encoder(connector, encoder)
6291 		return encoder;
6292 
6293 	return NULL;
6294 }
6295 
6296 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6297 {
6298 	struct drm_encoder *encoder;
6299 	struct amdgpu_encoder *amdgpu_encoder;
6300 
6301 	encoder = amdgpu_dm_connector_to_encoder(connector);
6302 
6303 	if (encoder == NULL)
6304 		return;
6305 
6306 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6307 
6308 	amdgpu_encoder->native_mode.clock = 0;
6309 
6310 	if (!list_empty(&connector->probed_modes)) {
6311 		struct drm_display_mode *preferred_mode = NULL;
6312 
6313 		list_for_each_entry(preferred_mode,
6314 				    &connector->probed_modes,
6315 				    head) {
6316 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6317 				amdgpu_encoder->native_mode = *preferred_mode;
6318 
6319 			break;
6320 		}
6321 
6322 	}
6323 }
6324 
6325 static struct drm_display_mode *
6326 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6327 			     char *name,
6328 			     int hdisplay, int vdisplay)
6329 {
6330 	struct drm_device *dev = encoder->dev;
6331 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6332 	struct drm_display_mode *mode = NULL;
6333 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6334 
6335 	mode = drm_mode_duplicate(dev, native_mode);
6336 
6337 	if (mode == NULL)
6338 		return NULL;
6339 
6340 	mode->hdisplay = hdisplay;
6341 	mode->vdisplay = vdisplay;
6342 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6343 #ifdef __linux__
6344 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6345 #else
6346 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6347 #endif
6348 
6349 	return mode;
6350 
6351 }
6352 
6353 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6354 						 struct drm_connector *connector)
6355 {
6356 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6357 	struct drm_display_mode *mode = NULL;
6358 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6359 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6360 				to_amdgpu_dm_connector(connector);
6361 	int i;
6362 	int n;
6363 	struct mode_size {
6364 		char name[DRM_DISPLAY_MODE_LEN];
6365 		int w;
6366 		int h;
6367 	} common_modes[] = {
6368 		{  "640x480",  640,  480},
6369 		{  "800x600",  800,  600},
6370 		{ "1024x768", 1024,  768},
6371 		{ "1280x720", 1280,  720},
6372 		{ "1280x800", 1280,  800},
6373 		{"1280x1024", 1280, 1024},
6374 		{ "1440x900", 1440,  900},
6375 		{"1680x1050", 1680, 1050},
6376 		{"1600x1200", 1600, 1200},
6377 		{"1920x1080", 1920, 1080},
6378 		{"1920x1200", 1920, 1200}
6379 	};
6380 
6381 	n = ARRAY_SIZE(common_modes);
6382 
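	/*
	 * Add each common mode that is smaller than the native mode and not
	 * already in the probed list; modes larger than (or equal to) the
	 * native mode are skipped.
	 */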
6383 	for (i = 0; i < n; i++) {
6384 		struct drm_display_mode *curmode = NULL;
6385 		bool mode_existed = false;
6386 
6387 		if (common_modes[i].w > native_mode->hdisplay ||
6388 		    common_modes[i].h > native_mode->vdisplay ||
6389 		   (common_modes[i].w == native_mode->hdisplay &&
6390 		    common_modes[i].h == native_mode->vdisplay))
6391 			continue;
6392 
6393 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6394 			if (common_modes[i].w == curmode->hdisplay &&
6395 			    common_modes[i].h == curmode->vdisplay) {
6396 				mode_existed = true;
6397 				break;
6398 			}
6399 		}
6400 
6401 		if (mode_existed)
6402 			continue;
6403 
6404 		mode = amdgpu_dm_create_common_mode(encoder,
6405 				common_modes[i].name, common_modes[i].w,
6406 				common_modes[i].h);
6407 		drm_mode_probed_add(connector, mode);
6408 		amdgpu_dm_connector->num_modes++;
6409 	}
6410 }
6411 
6412 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6413 					      struct edid *edid)
6414 {
6415 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6416 			to_amdgpu_dm_connector(connector);
6417 
6418 	if (edid) {
6419 		/* empty probed_modes */
6420 		INIT_LIST_HEAD(&connector->probed_modes);
6421 		amdgpu_dm_connector->num_modes =
6422 				drm_add_edid_modes(connector, edid);
6423 
6424 		/* Sort the probed modes before calling
6425 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6426 		 * more than one preferred mode. Modes that appear
6427 		 * later in the probed mode list could be of a higher
6428 		 * preferred resolution: for example, a 3840x2160
6429 		 * preferred timing in the base EDID and a 4096x2160
6430 		 * preferred resolution in a later DID extension block.
6431 		 */
6432 		drm_mode_sort(&connector->probed_modes);
6433 		amdgpu_dm_get_native_mode(connector);
6434 	} else {
6435 		amdgpu_dm_connector->num_modes = 0;
6436 	}
6437 }
6438 
6439 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6440 {
6441 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6442 			to_amdgpu_dm_connector(connector);
6443 	struct drm_encoder *encoder;
6444 	struct edid *edid = amdgpu_dm_connector->edid;
6445 
6446 	encoder = amdgpu_dm_connector_to_encoder(connector);
6447 
6448 	if (!edid || !drm_edid_is_valid(edid)) {
6449 		amdgpu_dm_connector->num_modes =
6450 				drm_add_modes_noedid(connector, 640, 480);
6451 	} else {
6452 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6453 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6454 	}
6455 	amdgpu_dm_fbc_init(connector);
6456 
6457 	return amdgpu_dm_connector->num_modes;
6458 }
6459 
6460 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6461 				     struct amdgpu_dm_connector *aconnector,
6462 				     int connector_type,
6463 				     struct dc_link *link,
6464 				     int link_index)
6465 {
6466 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6467 
6468 	/*
6469 	 * Some of the properties below require access to state, like bpc.
6470 	 * Allocate some default initial connector state with our reset helper.
6471 	 */
6472 	if (aconnector->base.funcs->reset)
6473 		aconnector->base.funcs->reset(&aconnector->base);
6474 
6475 	aconnector->connector_id = link_index;
6476 	aconnector->dc_link = link;
6477 	aconnector->base.interlace_allowed = false;
6478 	aconnector->base.doublescan_allowed = false;
6479 	aconnector->base.stereo_allowed = false;
6480 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6481 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6482 	aconnector->audio_inst = -1;
6483 	rw_init(&aconnector->hpd_lock, "dmhpd");
6484 
6485 	/*
6486 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
6487 	 * which means HPD hot plug is not supported.
6488 	 */
6489 	switch (connector_type) {
6490 	case DRM_MODE_CONNECTOR_HDMIA:
6491 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6492 		aconnector->base.ycbcr_420_allowed =
6493 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6494 		break;
6495 	case DRM_MODE_CONNECTOR_DisplayPort:
6496 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6497 		aconnector->base.ycbcr_420_allowed =
6498 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6499 		break;
6500 	case DRM_MODE_CONNECTOR_DVID:
6501 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6502 		break;
6503 	default:
6504 		break;
6505 	}
6506 
6507 	drm_object_attach_property(&aconnector->base.base,
6508 				dm->ddev->mode_config.scaling_mode_property,
6509 				DRM_MODE_SCALE_NONE);
6510 
6511 	drm_object_attach_property(&aconnector->base.base,
6512 				adev->mode_info.underscan_property,
6513 				UNDERSCAN_OFF);
6514 	drm_object_attach_property(&aconnector->base.base,
6515 				adev->mode_info.underscan_hborder_property,
6516 				0);
6517 	drm_object_attach_property(&aconnector->base.base,
6518 				adev->mode_info.underscan_vborder_property,
6519 				0);
6520 
6521 	if (!aconnector->mst_port)
6522 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6523 
6524 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6525 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6526 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6527 
6528 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6529 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6530 		drm_object_attach_property(&aconnector->base.base,
6531 				adev->mode_info.abm_level_property, 0);
6532 	}
6533 
6534 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6535 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6536 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6537 		drm_object_attach_property(
6538 			&aconnector->base.base,
6539 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6540 
6541 		if (!aconnector->mst_port)
6542 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6543 
6544 #ifdef CONFIG_DRM_AMD_DC_HDCP
6545 		if (adev->dm.hdcp_workqueue)
6546 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6547 #endif
6548 	}
6549 }
6550 
6551 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6552 			      struct i2c_msg *msgs, int num)
6553 {
6554 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6555 	struct ddc_service *ddc_service = i2c->ddc_service;
6556 	struct i2c_command cmd;
6557 	int i;
6558 	int result = -EIO;
6559 
6560 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6561 
6562 	if (!cmd.payloads)
6563 		return result;
6564 
6565 	cmd.number_of_payloads = num;
6566 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6567 	cmd.speed = 100;
6568 
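	/*
	 * Translate each i2c_msg into a DC i2c_payload; the I2C_M_RD flag
	 * distinguishes reads from writes.
	 */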
6569 	for (i = 0; i < num; i++) {
6570 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6571 		cmd.payloads[i].address = msgs[i].addr;
6572 		cmd.payloads[i].length = msgs[i].len;
6573 		cmd.payloads[i].data = msgs[i].buf;
6574 	}
6575 
6576 	if (dc_submit_i2c(
6577 			ddc_service->ctx->dc,
6578 			ddc_service->ddc_pin->hw_info.ddc_channel,
6579 			&cmd))
6580 		result = num;
6581 
6582 	kfree(cmd.payloads);
6583 	return result;
6584 }
6585 
6586 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6587 {
6588 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6589 }
6590 
6591 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6592 	.master_xfer = amdgpu_dm_i2c_xfer,
6593 	.functionality = amdgpu_dm_i2c_func,
6594 };
6595 
6596 static struct amdgpu_i2c_adapter *
6597 create_i2c(struct ddc_service *ddc_service,
6598 	   int link_index,
6599 	   int *res)
6600 {
6601 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6602 	struct amdgpu_i2c_adapter *i2c;
6603 
6604 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6605 	if (!i2c)
6606 		return NULL;
6607 #ifdef notyet
6608 	i2c->base.owner = THIS_MODULE;
6609 	i2c->base.class = I2C_CLASS_DDC;
6610 	i2c->base.dev.parent = &adev->pdev->dev;
6611 #endif
6612 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6613 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6614 	i2c_set_adapdata(&i2c->base, i2c);
6615 	i2c->ddc_service = ddc_service;
6616 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6617 
6618 	return i2c;
6619 }
6620 
6621 
6622 /*
6623  * Note: this function assumes that dc_link_detect() was called for the
6624  * dc_link which will be represented by this aconnector.
6625  */
6626 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6627 				    struct amdgpu_dm_connector *aconnector,
6628 				    uint32_t link_index,
6629 				    struct amdgpu_encoder *aencoder)
6630 {
6631 	int res = 0;
6632 	int connector_type;
6633 	struct dc *dc = dm->dc;
6634 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6635 	struct amdgpu_i2c_adapter *i2c;
6636 
6637 	link->priv = aconnector;
6638 
6639 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6640 
6641 	i2c = create_i2c(link->ddc, link->link_index, &res);
6642 	if (!i2c) {
6643 		DRM_ERROR("Failed to create i2c adapter data\n");
6644 		return -ENOMEM;
6645 	}
6646 
6647 	aconnector->i2c = i2c;
6648 	res = i2c_add_adapter(&i2c->base);
6649 
6650 	if (res) {
6651 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6652 		goto out_free;
6653 	}
6654 
6655 	connector_type = to_drm_connector_type(link->connector_signal);
6656 
6657 	res = drm_connector_init_with_ddc(
6658 			dm->ddev,
6659 			&aconnector->base,
6660 			&amdgpu_dm_connector_funcs,
6661 			connector_type,
6662 			&i2c->base);
6663 
6664 	if (res) {
6665 		DRM_ERROR("connector_init failed\n");
6666 		aconnector->connector_id = -1;
6667 		goto out_free;
6668 	}
6669 
6670 	drm_connector_helper_add(
6671 			&aconnector->base,
6672 			&amdgpu_dm_connector_helper_funcs);
6673 
6674 	amdgpu_dm_connector_init_helper(
6675 		dm,
6676 		aconnector,
6677 		connector_type,
6678 		link,
6679 		link_index);
6680 
6681 	drm_connector_attach_encoder(
6682 		&aconnector->base, &aencoder->base);
6683 
6684 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6685 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6686 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6687 
6688 out_free:
6689 	if (res) {
6690 		kfree(i2c);
6691 		aconnector->i2c = NULL;
6692 	}
6693 	return res;
6694 }
6695 
6696 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6697 {
6698 	switch (adev->mode_info.num_crtc) {
6699 	case 1:
6700 		return 0x1;
6701 	case 2:
6702 		return 0x3;
6703 	case 3:
6704 		return 0x7;
6705 	case 4:
6706 		return 0xf;
6707 	case 5:
6708 		return 0x1f;
6709 	case 6:
6710 	default:
6711 		return 0x3f;
6712 	}
6713 }
6714 
6715 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6716 				  struct amdgpu_encoder *aencoder,
6717 				  uint32_t link_index)
6718 {
6719 	struct amdgpu_device *adev = drm_to_adev(dev);
6720 
6721 	int res = drm_encoder_init(dev,
6722 				   &aencoder->base,
6723 				   &amdgpu_dm_encoder_funcs,
6724 				   DRM_MODE_ENCODER_TMDS,
6725 				   NULL);
6726 
6727 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6728 
6729 	if (!res)
6730 		aencoder->encoder_id = link_index;
6731 	else
6732 		aencoder->encoder_id = -1;
6733 
6734 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6735 
6736 	return res;
6737 }
6738 
6739 static void manage_dm_interrupts(struct amdgpu_device *adev,
6740 				 struct amdgpu_crtc *acrtc,
6741 				 bool enable)
6742 {
6743 	/*
6744 	 * We have no guarantee that the frontend index maps to the same
6745 	 * backend index - some even map to more than one.
6746 	 *
6747 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6748 	 */
6749 	int irq_type =
6750 		amdgpu_display_crtc_idx_to_irq_type(
6751 			adev,
6752 			acrtc->crtc_id);
6753 
6754 	if (enable) {
6755 		drm_crtc_vblank_on(&acrtc->base);
6756 		amdgpu_irq_get(
6757 			adev,
6758 			&adev->pageflip_irq,
6759 			irq_type);
6760 	} else {
6761 
6762 		amdgpu_irq_put(
6763 			adev,
6764 			&adev->pageflip_irq,
6765 			irq_type);
6766 		drm_crtc_vblank_off(&acrtc->base);
6767 	}
6768 }
6769 
6770 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6771 				      struct amdgpu_crtc *acrtc)
6772 {
6773 	int irq_type =
6774 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6775 
6776 	/*
6777 	 * This reads the current state for the IRQ and force-reapplies
6778 	 * the setting to hardware.
6779 	 */
6780 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6781 }
6782 
6783 static bool
6784 is_scaling_state_different(const struct dm_connector_state *dm_state,
6785 			   const struct dm_connector_state *old_dm_state)
6786 {
6787 	if (dm_state->scaling != old_dm_state->scaling)
6788 		return true;
6789 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6790 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6791 			return true;
6792 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6793 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6794 			return true;
6795 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6796 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6797 		return true;
6798 	return false;
6799 }
6800 
6801 #ifdef CONFIG_DRM_AMD_DC_HDCP
6802 static bool is_content_protection_different(struct drm_connector_state *state,
6803 					    const struct drm_connector_state *old_state,
6804 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6805 {
6806 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6807 
6808 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6809 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6810 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6811 		return true;
6812 	}
6813 
6814 	/* CP is being re-enabled, ignore this */
6815 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6816 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6817 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6818 		return false;
6819 	}
6820 
6821 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6822 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6823 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6824 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6825 
6826 	/* Check if something is connected/enabled; otherwise we would start HDCP
6827 	 * with nothing connected/enabled (hot-plug, headless S3, dpms).
6828 	 */
6829 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6830 	    aconnector->dc_sink != NULL)
6831 		return true;
6832 
6833 	if (old_state->content_protection == state->content_protection)
6834 		return false;
6835 
6836 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6837 		return true;
6838 
6839 	return false;
6840 }
6841 
6842 #endif
6843 static void remove_stream(struct amdgpu_device *adev,
6844 			  struct amdgpu_crtc *acrtc,
6845 			  struct dc_stream_state *stream)
6846 {
6847 	/* this is the update mode case */
6848 
6849 	acrtc->otg_inst = -1;
6850 	acrtc->enabled = false;
6851 }
6852 
6853 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6854 			       struct dc_cursor_position *position)
6855 {
6856 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6857 	int x, y;
6858 	int xorigin = 0, yorigin = 0;
6859 
6860 	if (!crtc || !plane->state->fb)
6861 		return 0;
6862 
6863 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6864 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6865 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6866 			  __func__,
6867 			  plane->state->crtc_w,
6868 			  plane->state->crtc_h);
6869 		return -EINVAL;
6870 	}
6871 
6872 	x = plane->state->crtc_x;
6873 	y = plane->state->crtc_y;
6874 
6875 	if (x <= -amdgpu_crtc->max_cursor_width ||
6876 	    y <= -amdgpu_crtc->max_cursor_height)
6877 		return 0;
6878 
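	/*
	 * For a cursor partially off the top/left edge, clamp the position
	 * to 0 and shift the hotspot instead so the visible part stays put.
	 */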
6879 	if (x < 0) {
6880 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6881 		x = 0;
6882 	}
6883 	if (y < 0) {
6884 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6885 		y = 0;
6886 	}
6887 	position->enable = true;
6888 	position->translate_by_source = true;
6889 	position->x = x;
6890 	position->y = y;
6891 	position->x_hotspot = xorigin;
6892 	position->y_hotspot = yorigin;
6893 
6894 	return 0;
6895 }
6896 
6897 static void handle_cursor_update(struct drm_plane *plane,
6898 				 struct drm_plane_state *old_plane_state)
6899 {
6900 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6901 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6902 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6903 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6904 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6905 	uint64_t address = afb ? afb->address : 0;
6906 	struct dc_cursor_position position = {0};
6907 	struct dc_cursor_attributes attributes;
6908 	int ret;
6909 
6910 	if (!plane->state->fb && !old_plane_state->fb)
6911 		return;
6912 
6913 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %d x %d\n",
6914 			 __func__,
6915 			 amdgpu_crtc->crtc_id,
6916 			 plane->state->crtc_w,
6917 			 plane->state->crtc_h);
6918 
6919 	ret = get_cursor_position(plane, crtc, &position);
6920 	if (ret)
6921 		return;
6922 
6923 	if (!position.enable) {
6924 		/* turn off cursor */
6925 		if (crtc_state && crtc_state->stream) {
6926 			mutex_lock(&adev->dm.dc_lock);
6927 			dc_stream_set_cursor_position(crtc_state->stream,
6928 						      &position);
6929 			mutex_unlock(&adev->dm.dc_lock);
6930 		}
6931 		return;
6932 	}
6933 
6934 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6935 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6936 
6937 	memset(&attributes, 0, sizeof(attributes));
6938 	attributes.address.high_part = upper_32_bits(address);
6939 	attributes.address.low_part  = lower_32_bits(address);
6940 	attributes.width             = plane->state->crtc_w;
6941 	attributes.height            = plane->state->crtc_h;
6942 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6943 	attributes.rotation_angle    = 0;
6944 	attributes.attribute_flags.value = 0;
6945 
6946 	attributes.pitch = attributes.width;
6947 
6948 	if (crtc_state->stream) {
6949 		mutex_lock(&adev->dm.dc_lock);
6950 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6951 							 &attributes))
6952 			DRM_ERROR("DC failed to set cursor attributes\n");
6953 
6954 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6955 						   &position))
6956 			DRM_ERROR("DC failed to set cursor position\n");
6957 		mutex_unlock(&adev->dm.dc_lock);
6958 	}
6959 }
6960 
6961 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6962 {
6963 
6964 	assert_spin_locked(&acrtc->base.dev->event_lock);
6965 	WARN_ON(acrtc->event);
6966 
6967 	acrtc->event = acrtc->base.state->event;
6968 
6969 	/* Set the flip status */
6970 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6971 
6972 	/* Mark this event as consumed */
6973 	acrtc->base.state->event = NULL;
6974 
6975 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6976 						 acrtc->crtc_id);
6977 }
6978 
6979 static void update_freesync_state_on_stream(
6980 	struct amdgpu_display_manager *dm,
6981 	struct dm_crtc_state *new_crtc_state,
6982 	struct dc_stream_state *new_stream,
6983 	struct dc_plane_state *surface,
6984 	u32 flip_timestamp_in_us)
6985 {
6986 	struct mod_vrr_params vrr_params;
6987 	struct dc_info_packet vrr_infopacket = {0};
6988 	struct amdgpu_device *adev = dm->adev;
6989 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6990 	unsigned long flags;
6991 
6992 	if (!new_stream)
6993 		return;
6994 
6995 	/*
6996 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6997 	 * For now it's sufficient to just guard against these conditions.
6998 	 */
6999 
7000 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7001 		return;
7002 
7003 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7004 	vrr_params = acrtc->dm_irq_params.vrr_params;
7005 
7006 	if (surface) {
7007 		mod_freesync_handle_preflip(
7008 			dm->freesync_module,
7009 			surface,
7010 			new_stream,
7011 			flip_timestamp_in_us,
7012 			&vrr_params);
7013 
7014 		if (adev->family < AMDGPU_FAMILY_AI &&
7015 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7016 			mod_freesync_handle_v_update(dm->freesync_module,
7017 						     new_stream, &vrr_params);
7018 
7019 			/* Need to call this before the frame ends. */
7020 			dc_stream_adjust_vmin_vmax(dm->dc,
7021 						   new_crtc_state->stream,
7022 						   &vrr_params.adjust);
7023 		}
7024 	}
7025 
7026 	mod_freesync_build_vrr_infopacket(
7027 		dm->freesync_module,
7028 		new_stream,
7029 		&vrr_params,
7030 		PACKET_TYPE_VRR,
7031 		TRANSFER_FUNC_UNKNOWN,
7032 		&vrr_infopacket);
7033 
7034 	new_crtc_state->freesync_timing_changed |=
7035 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7036 			&vrr_params.adjust,
7037 			sizeof(vrr_params.adjust)) != 0);
7038 
7039 	new_crtc_state->freesync_vrr_info_changed |=
7040 		(memcmp(&new_crtc_state->vrr_infopacket,
7041 			&vrr_infopacket,
7042 			sizeof(vrr_infopacket)) != 0);
7043 
7044 	acrtc->dm_irq_params.vrr_params = vrr_params;
7045 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7046 
7047 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7048 	new_stream->vrr_infopacket = vrr_infopacket;
7049 
7050 	if (new_crtc_state->freesync_vrr_info_changed)
7051 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7052 			      new_crtc_state->base.crtc->base.id,
7053 			      (int)new_crtc_state->base.vrr_enabled,
7054 			      (int)vrr_params.state);
7055 
7056 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7057 }
7058 
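/*
 * Recompute the VRR parameters for a stream and mirror them, together
 * with the freesync config and active plane count, into
 * acrtc->dm_irq_params under the event lock, so the vblank/vupdate IRQ
 * handlers always see a consistent snapshot.
 */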
7059 static void update_stream_irq_parameters(
7060 	struct amdgpu_display_manager *dm,
7061 	struct dm_crtc_state *new_crtc_state)
7062 {
7063 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7064 	struct mod_vrr_params vrr_params;
7065 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7066 	struct amdgpu_device *adev = dm->adev;
7067 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7068 	unsigned long flags;
7069 
7070 	if (!new_stream)
7071 		return;
7072 
7073 	/*
7074 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7075 	 * For now it's sufficient to just guard against these conditions.
7076 	 */
7077 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7078 		return;
7079 
7080 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7081 	vrr_params = acrtc->dm_irq_params.vrr_params;
7082 
7083 	if (new_crtc_state->vrr_supported &&
7084 	    config.min_refresh_in_uhz &&
7085 	    config.max_refresh_in_uhz) {
7086 		config.state = new_crtc_state->base.vrr_enabled ?
7087 			VRR_STATE_ACTIVE_VARIABLE :
7088 			VRR_STATE_INACTIVE;
7089 	} else {
7090 		config.state = VRR_STATE_UNSUPPORTED;
7091 	}
7092 
7093 	mod_freesync_build_vrr_params(dm->freesync_module,
7094 				      new_stream,
7095 				      &config, &vrr_params);
7096 
7097 	new_crtc_state->freesync_timing_changed |=
7098 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7099 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7100 
7101 	new_crtc_state->freesync_config = config;
7102 	/* Copy state for access from DM IRQ handler */
7103 	acrtc->dm_irq_params.freesync_config = config;
7104 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7105 	acrtc->dm_irq_params.vrr_params = vrr_params;
7106 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7107 }
7108 
7109 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7110 					    struct dm_crtc_state *new_state)
7111 {
7112 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7113 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7114 
7115 	if (!old_vrr_active && new_vrr_active) {
7116 		/* Transition VRR inactive -> active:
7117 		 * While VRR is active, we must not disable the vblank irq, as a
7118 		 * re-enable after a disable would compute bogus vblank/pflip
7119 		 * timestamps if it happened inside the display front porch.
7120 		 *
7121 		 * We also need vupdate irq for the actual core vblank handling
7122 		 * at end of vblank.
7123 		 */
7124 		dm_set_vupdate_irq(new_state->base.crtc, true);
7125 		drm_crtc_vblank_get(new_state->base.crtc);
7126 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7127 				 __func__, new_state->base.crtc->base.id);
7128 	} else if (old_vrr_active && !new_vrr_active) {
7129 		/* Transition VRR active -> inactive:
7130 		 * Allow vblank irq disable again for fixed refresh rate.
7131 		 */
7132 		dm_set_vupdate_irq(new_state->base.crtc, false);
7133 		drm_crtc_vblank_put(new_state->base.crtc);
7134 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7135 				 __func__, new_state->base.crtc->base.id);
7136 	}
7137 }
7138 
7139 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7140 {
7141 	struct drm_plane *plane;
7142 	struct drm_plane_state *old_plane_state, *new_plane_state;
7143 	int i;
7144 
7145 	/*
7146 	 * TODO: Make this per-stream so we don't issue redundant updates for
7147 	 * commits with multiple streams.
7148 	 */
7149 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7150 				       new_plane_state, i)
7151 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7152 			handle_cursor_update(plane, old_plane_state);
7153 }
7154 
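/*
 * Program all non-cursor planes for one CRTC. Surface updates, plane
 * infos, scaling infos and flip addresses are batched into a single
 * bundle and handed to DC in one dc_commit_updates_for_stream() call;
 * the bundle is heap-allocated, presumably because MAX_SURFACES copies
 * of these structs would be too large for the kernel stack.
 */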
7155 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7156 				    struct dc_state *dc_state,
7157 				    struct drm_device *dev,
7158 				    struct amdgpu_display_manager *dm,
7159 				    struct drm_crtc *pcrtc,
7160 				    bool wait_for_vblank)
7161 {
7162 	uint32_t i;
7163 	uint64_t timestamp_ns;
7164 	struct drm_plane *plane;
7165 	struct drm_plane_state *old_plane_state, *new_plane_state;
7166 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7167 	struct drm_crtc_state *new_pcrtc_state =
7168 			drm_atomic_get_new_crtc_state(state, pcrtc);
7169 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7170 	struct dm_crtc_state *dm_old_crtc_state =
7171 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7172 	int planes_count = 0, vpos, hpos;
7173 	long r;
7174 	unsigned long flags;
7175 	struct amdgpu_bo *abo;
7176 	uint32_t target_vblank, last_flip_vblank;
7177 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7178 	bool pflip_present = false;
7179 	struct {
7180 		struct dc_surface_update surface_updates[MAX_SURFACES];
7181 		struct dc_plane_info plane_infos[MAX_SURFACES];
7182 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7183 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7184 		struct dc_stream_update stream_update;
7185 	} *bundle;
7186 
7187 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7188 
7189 	if (!bundle) {
7190 		dm_error("Failed to allocate update bundle\n");
7191 		goto cleanup;
7192 	}
7193 
7194 	/*
7195 	 * Disable the cursor first if we're disabling all the planes.
7196 	 * It'll remain on the screen after the planes are re-enabled
7197 	 * if we don't.
7198 	 */
7199 	if (acrtc_state->active_planes == 0)
7200 		amdgpu_dm_commit_cursors(state);
7201 
7202 	/* update planes when needed */
7203 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7204 		struct drm_crtc *crtc = new_plane_state->crtc;
7205 		struct drm_crtc_state *new_crtc_state;
7206 		struct drm_framebuffer *fb = new_plane_state->fb;
7207 		bool plane_needs_flip;
7208 		struct dc_plane_state *dc_plane;
7209 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7210 
7211 		/* Cursor plane is handled after stream updates */
7212 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7213 			continue;
7214 
7215 		if (!fb || !crtc || pcrtc != crtc)
7216 			continue;
7217 
7218 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7219 		if (!new_crtc_state->active)
7220 			continue;
7221 
7222 		dc_plane = dm_new_plane_state->dc_state;
7223 
7224 		bundle->surface_updates[planes_count].surface = dc_plane;
7225 		if (new_pcrtc_state->color_mgmt_changed) {
7226 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7227 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7228 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7229 		}
7230 
7231 		fill_dc_scaling_info(new_plane_state,
7232 				     &bundle->scaling_infos[planes_count]);
7233 
7234 		bundle->surface_updates[planes_count].scaling_info =
7235 			&bundle->scaling_infos[planes_count];
7236 
7237 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7238 
7239 		pflip_present = pflip_present || plane_needs_flip;
7240 
7241 		if (!plane_needs_flip) {
7242 			planes_count += 1;
7243 			continue;
7244 		}
7245 
7246 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7247 
7248 		/*
7249 		 * Wait for all fences on this FB. Do a limited wait to avoid
7250 		 * deadlocking during GPU reset, when the fence may never signal
7251 		 * while we hold the reservation lock for the BO.
7252 		 */
7253 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7254 							false,
7255 							msecs_to_jiffies(5000));
7256 		if (unlikely(r <= 0))
7257 			DRM_ERROR("Waiting for fences timed out!");
7258 
7259 		fill_dc_plane_info_and_addr(
7260 			dm->adev, new_plane_state,
7261 			dm_new_plane_state->tiling_flags,
7262 			&bundle->plane_infos[planes_count],
7263 			&bundle->flip_addrs[planes_count].address,
7264 			dm_new_plane_state->tmz_surface, false);
7265 
7266 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7267 				 new_plane_state->plane->index,
7268 				 bundle->plane_infos[planes_count].dcc.enable);
7269 
7270 		bundle->surface_updates[planes_count].plane_info =
7271 			&bundle->plane_infos[planes_count];
7272 
7273 		/*
7274 		 * Only allow immediate flips for fast updates that don't
7275 		 * change FB pitch, DCC state, rotation or mirroring.
7276 		 */
7277 		bundle->flip_addrs[planes_count].flip_immediate =
7278 			crtc->state->async_flip &&
7279 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7280 
7281 		timestamp_ns = ktime_get_ns();
7282 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7283 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7284 		bundle->surface_updates[planes_count].surface = dc_plane;
7285 
7286 		if (!bundle->surface_updates[planes_count].surface) {
7287 			DRM_ERROR("No surface for CRTC: id=%d\n",
7288 					acrtc_attach->crtc_id);
7289 			continue;
7290 		}
7291 
7292 		if (plane == pcrtc->primary)
7293 			update_freesync_state_on_stream(
7294 				dm,
7295 				acrtc_state,
7296 				acrtc_state->stream,
7297 				dc_plane,
7298 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7299 
7300 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7301 				 __func__,
7302 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7303 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7304 
7305 		planes_count += 1;
7306 
7307 	}
7308 
7309 	if (pflip_present) {
7310 		if (!vrr_active) {
7311 			/* Use old throttling in non-vrr fixed refresh rate mode
7312 			 * to keep flip scheduling based on target vblank counts
7313 			 * working in a backwards compatible way, e.g., for
7314 			 * clients using the GLX_OML_sync_control extension or
7315 			 * DRI3/Present extension with defined target_msc.
7316 			 */
7317 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7318 		} else {
7320 			/* For variable refresh rate mode only:
7321 			 * Get vblank of last completed flip to avoid > 1 vrr
7322 			 * flips per video frame by use of throttling, but allow
7323 			 * flip programming anywhere in the possibly large
7324 			 * variable vrr vblank interval for fine-grained flip
7325 			 * timing control and more opportunity to avoid stutter
7326 			 * on late submission of flips.
7327 			 */
7328 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7329 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7330 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7331 		}
7332 
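		/* wait_for_vblank is 0 or 1 here, so this targets either the
		 * vblank of the last completed flip or the one after it.
		 */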
7333 		target_vblank = last_flip_vblank + wait_for_vblank;
7334 
7335 		/*
7336 		 * Wait until we're out of the vertical blank period before the one
7337 		 * targeted by the flip
7338 		 */
7339 		while ((acrtc_attach->enabled &&
7340 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7341 							    0, &vpos, &hpos, NULL,
7342 							    NULL, &pcrtc->hwmode)
7343 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7344 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7345 			(int)(target_vblank -
7346 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7347 			usleep_range(1000, 1100);
7348 		}
7349 
7350 		/**
7351 		 * Prepare the flip event for the pageflip interrupt to handle.
7352 		 *
7353 		 * This only works in the case where we've already turned on the
7354 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7355 		 * from 0 -> n planes we have to skip a hardware generated event
7356 		 * and rely on sending it from software.
7357 		 */
7358 		if (acrtc_attach->base.state->event &&
7359 		    acrtc_state->active_planes > 0) {
7360 			drm_crtc_vblank_get(pcrtc);
7361 
7362 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7363 
7364 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7365 			prepare_flip_isr(acrtc_attach);
7366 
7367 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7368 		}
7369 
7370 		if (acrtc_state->stream) {
7371 			if (acrtc_state->freesync_vrr_info_changed)
7372 				bundle->stream_update.vrr_infopacket =
7373 					&acrtc_state->stream->vrr_infopacket;
7374 		}
7375 	}
7376 
7377 	/* Update the planes if changed or disable if we don't have any. */
7378 	if ((planes_count || acrtc_state->active_planes == 0) &&
7379 		acrtc_state->stream) {
7380 		bundle->stream_update.stream = acrtc_state->stream;
7381 		if (new_pcrtc_state->mode_changed) {
7382 			bundle->stream_update.src = acrtc_state->stream->src;
7383 			bundle->stream_update.dst = acrtc_state->stream->dst;
7384 		}
7385 
7386 		if (new_pcrtc_state->color_mgmt_changed) {
7387 			/*
7388 			 * TODO: This isn't fully correct since we've actually
7389 			 * already modified the stream in place.
7390 			 */
7391 			bundle->stream_update.gamut_remap =
7392 				&acrtc_state->stream->gamut_remap_matrix;
7393 			bundle->stream_update.output_csc_transform =
7394 				&acrtc_state->stream->csc_color_matrix;
7395 			bundle->stream_update.out_transfer_func =
7396 				acrtc_state->stream->out_transfer_func;
7397 		}
7398 
7399 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7400 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7401 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7402 
7403 		/*
7404 		 * If FreeSync state on the stream has changed then we need to
7405 		 * re-adjust the min/max bounds now that DC doesn't handle this
7406 		 * as part of commit.
7407 		 */
7408 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7409 		    amdgpu_dm_vrr_active(acrtc_state)) {
7410 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7411 			dc_stream_adjust_vmin_vmax(
7412 				dm->dc, acrtc_state->stream,
7413 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7414 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7415 		}
7416 		mutex_lock(&dm->dc_lock);
7417 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7418 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7419 			amdgpu_dm_psr_disable(acrtc_state->stream);
7420 
7421 		dc_commit_updates_for_stream(dm->dc,
7422 						     bundle->surface_updates,
7423 						     planes_count,
7424 						     acrtc_state->stream,
7425 						     &bundle->stream_update,
7426 						     dc_state);
7427 
7428 		/**
7429 		 * Enable or disable the interrupts on the backend.
7430 		 *
7431 		 * Most pipes are put into power gating when unused.
7432 		 *
7433 		 * When a pipe is power gated we lose its interrupt
7434 		 * enablement state, and it is not restored automatically.
7435 		 *
7436 		 * So we need to update the IRQ control state in hardware
7437 		 * whenever the pipe turns on (since it could be previously
7438 		 * power gated) or off (since some pipes can't be power gated
7439 		 * on some ASICs).
7440 		 */
7441 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7442 			dm_update_pflip_irq_state(drm_to_adev(dev),
7443 						  acrtc_attach);
7444 
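		/*
		 * PSR housekeeping: set up PSR on the link the first time a
		 * non-fast update runs on a PSR-capable link, and only
		 * (re)activate PSR from fast updates so it never races a
		 * full stream update (which disables it above).
		 */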
7445 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7446 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7447 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7448 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7449 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7450 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7451 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7452 			amdgpu_dm_psr_enable(acrtc_state->stream);
7453 		}
7454 
7455 		mutex_unlock(&dm->dc_lock);
7456 	}
7457 
7458 	/*
7459 	 * Update cursor state *after* programming all the planes.
7460 	 * This avoids redundant programming in the case where we're going
7461 	 * to be disabling a single plane - its pipe is being disabled anyway.
7462 	 */
7463 	if (acrtc_state->active_planes)
7464 		amdgpu_dm_commit_cursors(state);
7465 
7466 cleanup:
7467 	kfree(bundle);
7468 }
7469 
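/*
 * Keep the audio side (ELD notifications) in sync with the commit:
 * first notify removals for connectors whose CRTC changed or was shut
 * down, then notify additions for streams that went through a modeset,
 * using the audio instance reported by DC's stream status.
 */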
7470 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7471 				   struct drm_atomic_state *state)
7472 {
7473 	struct amdgpu_device *adev = drm_to_adev(dev);
7474 	struct amdgpu_dm_connector *aconnector;
7475 	struct drm_connector *connector;
7476 	struct drm_connector_state *old_con_state, *new_con_state;
7477 	struct drm_crtc_state *new_crtc_state;
7478 	struct dm_crtc_state *new_dm_crtc_state;
7479 	const struct dc_stream_status *status;
7480 	int i, inst;
7481 
7482 	/* Notify device removals. */
7483 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7484 		if (old_con_state->crtc != new_con_state->crtc) {
7485 			/* CRTC changes require notification. */
7486 			goto notify;
7487 		}
7488 
7489 		if (!new_con_state->crtc)
7490 			continue;
7491 
7492 		new_crtc_state = drm_atomic_get_new_crtc_state(
7493 			state, new_con_state->crtc);
7494 
7495 		if (!new_crtc_state)
7496 			continue;
7497 
7498 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7499 			continue;
7500 
7501 	notify:
7502 		aconnector = to_amdgpu_dm_connector(connector);
7503 
7504 		mutex_lock(&adev->dm.audio_lock);
7505 		inst = aconnector->audio_inst;
7506 		aconnector->audio_inst = -1;
7507 		mutex_unlock(&adev->dm.audio_lock);
7508 
7509 		amdgpu_dm_audio_eld_notify(adev, inst);
7510 	}
7511 
7512 	/* Notify audio device additions. */
7513 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7514 		if (!new_con_state->crtc)
7515 			continue;
7516 
7517 		new_crtc_state = drm_atomic_get_new_crtc_state(
7518 			state, new_con_state->crtc);
7519 
7520 		if (!new_crtc_state)
7521 			continue;
7522 
7523 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7524 			continue;
7525 
7526 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7527 		if (!new_dm_crtc_state->stream)
7528 			continue;
7529 
7530 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7531 		if (!status)
7532 			continue;
7533 
7534 		aconnector = to_amdgpu_dm_connector(connector);
7535 
7536 		mutex_lock(&adev->dm.audio_lock);
7537 		inst = status->audio_inst;
7538 		aconnector->audio_inst = inst;
7539 		mutex_unlock(&adev->dm.audio_lock);
7540 
7541 		amdgpu_dm_audio_eld_notify(adev, inst);
7542 	}
7543 }
7544 
7545 /*
7546  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7547  * @crtc_state: the DRM CRTC state
7548  * @stream_state: the DC stream state.
7549  *
7550  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7551  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7552  */
7553 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7554 						struct dc_stream_state *stream_state)
7555 {
7556 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7557 }
7558 
7559 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7560 				   struct drm_atomic_state *state,
7561 				   bool nonblock)
7562 {
7563 	/*
7564 	 * Add check here for SoC's that support hardware cursor plane, to
7565 	 * unset legacy_cursor_update
7566 	 */
7567 
7568 	return drm_atomic_helper_commit(dev, state, nonblock);
7569 
7570 	/* TODO: handle EINTR, re-enable IRQ */
7571 }
7572 
7573 /**
7574  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7575  * @state: The atomic state to commit
7576  *
7577  * This will tell DC to commit the constructed DC state from atomic_check,
7578  * programming the hardware. Any failure here implies a hardware failure, since
7579  * atomic check should have filtered anything non-kosher.
7580  */
7581 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7582 {
7583 	struct drm_device *dev = state->dev;
7584 	struct amdgpu_device *adev = drm_to_adev(dev);
7585 	struct amdgpu_display_manager *dm = &adev->dm;
7586 	struct dm_atomic_state *dm_state;
7587 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7588 	uint32_t i, j;
7589 	struct drm_crtc *crtc;
7590 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7591 	unsigned long flags;
7592 	bool wait_for_vblank = true;
7593 	struct drm_connector *connector;
7594 	struct drm_connector_state *old_con_state, *new_con_state;
7595 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7596 	int crtc_disable_count = 0;
7597 	bool mode_set_reset_required = false;
7598 
7599 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7600 
7601 	dm_state = dm_atomic_get_new_state(state);
7602 	if (dm_state && dm_state->context) {
7603 		dc_state = dm_state->context;
7604 	} else {
7605 		/* No state changes, retain current state. */
7606 		dc_state_temp = dc_create_state(dm->dc);
7607 		ASSERT(dc_state_temp);
7608 		dc_state = dc_state_temp;
7609 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7610 	}
7611 
7612 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7613 				       new_crtc_state, i) {
7614 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7615 
7616 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7617 
7618 		if (old_crtc_state->active &&
7619 		    (!new_crtc_state->active ||
7620 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7621 			manage_dm_interrupts(adev, acrtc, false);
7622 			dc_stream_release(dm_old_crtc_state->stream);
7623 		}
7624 	}
7625 
7626 	drm_atomic_helper_calc_timestamping_constants(state);
7627 
7628 	/* update changed items */
7629 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7630 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7631 
7632 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7633 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7634 
7635 		DRM_DEBUG_DRIVER(
7636 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7637 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7638 			"connectors_changed:%d\n",
7639 			acrtc->crtc_id,
7640 			new_crtc_state->enable,
7641 			new_crtc_state->active,
7642 			new_crtc_state->planes_changed,
7643 			new_crtc_state->mode_changed,
7644 			new_crtc_state->active_changed,
7645 			new_crtc_state->connectors_changed);
7646 
7647 		/* Copy all transient state flags into dc state */
7648 		if (dm_new_crtc_state->stream) {
7649 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7650 							    dm_new_crtc_state->stream);
7651 		}
7652 
7653 		/* handles headless hotplug case, updating new_state and
7654 		 * aconnector as needed
7655 		 */
7656 
7657 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7658 
7659 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7660 
7661 			if (!dm_new_crtc_state->stream) {
7662 				/*
7663 				 * This could happen because of issues with the
7664 				 * delivery of userspace notifications: userspace
7665 				 * tries to set a mode on a display that is in
7666 				 * fact disconnected, so dc_sink is NULL on the
7667 				 * aconnector. We expect a mode reset to come
7668 				 * soon.
7669 				 *
7670 				 * This can also happen when an unplug occurs
7671 				 * while the resume sequence is finishing.
7672 				 *
7673 				 * In either case we want to pretend we still
7674 				 * have a sink, to keep the pipe running so that
7675 				 * the hw state stays consistent with the sw state.
7676 				 */
7677 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7678 						__func__, acrtc->base.base.id);
7679 				continue;
7680 			}
7681 
7682 			if (dm_old_crtc_state->stream)
7683 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7684 
7685 			pm_runtime_get_noresume(dev->dev);
7686 
7687 			acrtc->enabled = true;
7688 			acrtc->hw_mode = new_crtc_state->mode;
7689 			crtc->hwmode = new_crtc_state->mode;
7690 			mode_set_reset_required = true;
7691 		} else if (modereset_required(new_crtc_state)) {
7692 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7693 			/* i.e. reset mode */
7694 			if (dm_old_crtc_state->stream)
7695 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7696 			mode_set_reset_required = true;
7697 		}
7698 	} /* for_each_crtc_in_state() */
7699 
7700 	if (dc_state) {
7701 		/* if there is a mode set or reset, disable eDP PSR */
7702 		if (mode_set_reset_required)
7703 			amdgpu_dm_psr_disable_all(dm);
7704 
7705 		dm_enable_per_frame_crtc_master_sync(dc_state);
7706 		mutex_lock(&dm->dc_lock);
7707 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7708 		mutex_unlock(&dm->dc_lock);
7709 	}
7710 
7711 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7712 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7713 
7714 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7715 
7716 		if (dm_new_crtc_state->stream != NULL) {
7717 			const struct dc_stream_status *status =
7718 					dc_stream_get_status(dm_new_crtc_state->stream);
7719 
7720 			if (!status)
7721 				status = dc_stream_get_status_from_state(dc_state,
7722 									 dm_new_crtc_state->stream);
7723 			if (!status)
7724 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7725 			else
7726 				acrtc->otg_inst = status->primary_otg_inst;
7727 		}
7728 	}
7729 #ifdef CONFIG_DRM_AMD_DC_HDCP
7730 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7731 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7732 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7733 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7734 
7735 		new_crtc_state = NULL;
7736 
7737 		if (acrtc)
7738 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7739 
7740 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7741 
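		/*
		 * CRTC is being disabled while HDCP was enabled: tear HDCP
		 * down now and leave the property at DESIRED so it gets
		 * re-enabled when the display comes back.
		 */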
7742 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7743 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7744 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7745 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7746 			continue;
7747 		}
7748 
7749 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7750 			hdcp_update_display(
7751 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7752 				new_con_state->hdcp_content_type,
7753 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7754 													 : false);
7755 	}
7756 #endif
7757 
7758 	/* Handle connector state changes */
7759 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7760 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7761 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7762 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7763 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7764 		struct dc_stream_update stream_update;
7765 		struct dc_info_packet hdr_packet;
7766 		struct dc_stream_status *status = NULL;
7767 		bool abm_changed, hdr_changed, scaling_changed;
7768 
7769 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7770 		memset(&stream_update, 0, sizeof(stream_update));
7771 
7772 		if (acrtc) {
7773 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7774 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7775 		}
7776 
7777 		/* Skip any modesets/resets */
7778 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7779 			continue;
7780 
7781 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7782 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7783 
7784 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7785 							     dm_old_con_state);
7786 
7787 		abm_changed = dm_new_crtc_state->abm_level !=
7788 			      dm_old_crtc_state->abm_level;
7789 
7790 		hdr_changed =
7791 			is_hdr_metadata_different(old_con_state, new_con_state);
7792 
7793 		if (!scaling_changed && !abm_changed && !hdr_changed)
7794 			continue;
7795 
7796 		stream_update.stream = dm_new_crtc_state->stream;
7797 		if (scaling_changed) {
7798 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7799 					dm_new_con_state, dm_new_crtc_state->stream);
7800 
7801 			stream_update.src = dm_new_crtc_state->stream->src;
7802 			stream_update.dst = dm_new_crtc_state->stream->dst;
7803 		}
7804 
7805 		if (abm_changed) {
7806 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7807 
7808 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7809 		}
7810 
7811 		if (hdr_changed) {
7812 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7813 			stream_update.hdr_static_metadata = &hdr_packet;
7814 		}
7815 
7816 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7817 		WARN_ON(!status);
7818 		WARN_ON(!status->plane_count);
7819 
7820 		/*
7821 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7822 		 * Here we create an empty update on each plane.
7823 		 * To fix this, DC should permit updating only stream properties.
7824 		 */
7825 		for (j = 0; j < status->plane_count; j++)
7826 			dummy_updates[j].surface = status->plane_states[0];
7827 
7828 
7829 		mutex_lock(&dm->dc_lock);
7830 		dc_commit_updates_for_stream(dm->dc,
7831 						     dummy_updates,
7832 						     status->plane_count,
7833 						     dm_new_crtc_state->stream,
7834 						     &stream_update,
7835 						     dc_state);
7836 		mutex_unlock(&dm->dc_lock);
7837 	}
7838 
7839 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7840 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7841 				      new_crtc_state, i) {
7842 		if (old_crtc_state->active && !new_crtc_state->active)
7843 			crtc_disable_count++;
7844 
7845 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7846 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7847 
7848 		/* For freesync config update on crtc state and params for irq */
7849 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7850 
7851 		/* Handle vrr on->off / off->on transitions */
7852 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7853 						dm_new_crtc_state);
7854 	}
7855 
7856 	/**
7857 	 * Enable interrupts for CRTCs that are newly enabled or went through
7858 	 * a modeset. This is intentionally deferred until after the front-end
7859 	 * state has been modified, so that the OTG is on and the IRQ
7860 	 * handlers do not access stale or invalid state.
7861 	 */
7862 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7863 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7864 
7865 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7866 
7867 		if (new_crtc_state->active &&
7868 		    (!old_crtc_state->active ||
7869 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7870 			dc_stream_retain(dm_new_crtc_state->stream);
7871 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7872 			manage_dm_interrupts(adev, acrtc, true);
7873 
7874 #ifdef CONFIG_DEBUG_FS
7875 			/**
7876 			 * Frontend may have changed so reapply the CRC capture
7877 			 * settings for the stream.
7878 			 */
7879 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7880 
7881 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7882 				amdgpu_dm_crtc_configure_crc_source(
7883 					crtc, dm_new_crtc_state,
7884 					dm_new_crtc_state->crc_src);
7885 			}
7886 #endif
7887 		}
7888 	}
7889 
7890 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7891 		if (new_crtc_state->async_flip)
7892 			wait_for_vblank = false;
7893 
7894 	/* update planes when needed per crtc*/
7895 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7896 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7897 
7898 		if (dm_new_crtc_state->stream)
7899 			amdgpu_dm_commit_planes(state, dc_state, dev,
7900 						dm, crtc, wait_for_vblank);
7901 	}
7902 
7903 	/* Update audio instances for each connector. */
7904 	amdgpu_dm_commit_audio(dev, state);
7905 
7906 	/*
7907 	 * Send a vblank event for every event not handled in the flip path,
7908 	 * and mark the event consumed for drm_atomic_helper_commit_hw_done().
7909 	 */
7910 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7911 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7912 
7913 		if (new_crtc_state->event)
7914 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7915 
7916 		new_crtc_state->event = NULL;
7917 	}
7918 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7919 
7920 	/* Signal HW programming completion */
7921 	drm_atomic_helper_commit_hw_done(state);
7922 
7923 	if (wait_for_vblank)
7924 		drm_atomic_helper_wait_for_flip_done(dev, state);
7925 
7926 	drm_atomic_helper_cleanup_planes(dev, state);
7927 
7928 	/*
7929 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7930 	 * so we can put the GPU into runtime suspend if we're not driving any
7931 	 * displays anymore
7932 	 */
7933 	for (i = 0; i < crtc_disable_count; i++)
7934 		pm_runtime_put_autosuspend(dev->dev);
7935 	pm_runtime_mark_last_busy(dev->dev);
7936 
7937 	if (dc_state_temp)
7938 		dc_release_state(dc_state_temp);
7939 }
7940 
7941 
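/*
 * Build and commit a minimal atomic state (connector + its CRTC + the
 * primary plane) with mode_changed forced, to make the driver reprogram
 * the display without any userspace involvement.
 */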
7942 static int dm_force_atomic_commit(struct drm_connector *connector)
7943 {
7944 	int ret = 0;
7945 	struct drm_device *ddev = connector->dev;
7946 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7947 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7948 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7949 	struct drm_connector_state *conn_state;
7950 	struct drm_crtc_state *crtc_state;
7951 	struct drm_plane_state *plane_state;
7952 
7953 	if (!state)
7954 		return -ENOMEM;
7955 
7956 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7957 
7958 	/* Construct an atomic state to restore previous display settings */
7959 
7960 	/*
7961 	 * Attach connectors to drm_atomic_state
7962 	 */
7963 	conn_state = drm_atomic_get_connector_state(state, connector);
7964 
7965 	ret = PTR_ERR_OR_ZERO(conn_state);
7966 	if (ret)
7967 		goto out;
7968 
7969 	/* Attach crtc to drm_atomic_state*/
7970 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7971 
7972 	ret = PTR_ERR_OR_ZERO(crtc_state);
7973 	if (ret)
7974 		goto out;
7975 
7976 	/* force a restore */
7977 	crtc_state->mode_changed = true;
7978 
7979 	/* Attach plane to drm_atomic_state */
7980 	plane_state = drm_atomic_get_plane_state(state, plane);
7981 
7982 	ret = PTR_ERR_OR_ZERO(plane_state);
7983 	if (ret)
7984 		goto out;
7985 
7986 	/* Call commit internally with the state we just constructed */
7987 	ret = drm_atomic_commit(state);
7988 
7989 out:
7990 	drm_atomic_state_put(state);
7991 	if (ret)
7992 		DRM_ERROR("Restoring old state failed with %i\n", ret);
7993 
7994 	return ret;
7995 }
7996 
7997 /*
7998  * This function handles all cases in which a set mode does not come upon
7999  * hotplug. This includes when a display is unplugged then plugged back into
8000  * the same port and when running without usermode desktop manager support.
8001  */
8002 void dm_restore_drm_connector_state(struct drm_device *dev,
8003 				    struct drm_connector *connector)
8004 {
8005 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8006 	struct amdgpu_crtc *disconnected_acrtc;
8007 	struct dm_crtc_state *acrtc_state;
8008 
8009 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8010 		return;
8011 
8012 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8013 	if (!disconnected_acrtc)
8014 		return;
8015 
8016 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8017 	if (!acrtc_state->stream)
8018 		return;
8019 
8020 	/*
8021 	 * If the previous sink is not released and different from the current,
8022 	 * we deduce we are in a state where we cannot rely on a usermode call
8023 	 * to turn on the display, so we do it here.
8024 	 */
8025 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8026 		dm_force_atomic_commit(&aconnector->base);
8027 }
8028 
8029 /*
8030  * Grabs all modesetting locks to serialize against any blocking commits,
8031  * and waits for completion of all non-blocking commits.
8032  */
8033 static int do_aquire_global_lock(struct drm_device *dev,
8034 				 struct drm_atomic_state *state)
8035 {
8036 	struct drm_crtc *crtc;
8037 	struct drm_crtc_commit *commit;
8038 	long ret;
8039 
8040 	/*
8041 	 * Adding all modeset locks to acquire_ctx will
8042 	 * ensure that when the framework releases it, the
8043 	 * extra locks we are taking here will get released too.
8044 	 */
8045 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8046 	if (ret)
8047 		return ret;
8048 
8049 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8050 		spin_lock(&crtc->commit_lock);
8051 		commit = list_first_entry_or_null(&crtc->commit_list,
8052 				struct drm_crtc_commit, commit_entry);
8053 		if (commit)
8054 			drm_crtc_commit_get(commit);
8055 		spin_unlock(&crtc->commit_lock);
8056 
8057 		if (!commit)
8058 			continue;
8059 
8060 		/*
8061 		 * Make sure all pending HW programming has completed and
8062 		 * all page flips are done.
8063 		 */
8064 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8065 
8066 		if (ret > 0)
8067 			ret = wait_for_completion_interruptible_timeout(
8068 					&commit->flip_done, 10*HZ);
8069 
8070 		if (ret == 0)
8071 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8072 				  "timed out\n", crtc->base.id, crtc->name);
8073 
8074 		drm_crtc_commit_put(commit);
8075 	}
8076 
8077 	return ret < 0 ? ret : 0;
8078 }
8079 
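/*
 * Derive the freesync config for a CRTC: VRR is supported when the sink
 * reported freesync capability and the mode's nominal refresh rate falls
 * within the sink's [min_vfreq, max_vfreq] range. The freesync module
 * expects refresh rates in uHz, hence the * 1000000 below.
 */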
8080 static void get_freesync_config_for_crtc(
8081 	struct dm_crtc_state *new_crtc_state,
8082 	struct dm_connector_state *new_con_state)
8083 {
8084 	struct mod_freesync_config config = {0};
8085 	struct amdgpu_dm_connector *aconnector =
8086 			to_amdgpu_dm_connector(new_con_state->base.connector);
8087 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8088 	int vrefresh = drm_mode_vrefresh(mode);
8089 
8090 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8091 					vrefresh >= aconnector->min_vfreq &&
8092 					vrefresh <= aconnector->max_vfreq;
8093 
8094 	if (new_crtc_state->vrr_supported) {
8095 		new_crtc_state->stream->ignore_msa_timing_param = true;
8096 		config.state = new_crtc_state->base.vrr_enabled ?
8097 				VRR_STATE_ACTIVE_VARIABLE :
8098 				VRR_STATE_INACTIVE;
8099 		config.min_refresh_in_uhz =
8100 				aconnector->min_vfreq * 1000000;
8101 		config.max_refresh_in_uhz =
8102 				aconnector->max_vfreq * 1000000;
8103 		config.vsif_supported = true;
8104 		config.btr = true;
8105 	}
8106 
8107 	new_crtc_state->freesync_config = config;
8108 }
8109 
8110 static void reset_freesync_config_for_crtc(
8111 	struct dm_crtc_state *new_crtc_state)
8112 {
8113 	new_crtc_state->vrr_supported = false;
8114 
8115 	memset(&new_crtc_state->vrr_infopacket, 0,
8116 	       sizeof(new_crtc_state->vrr_infopacket));
8117 }
8118 
8119 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8120 				struct drm_atomic_state *state,
8121 				struct drm_crtc *crtc,
8122 				struct drm_crtc_state *old_crtc_state,
8123 				struct drm_crtc_state *new_crtc_state,
8124 				bool enable,
8125 				bool *lock_and_validation_needed)
8126 {
8127 	struct dm_atomic_state *dm_state = NULL;
8128 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8129 	struct dc_stream_state *new_stream;
8130 	int ret = 0;
8131 
8132 	/*
8133 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8134 	 * update changed items
8135 	 */
8136 	struct amdgpu_crtc *acrtc = NULL;
8137 	struct amdgpu_dm_connector *aconnector = NULL;
8138 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8139 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8140 
8141 	new_stream = NULL;
8142 
8143 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8144 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8145 	acrtc = to_amdgpu_crtc(crtc);
8146 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8147 
8148 	/* TODO This hack should go away */
8149 	if (aconnector && enable) {
8150 		/* Make sure fake sink is created in plug-in scenario */
8151 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8152 							    &aconnector->base);
8153 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8154 							    &aconnector->base);
8155 
8156 		if (IS_ERR(drm_new_conn_state)) {
8157 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8158 			goto fail;
8159 		}
8160 
8161 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8162 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8163 
8164 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8165 			goto skip_modeset;
8166 
8167 		new_stream = create_validate_stream_for_sink(aconnector,
8168 							     &new_crtc_state->mode,
8169 							     dm_new_conn_state,
8170 							     dm_old_crtc_state->stream);
8171 
8172 		/*
8173 		 * We can have no stream on ACTION_SET if a display
8174 		 * was disconnected during S3; in this case it is not an
8175 		 * error. The OS will be updated after detection and
8176 		 * will do the right thing on the next atomic commit.
8177 		 */
8178 
8179 		if (!new_stream) {
8180 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8181 					__func__, acrtc->base.base.id);
8182 			ret = -ENOMEM;
8183 			goto fail;
8184 		}
8185 
8186 		/*
8187 		 * TODO: Check VSDB bits to decide whether this should
8188 		 * be enabled or not.
8189 		 */
8190 		new_stream->triggered_crtc_reset.enabled =
8191 			dm->force_timing_sync;
8192 
8193 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8194 
8195 		ret = fill_hdr_info_packet(drm_new_conn_state,
8196 					   &new_stream->hdr_static_metadata);
8197 		if (ret)
8198 			goto fail;
8199 
8200 		/*
8201 		 * If we already removed the old stream from the context
8202 		 * (and set the new stream to NULL) then we can't reuse
8203 		 * the old stream even if the stream and scaling are unchanged.
8204 		 * We'll hit the BUG_ON and black screen.
8205 		 *
8206 		 * TODO: Refactor this function to allow this check to work
8207 		 * in all conditions.
8208 		 */
8209 		if (dm_new_crtc_state->stream &&
8210 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8211 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8212 			new_crtc_state->mode_changed = false;
8213 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8214 					 new_crtc_state->mode_changed);
8215 		}
8216 	}
8217 
8218 	/* mode_changed flag may get updated above, need to check again */
8219 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8220 		goto skip_modeset;
8221 
8222 	DRM_DEBUG_DRIVER(
8223 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8224 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8225 		"connectors_changed:%d\n",
8226 		acrtc->crtc_id,
8227 		new_crtc_state->enable,
8228 		new_crtc_state->active,
8229 		new_crtc_state->planes_changed,
8230 		new_crtc_state->mode_changed,
8231 		new_crtc_state->active_changed,
8232 		new_crtc_state->connectors_changed);
8233 
8234 	/* Remove stream for any changed/disabled CRTC */
8235 	if (!enable) {
8236 
8237 		if (!dm_old_crtc_state->stream)
8238 			goto skip_modeset;
8239 
8240 		ret = dm_atomic_get_state(state, &dm_state);
8241 		if (ret)
8242 			goto fail;
8243 
8244 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8245 				crtc->base.id);
8246 
8247 		/* i.e. reset mode */
8248 		if (dc_remove_stream_from_ctx(
8249 				dm->dc,
8250 				dm_state->context,
8251 				dm_old_crtc_state->stream) != DC_OK) {
8252 			ret = -EINVAL;
8253 			goto fail;
8254 		}
8255 
8256 		dc_stream_release(dm_old_crtc_state->stream);
8257 		dm_new_crtc_state->stream = NULL;
8258 
8259 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8260 
8261 		*lock_and_validation_needed = true;
8262 
8263 	} else {/* Add stream for any updated/enabled CRTC */
8264 		/*
8265 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
8266 		 * added MST connectors are not found in the existing crtc_state in chained mode.
8267 		 * TODO: need to dig out the root cause of that.
8268 		 */
8269 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8270 			goto skip_modeset;
8271 
8272 		if (modereset_required(new_crtc_state))
8273 			goto skip_modeset;
8274 
8275 		if (modeset_required(new_crtc_state, new_stream,
8276 				     dm_old_crtc_state->stream)) {
8277 
8278 			WARN_ON(dm_new_crtc_state->stream);
8279 
8280 			ret = dm_atomic_get_state(state, &dm_state);
8281 			if (ret)
8282 				goto fail;
8283 
8284 			dm_new_crtc_state->stream = new_stream;
8285 
8286 			dc_stream_retain(new_stream);
8287 
8288 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8289 						crtc->base.id);
8290 
8291 			if (dc_add_stream_to_ctx(
8292 					dm->dc,
8293 					dm_state->context,
8294 					dm_new_crtc_state->stream) != DC_OK) {
8295 				ret = -EINVAL;
8296 				goto fail;
8297 			}
8298 
8299 			*lock_and_validation_needed = true;
8300 		}
8301 	}
8302 
8303 skip_modeset:
8304 	/* Release extra reference */
8305 	if (new_stream)
8306 		dc_stream_release(new_stream);
8307 
8308 	/*
8309 	 * We want to do dc stream updates that do not require a
8310 	 * full modeset below.
8311 	 */
8312 	if (!(enable && aconnector && new_crtc_state->active))
8313 		return 0;
8314 	/*
8315 	 * Given above conditions, the dc state cannot be NULL because:
8316 	 * 1. We're in the process of enabling CRTCs (just been added
8317 	 *    to the dc context, or already is on the context)
8318 	 * 2. Has a valid connector attached, and
8319 	 * 3. Is currently active and enabled.
8320 	 * => The dc stream state currently exists.
8321 	 */
8322 	BUG_ON(dm_new_crtc_state->stream == NULL);
8323 
8324 	/* Scaling or underscan settings */
8325 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
8326 				drm_atomic_crtc_needs_modeset(new_crtc_state))
8327 		update_stream_scaling_settings(
8328 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8329 
8330 	/* ABM settings */
8331 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8332 
8333 	/*
8334 	 * Color management settings. We also update color properties
8335 	 * when a modeset is needed, to ensure it gets reprogrammed.
8336 	 */
8337 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8338 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8339 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8340 		if (ret)
8341 			goto fail;
8342 	}
8343 
8344 	/* Update Freesync settings. */
8345 	get_freesync_config_for_crtc(dm_new_crtc_state,
8346 				     dm_new_conn_state);
8347 
8348 	return ret;
8349 
8350 fail:
8351 	if (new_stream)
8352 		dc_stream_release(new_stream);
8353 	return ret;
8354 }
8355 
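/*
 * Decide whether a plane update requires removing and re-creating the
 * DC plane state (a full plane "reset") rather than a fast in-place
 * update. Anything that can affect pipe allocation, bandwidth or
 * blending order forces a reset.
 */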
8356 static bool should_reset_plane(struct drm_atomic_state *state,
8357 			       struct drm_plane *plane,
8358 			       struct drm_plane_state *old_plane_state,
8359 			       struct drm_plane_state *new_plane_state)
8360 {
8361 	struct drm_plane *other;
8362 	struct drm_plane_state *old_other_state, *new_other_state;
8363 	struct drm_crtc_state *new_crtc_state;
8364 	int i;
8365 
8366 	/*
8367 	 * TODO: Remove this hack once the checks below are sufficient
8368 	 * to determine when we need to reset all the planes on
8369 	 * the stream.
8370 	 */
8371 	if (state->allow_modeset)
8372 		return true;
8373 
8374 	/* Exit early if we know that we're adding or removing the plane. */
8375 	if (old_plane_state->crtc != new_plane_state->crtc)
8376 		return true;
8377 
8378 	/* old crtc == new_crtc == NULL, plane not in context. */
8379 	if (!new_plane_state->crtc)
8380 		return false;
8381 
8382 	new_crtc_state =
8383 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8384 
8385 	if (!new_crtc_state)
8386 		return true;
8387 
8388 	/* CRTC Degamma changes currently require us to recreate planes. */
8389 	if (new_crtc_state->color_mgmt_changed)
8390 		return true;
8391 
8392 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8393 		return true;
8394 
8395 	/*
8396 	 * If there are any new primary or overlay planes being added or
8397 	 * removed then the z-order can potentially change. To ensure
8398 	 * correct z-order and pipe acquisition the current DC architecture
8399 	 * requires us to remove and recreate all existing planes.
8400 	 *
8401 	 * TODO: Come up with a more elegant solution for this.
8402 	 */
8403 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8404 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8405 
8406 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8407 			continue;
8408 
8409 		if (old_other_state->crtc != new_plane_state->crtc &&
8410 		    new_other_state->crtc != new_plane_state->crtc)
8411 			continue;
8412 
8413 		if (old_other_state->crtc != new_other_state->crtc)
8414 			return true;
8415 
8416 		/* Src/dst size and scaling updates. */
8417 		if (old_other_state->src_w != new_other_state->src_w ||
8418 		    old_other_state->src_h != new_other_state->src_h ||
8419 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8420 		    old_other_state->crtc_h != new_other_state->crtc_h)
8421 			return true;
8422 
8423 		/* Rotation / mirroring updates. */
8424 		if (old_other_state->rotation != new_other_state->rotation)
8425 			return true;
8426 
8427 		/* Blending updates. */
8428 		if (old_other_state->pixel_blend_mode !=
8429 		    new_other_state->pixel_blend_mode)
8430 			return true;
8431 
8432 		/* Alpha updates. */
8433 		if (old_other_state->alpha != new_other_state->alpha)
8434 			return true;
8435 
8436 		/* Colorspace changes. */
8437 		if (old_other_state->color_range != new_other_state->color_range ||
8438 		    old_other_state->color_encoding != new_other_state->color_encoding)
8439 			return true;
8440 
8441 		/* Framebuffer checks fall at the end. */
8442 		if (!old_other_state->fb || !new_other_state->fb)
8443 			continue;
8444 
8445 		/* Pixel format changes can require bandwidth updates. */
8446 		if (old_other_state->fb->format != new_other_state->fb->format)
8447 			return true;
8448 
8449 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8450 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8451 
8452 		/* Tiling and DCC changes also require bandwidth updates. */
8453 		if (old_dm_plane_state->tiling_flags !=
8454 		    new_dm_plane_state->tiling_flags)
8455 			return true;
8456 	}
8457 
8458 	return false;
8459 }
8460 
8461 static int dm_update_plane_state(struct dc *dc,
8462 				 struct drm_atomic_state *state,
8463 				 struct drm_plane *plane,
8464 				 struct drm_plane_state *old_plane_state,
8465 				 struct drm_plane_state *new_plane_state,
8466 				 bool enable,
8467 				 bool *lock_and_validation_needed)
8468 {
8469 
8470 	struct dm_atomic_state *dm_state = NULL;
8471 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8472 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8473 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8474 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8475 	struct amdgpu_crtc *new_acrtc;
8476 	bool needs_reset;
8477 	int ret = 0;
8478 
8479 
8480 	new_plane_crtc = new_plane_state->crtc;
8481 	old_plane_crtc = old_plane_state->crtc;
8482 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8483 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8484 
8485 	/* TODO: Implement a better atomic check for the cursor plane. */
8486 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8487 		if (!enable || !new_plane_crtc ||
8488 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8489 			return 0;
8490 
8491 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8492 
8493 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8494 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8495 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8496 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8497 			return -EINVAL;
8498 		}
8499 
8500 		return 0;
8501 	}
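
	/*
	 * Worked example for the cursor bounds check above (the cap values
	 * are an assumption; they come from the ASIC's DC capabilities and
	 * are commonly 128 or 256): with max_cursor_width/height == 128, a
	 * 64x64 or 128x128 cursor is accepted, while a 256x256 request is
	 * rejected with -EINVAL.
	 */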
8502 
8503 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8504 					 new_plane_state);
8505 
8506 	/* Remove any changed/removed planes */
8507 	if (!enable) {
8508 		if (!needs_reset)
8509 			return 0;
8510 
8511 		if (!old_plane_crtc)
8512 			return 0;
8513 
8514 		old_crtc_state = drm_atomic_get_old_crtc_state(
8515 				state, old_plane_crtc);
8516 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8517 
8518 		if (!dm_old_crtc_state->stream)
8519 			return 0;
8520 
8521 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8522 				plane->base.id, old_plane_crtc->base.id);
8523 
8524 		ret = dm_atomic_get_state(state, &dm_state);
8525 		if (ret)
8526 			return ret;
8527 
8528 		if (!dc_remove_plane_from_context(
8529 				dc,
8530 				dm_old_crtc_state->stream,
8531 				dm_old_plane_state->dc_state,
8532 				dm_state->context)) {
8533 
8534 			return -EINVAL;
8535 		}
8536 
8537 
8538 		dc_plane_state_release(dm_old_plane_state->dc_state);
8539 		dm_new_plane_state->dc_state = NULL;
8540 
8541 		*lock_and_validation_needed = true;
8542 
8543 	} else { /* Add new planes */
8544 		struct dc_plane_state *dc_new_plane_state;
8545 
8546 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8547 			return 0;
8548 
8549 		if (!new_plane_crtc)
8550 			return 0;
8551 
8552 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8553 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8554 
8555 		if (!dm_new_crtc_state->stream)
8556 			return 0;
8557 
8558 		if (!needs_reset)
8559 			return 0;
8560 
8561 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8562 		if (ret)
8563 			return ret;
8564 
8565 		WARN_ON(dm_new_plane_state->dc_state);
8566 
8567 		dc_new_plane_state = dc_create_plane_state(dc);
8568 		if (!dc_new_plane_state)
8569 			return -ENOMEM;
8570 
8571 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8572 				plane->base.id, new_plane_crtc->base.id);
8573 
8574 		ret = fill_dc_plane_attributes(
8575 			drm_to_adev(new_plane_crtc->dev),
8576 			dc_new_plane_state,
8577 			new_plane_state,
8578 			new_crtc_state);
8579 		if (ret) {
8580 			dc_plane_state_release(dc_new_plane_state);
8581 			return ret;
8582 		}
8583 
8584 		ret = dm_atomic_get_state(state, &dm_state);
8585 		if (ret) {
8586 			dc_plane_state_release(dc_new_plane_state);
8587 			return ret;
8588 		}
8589 
8590 		/*
8591 		 * Any atomic check errors that occur after this will
8592 		 * not need a release. The plane state will be attached
8593 		 * to the stream, and therefore part of the atomic
8594 		 * state. It'll be released when the atomic state is
8595 		 * cleaned.
8596 		 */
8597 		if (!dc_add_plane_to_context(
8598 				dc,
8599 				dm_new_crtc_state->stream,
8600 				dc_new_plane_state,
8601 				dm_state->context)) {
8602 
8603 			dc_plane_state_release(dc_new_plane_state);
8604 			return -EINVAL;
8605 		}
8606 
8607 		dm_new_plane_state->dc_state = dc_new_plane_state;
8608 
8609 		/* Tell DC to do a full surface update every time there
8610 		 * is a plane change. Inefficient, but works for now.
8611 		 */
8612 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8613 
8614 		*lock_and_validation_needed = true;
8615 	}
8616 
8617 
8618 	return ret;
8619 }
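
/*
 * Note: dm_update_plane_state() is invoked twice per plane from
 * amdgpu_dm_atomic_check() below. The first pass (enable == false, in
 * reverse plane order) removes changed planes from the DC context; the
 * second pass (enable == true) re-adds them. A changed plane is therefore
 * always rebuilt rather than patched in place.
 */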
8620 
8621 #if defined(CONFIG_DRM_AMD_DC_DCN)
8622 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8623 {
8624 	struct drm_connector *connector;
8625 	struct drm_connector_state *conn_state;
8626 	struct amdgpu_dm_connector *aconnector = NULL;
8627 	int i;
8628 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8629 		if (conn_state->crtc != crtc)
8630 			continue;
8631 
8632 		aconnector = to_amdgpu_dm_connector(connector);
8633 		if (!aconnector->port || !aconnector->mst_port)
8634 			aconnector = NULL;
8635 		else
8636 			break;
8637 	}
8638 
8639 	if (!aconnector)
8640 		return 0;
8641 
8642 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8643 }
8644 #endif
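
/*
 * Background for add_affected_mst_dsc_crtcs() above: DSC parameters on an
 * MST link are computed across all streams sharing that link, so a modeset
 * on one CRTC can alter the DSC configuration of its MST siblings. Pulling
 * those CRTCs into the atomic state ensures they are revalidated too.
 */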
8645 
8646 static int validate_overlay(struct drm_atomic_state *state)
8647 {
8648 	int i;
8649 	struct drm_plane *plane;
8650 	struct drm_plane_state *old_plane_state, *new_plane_state;
8651 	struct drm_plane_state *primary_state, *overlay_state = NULL;
8652 
8653 	/* Check if primary plane is contained inside overlay */
8654 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8655 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
8656 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8657 				return 0;
8658 
8659 			overlay_state = new_plane_state;
8660 			continue;
8661 		}
8662 	}
8663 
8664 	/* If no overlay plane is part of this state, there is nothing to validate. */
8665 	if (!overlay_state)
8666 		return 0;
8667 
8668 	/* check if overlay plane is enabled */
8669 	if (!overlay_state->crtc)
8670 		return 0;
8671 
8672 	/* find the primary plane for the CRTC that the overlay is enabled on */
8673 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
8674 	if (IS_ERR(primary_state))
8675 		return PTR_ERR(primary_state);
8676 
8677 	/* check if primary plane is enabled */
8678 	if (!primary_state->crtc)
8679 		return 0;
8680 
8681 	/* Perform the bounds check to ensure the overlay plane covers the primary */
8682 	if (primary_state->crtc_x < overlay_state->crtc_x ||
8683 	    primary_state->crtc_y < overlay_state->crtc_y ||
8684 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
8685 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
8686 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
8687 		return -EINVAL;
8688 	}
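
	/*
	 * Worked example (illustrative numbers): a 1920x1080 primary at
	 * (0,0) is fully covered by a 1920x1080 overlay at (0,0), so the
	 * state is accepted; a 1280x720 overlay would leave part of the
	 * primary exposed and the check above returns -EINVAL.
	 */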
8689 
8690 	return 0;
8691 }
8692 
8693 /**
8694  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8695  * @dev: The DRM device
8696  * @state: The atomic state to commit
8697  *
8698  * Validate that the given atomic state is programmable by DC into hardware.
8699  * This involves constructing a &struct dc_state reflecting the new hardware
8700  * state we wish to commit, then querying DC to see if it is programmable. It's
8701  * important not to modify the existing DC state. Otherwise, atomic_check
8702  * may unexpectedly commit hardware changes.
8703  *
8704  * When validating the DC state, it's important that the right locks are
8705  * acquired. For the full-update case, which removes/adds/updates streams on
8706  * one CRTC while flipping on another, acquiring the global lock guarantees
8707  * that any such full-update commit will wait for completion of any
8708  * outstanding flip using DRM's synchronization events.
8709  *
8710  * Note that DM adds the affected connectors for all CRTCs in the state, even
8711  * when that might not seem necessary.
8712  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8713  * be possible but non-trivial - a possible TODO item.
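 *
 * For context, this hook is wired into the device's
 * &struct drm_mode_config_funcs roughly as in the sketch below; the actual
 * initializer lives elsewhere in this file:
 *
 *	static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *		.fb_create = amdgpu_display_user_framebuffer_create,
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};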
8714  *
8715  * Return: 0 on success, or a negative error code if validation failed.
8716  */
8717 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8718 				  struct drm_atomic_state *state)
8719 {
8720 	struct amdgpu_device *adev = drm_to_adev(dev);
8721 	struct dm_atomic_state *dm_state = NULL;
8722 	struct dc *dc = adev->dm.dc;
8723 	struct drm_connector *connector;
8724 	struct drm_connector_state *old_con_state, *new_con_state;
8725 	struct drm_crtc *crtc;
8726 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8727 	struct drm_plane *plane;
8728 	struct drm_plane_state *old_plane_state, *new_plane_state;
8729 	enum dc_status status;
8730 	int ret, i;
8731 	bool lock_and_validation_needed = false;
8732 
8733 	amdgpu_check_debugfs_connector_property_change(adev, state);
8734 
8735 	ret = drm_atomic_helper_check_modeset(dev, state);
8736 	if (ret)
8737 		goto fail;
8738 
8739 	/* Check connector changes */
8740 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8741 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8742 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8743 
8744 		/* Skip connectors that are disabled or already part of a modeset. */
8745 		if (!old_con_state->crtc && !new_con_state->crtc)
8746 			continue;
8747 
8748 		if (!new_con_state->crtc)
8749 			continue;
8750 
8751 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8752 		if (IS_ERR(new_crtc_state)) {
8753 			ret = PTR_ERR(new_crtc_state);
8754 			goto fail;
8755 		}
8756 
8757 		if (dm_old_con_state->abm_level !=
8758 		    dm_new_con_state->abm_level)
8759 			new_crtc_state->connectors_changed = true;
8760 	}
8761 
8762 #if defined(CONFIG_DRM_AMD_DC_DCN)
8763 	if (dc_resource_is_dsc_encoding_supported(dc)) {
8764 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8765 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8766 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8767 				if (ret)
8768 					goto fail;
8769 			}
8770 		}
8771 	}
8772 #endif
8773 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8774 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8775 		    !new_crtc_state->color_mgmt_changed &&
8776 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8777 			continue;
8778 
8779 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
8780 		if (ret)
8781 			goto fail;
8782 
8783 		if (!new_crtc_state->enable)
8784 			continue;
8785 
8786 		ret = drm_atomic_add_affected_connectors(state, crtc);
8787 		if (ret)
8788 			return ret;
8789 
8790 		ret = drm_atomic_add_affected_planes(state, crtc);
8791 		if (ret)
8792 			goto fail;
8793 	}
8794 
8795 	/*
8796 	 * Add all primary and overlay planes on the CRTC to the state
8797 	 * whenever a plane is enabled to maintain correct z-ordering
8798 	 * and to enable fast surface updates.
8799 	 */
8800 	drm_for_each_crtc(crtc, dev) {
8801 		bool modified = false;
8802 
8803 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8804 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8805 				continue;
8806 
8807 			if (new_plane_state->crtc == crtc ||
8808 			    old_plane_state->crtc == crtc) {
8809 				modified = true;
8810 				break;
8811 			}
8812 		}
8813 
8814 		if (!modified)
8815 			continue;
8816 
8817 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8818 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8819 				continue;
8820 
8821 			new_plane_state =
8822 				drm_atomic_get_plane_state(state, plane);
8823 
8824 			if (IS_ERR(new_plane_state)) {
8825 				ret = PTR_ERR(new_plane_state);
8826 				goto fail;
8827 			}
8828 		}
8829 	}
8830 
8831 	/* Prepass for updating tiling flags on new planes. */
8832 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8833 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8834 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8835 
8836 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8837 				  &new_dm_plane_state->tmz_surface);
8838 		if (ret)
8839 			goto fail;
8840 	}
8841 
8842 	/* Remove existing planes if they are modified */
8843 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8844 		ret = dm_update_plane_state(dc, state, plane,
8845 					    old_plane_state,
8846 					    new_plane_state,
8847 					    false,
8848 					    &lock_and_validation_needed);
8849 		if (ret)
8850 			goto fail;
8851 	}
8852 
8853 	/* Disable all CRTCs that require disabling */
8854 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8855 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8856 					   old_crtc_state,
8857 					   new_crtc_state,
8858 					   false,
8859 					   &lock_and_validation_needed);
8860 		if (ret)
8861 			goto fail;
8862 	}
8863 
8864 	/* Enable all CRTCs that require enabling */
8865 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8866 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8867 					   old_crtc_state,
8868 					   new_crtc_state,
8869 					   true,
8870 					   &lock_and_validation_needed);
8871 		if (ret)
8872 			goto fail;
8873 	}
8874 
8875 	ret = validate_overlay(state);
8876 	if (ret)
8877 		goto fail;
8878 
8879 	/* Add new/modified planes */
8880 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8881 		ret = dm_update_plane_state(dc, state, plane,
8882 					    old_plane_state,
8883 					    new_plane_state,
8884 					    true,
8885 					    &lock_and_validation_needed);
8886 		if (ret)
8887 			goto fail;
8888 	}
8889 
8890 	/* Run this here since we want to validate the streams we created */
8891 	ret = drm_atomic_helper_check_planes(dev, state);
8892 	if (ret)
8893 		goto fail;
8894 
8895 	if (state->legacy_cursor_update) {
8896 		/*
8897 		 * This is a fast cursor update coming from the plane update
8898 		 * helper, check if it can be done asynchronously for better
8899 		 * performance.
8900 		 */
8901 		state->async_update =
8902 			!drm_atomic_helper_async_check(dev, state);
8903 
8904 		/*
8905 		 * Skip the remaining global validation if this is an async
8906 		 * update. Cursor updates can be done without affecting
8907 		 * state or bandwidth calcs and this avoids the performance
8908 		 * penalty of locking the private state object and
8909 		 * allocating a new dc_state.
8910 		 */
8911 		if (state->async_update)
8912 			return 0;
8913 	}
8914 
8915 	/* Check scaling and underscan changes */
8916 	/* TODO: Scaling-changes validation was removed due to the inability to
8917 	 * commit a new stream into the context without causing a full reset.
8918 	 * Need to decide how to handle this.
8919 	 */
8920 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8921 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8922 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8923 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8924 
8925 		/* Skip any modesets/resets */
8926 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8927 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8928 			continue;
8929 
8930 		/* Skip anything that is not a scaling or underscan change */
8931 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8932 			continue;
8933 
8934 		lock_and_validation_needed = true;
8935 	}
8936 
8937 	/*
8938 	 * Streams and planes are reset when there are changes that affect
8939 	 * bandwidth. Anything that affects bandwidth needs to go through
8940 	 * DC global validation to ensure that the configuration can be applied
8941 	 * to hardware.
8942 	 *
8943 	 * We currently have to stall out here in atomic_check for outstanding
8944 	 * commits to finish in this case because our IRQ handlers reference
8945 	 * DRM state directly - we can end up disabling interrupts too early
8946 	 * if we don't.
8947 	 *
8948 	 * TODO: Remove this stall and drop DM state private objects.
8949 	 */
8950 	if (lock_and_validation_needed) {
8951 		ret = dm_atomic_get_state(state, &dm_state);
8952 		if (ret)
8953 			goto fail;
8954 
8955 		ret = do_aquire_global_lock(dev, state);
8956 		if (ret)
8957 			goto fail;
8958 
8959 #if defined(CONFIG_DRM_AMD_DC_DCN)
8960 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8961 			ret = -EINVAL;
			goto fail;
		}
8962 
8963 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8964 		if (ret)
8965 			goto fail;
8966 #endif
8967 
8968 		/*
8969 		 * Perform validation of MST topology in the state:
8970 		 * We need to perform MST atomic check before calling
8971 		 * dc_validate_global_state(), or there is a chance
8972 		 * of getting stuck in an infinite loop and hanging eventually.
8973 		 */
8974 		ret = drm_dp_mst_atomic_check(state);
8975 		if (ret)
8976 			goto fail;
8977 		status = dc_validate_global_state(dc, dm_state->context, false);
8978 		if (status != DC_OK) {
8979 			drm_dbg_atomic(dev,
8980 				       "DC global validation failure: %s (%d)",
8981 				       dc_status_to_str(status), status);
8982 			ret = -EINVAL;
8983 			goto fail;
8984 		}
8985 	} else {
8986 		/*
8987 		 * The commit is a fast update. Fast updates shouldn't change
8988 		 * the DC context or affect global validation, and can have their
8989 		 * commit work done in parallel with other commits not touching
8990 		 * the same resource. If we have a new DC context as part of
8991 		 * the DM atomic state from validation we need to free it and
8992 		 * retain the existing one instead.
8993 		 *
8994 		 * Furthermore, since the DM atomic state only contains the DC
8995 		 * context and can safely be annulled, we can free the state
8996 		 * and clear the associated private object now to free
8997 		 * some memory and avoid a possible use-after-free later.
8998 		 */
8999 
9000 		for (i = 0; i < state->num_private_objs; i++) {
9001 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9002 
9003 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9004 				int j = state->num_private_objs-1;
9005 
9006 				dm_atomic_destroy_state(obj,
9007 						state->private_objs[i].state);
9008 
9009 				/* If i is not at the end of the array then the
9010 				 * last element needs to be moved to where i was
9011 				 * before the array can safely be truncated.
9012 				 */
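				/*
				 * E.g. removing index 1 from [A, B, C]
				 * (num_private_objs == 3): C is copied into
				 * slot 1, slot 2 is cleared, and the count
				 * drops to 2, leaving [A, C].
				 */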
9013 				if (i != j)
9014 					state->private_objs[i] =
9015 						state->private_objs[j];
9016 
9017 				state->private_objs[j].ptr = NULL;
9018 				state->private_objs[j].state = NULL;
9019 				state->private_objs[j].old_state = NULL;
9020 				state->private_objs[j].new_state = NULL;
9021 
9022 				state->num_private_objs = j;
9023 				break;
9024 			}
9025 		}
9026 	}
9027 
9028 	/* Store the overall update type for use later in atomic check. */
9029 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9030 		struct dm_crtc_state *dm_new_crtc_state =
9031 			to_dm_crtc_state(new_crtc_state);
9032 
9033 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9034 							 UPDATE_TYPE_FULL :
9035 							 UPDATE_TYPE_FAST;
9036 	}
9037 
9038 	/* Must be success */
9039 	WARN_ON(ret);
9040 	return ret;
9041 
9042 fail:
9043 	if (ret == -EDEADLK)
9044 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9045 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9046 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9047 	else
9048 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9049 
9050 	return ret;
9051 }
9052 
9053 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9054 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9055 {
9056 	uint8_t dpcd_data;
9057 	bool capable = false;
9058 
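	/*
	 * DP_DOWN_STREAM_PORT_COUNT is DPCD register 0x007; bit 6 in it
	 * (DP_MSA_TIMING_PAR_IGNORED) advertises that the sink can render the
	 * incoming stream while ignoring MSA timing parameters, which is the
	 * capability probed here as a prerequisite for FreeSync over DP.
	 */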
9059 	if (amdgpu_dm_connector->dc_link &&
9060 		dm_helpers_dp_read_dpcd(
9061 				NULL,
9062 				amdgpu_dm_connector->dc_link,
9063 				DP_DOWN_STREAM_PORT_COUNT,
9064 				&dpcd_data,
9065 				sizeof(dpcd_data))) {
9066 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9067 	}
9068 
9069 	return capable;
9070 }

9071 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9072 					struct edid *edid)
9073 {
9074 	int i;
9075 	bool edid_check_required;
9076 	struct detailed_timing *timing;
9077 	struct detailed_non_pixel *data;
9078 	struct detailed_data_monitor_range *range;
9079 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9080 			to_amdgpu_dm_connector(connector);
9081 	struct dm_connector_state *dm_con_state = NULL;
9082 
9083 	struct drm_device *dev = connector->dev;
9084 	struct amdgpu_device *adev = drm_to_adev(dev);
9085 	bool freesync_capable = false;
9086 
9087 	if (!connector->state) {
9088 		DRM_ERROR("%s - Connector has no state\n", __func__);
9089 		goto update;
9090 	}
9091 
9092 	if (!edid) {
9093 		dm_con_state = to_dm_connector_state(connector->state);
9094 
9095 		amdgpu_dm_connector->min_vfreq = 0;
9096 		amdgpu_dm_connector->max_vfreq = 0;
9097 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9098 
9099 		goto update;
9100 	}
9101 
9102 	dm_con_state = to_dm_connector_state(connector->state);
9103 
9104 	edid_check_required = false;
9105 	if (!amdgpu_dm_connector->dc_sink) {
9106 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
9107 		goto update;
9108 	}
9109 	if (!adev->dm.freesync_module)
9110 		goto update;
9111 	/*
9112 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP.
9113 	 */
9114 	if (edid) {
9115 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9116 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9117 			edid_check_required = is_dp_capable_without_timing_msa(
9118 						adev->dm.dc,
9119 						amdgpu_dm_connector);
9120 		}
9121 	}
9122 	if (edid_check_required && (edid->version > 1 ||
9123 	   (edid->version == 1 && edid->revision > 1))) {
9124 		for (i = 0; i < 4; i++) {
9125 
9126 			timing	= &edid->detailed_timings[i];
9127 			data	= &timing->data.other_data;
9128 			range	= &data->data.range;
9129 			/*
9130 			 * Check if monitor has continuous frequency mode
9131 			 */
9132 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9133 				continue;
9134 			/*
9135 			 * Check for the range-limits flag only. If flags == 1,
9136 			 * no additional timing information is provided.
9137 			 * Default GTF, GTF secondary curve and CVT are not
9138 			 * supported.
9139 			 */
9140 			if (range->flags != 1)
9141 				continue;
9142 
9143 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9144 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9145 			amdgpu_dm_connector->pixel_clock_mhz =
9146 				range->pixel_clock_mhz * 10;
9147 			break;
9148 		}
9149 
9150 		if (amdgpu_dm_connector->max_vfreq -
9151 		    amdgpu_dm_connector->min_vfreq > 10) {
9152 
9153 			freesync_capable = true;
9154 		}
9155 	}
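
	/*
	 * Worked example for the parse above: a range-limits descriptor
	 * advertising 40-144 Hz yields min_vfreq == 40 and max_vfreq == 144;
	 * the 104 Hz spread exceeds the 10 Hz threshold, so the display is
	 * reported as freesync_capable.
	 */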
9156 
9157 update:
9158 	if (dm_con_state)
9159 		dm_con_state->freesync_capable = freesync_capable;
9160 
9161 	if (connector->vrr_capable_property)
9162 		drm_connector_set_vrr_capable_property(connector,
9163 						       freesync_capable);
9164 }
9165 
9166 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9167 {
9168 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9169 
9170 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9171 		return;
9172 	if (link->type == dc_connection_none)
9173 		return;
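	/*
	 * DP_PSR_SUPPORT is the first byte of the eDP PSR capability block in
	 * DPCD (register 0x070): zero means PSR is unsupported, while a
	 * non-zero value encodes the supported PSR version.
	 */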
9174 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9175 					dpcd_data, sizeof(dpcd_data))) {
9176 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9177 
9178 		if (dpcd_data[0] == 0) {
9179 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9180 			link->psr_settings.psr_feature_enabled = false;
9181 		} else {
9182 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9183 			link->psr_settings.psr_feature_enabled = true;
9184 		}
9185 
9186 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9187 	}
9188 }
9189 
9190 /*
9191  * amdgpu_dm_link_setup_psr() - configure psr link
9192  * @stream: stream state
9193  *
9194  * Return: true on success
9195  */
9196 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9197 {
9198 	struct dc_link *link = NULL;
9199 	struct psr_config psr_config = {0};
9200 	struct psr_context psr_context = {0};
9201 	bool ret = false;
9202 
9203 	if (stream == NULL)
9204 		return false;
9205 
9206 	link = stream->link;
9207 
9208 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9209 
9210 	if (psr_config.psr_version > 0) {
9211 		psr_config.psr_exit_link_training_required = 0x1;
9212 		psr_config.psr_frame_capture_indication_req = 0;
9213 		psr_config.psr_rfb_setup_time = 0x37;
9214 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9215 		psr_config.allow_smu_optimizations = 0x0;
9216 
9217 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9218 
9219 	}
9220 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9221 
9222 	return ret;
9223 }
9224 
9225 /*
9226  * amdgpu_dm_psr_enable() - enable psr f/w
9227  * @stream: stream state
9228  *
9229  * Return: true on success
9230  */
9231 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9232 {
9233 	struct dc_link *link = stream->link;
9234 	unsigned int vsync_rate_hz = 0;
9235 	struct dc_static_screen_params params = {0};
9236 	/* Calculate number of static frames before generating interrupt to
9237 	 * enter PSR.
9238 	 */
9239 	/* Init to a fail-safe of 2 static frames. */
9240 	unsigned int num_frames_static = 2;
9241 
9242 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9243 
9244 	vsync_rate_hz = div64_u64(div64_u64((
9245 			stream->timing.pix_clk_100hz * 100),
9246 			stream->timing.v_total),
9247 			stream->timing.h_total);
9248 
9249 	/*
9250 	 * Round up: calculate the number of frames such that at least 30 ms
9251 	 * of time has passed.
9252 	 */
9253 	if (vsync_rate_hz != 0) {
9254 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9255 		num_frames_static = (30000 / frame_time_microsec) + 1;
9256 	}
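
	/*
	 * Worked example: at 60 Hz, frame_time_microsec == 16666, so
	 * num_frames_static == 30000 / 16666 + 1 == 2; at 144 Hz it is
	 * 30000 / 6944 + 1 == 5 static frames before PSR entry is signalled.
	 */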
9257 
9258 	params.triggers.cursor_update = true;
9259 	params.triggers.overlay_update = true;
9260 	params.triggers.surface_update = true;
9261 	params.num_frames = num_frames_static;
9262 
9263 	dc_stream_set_static_screen_params(link->ctx->dc,
9264 					   &stream, 1,
9265 					   &params);
9266 
9267 	return dc_link_set_psr_allow_active(link, true, false);
9268 }
9269 
9270 /*
9271  * amdgpu_dm_psr_disable() - disable psr f/w
9272  * @stream:  stream state
9273  *
9274  * Return: true on success
9275  */
9276 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9277 {
9278 
9279 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9280 
9281 	return dc_link_set_psr_allow_active(stream->link, false, true);
9282 }
9283 
9284 /*
9285  * amdgpu_dm_psr_disable_all() - disable psr f/w
9286  * if psr is enabled on any stream
9287  *
9288  * Return: true on success
9289  */
9290 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9291 {
9292 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9293 	return dc_set_psr_allow_active(dm->dc, false);
9294 }
9295 
9296 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9297 {
9298 	struct amdgpu_device *adev = drm_to_adev(dev);
9299 	struct dc *dc = adev->dm.dc;
9300 	int i;
9301 
9302 	mutex_lock(&adev->dm.dc_lock);
9303 	if (dc->current_state) {
9304 		for (i = 0; i < dc->current_state->stream_count; ++i)
9305 			dc->current_state->streams[i]
9306 				->triggered_crtc_reset.enabled =
9307 				adev->dm.force_timing_sync;
9308 
9309 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9310 		dc_trigger_sync(dc, dc->current_state);
9311 	}
9312 	mutex_unlock(&adev->dm.dc_lock);
9313 }
9314