xref: /openbsd-src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 25c4e8bd056e974b28f4a0ffd39d76c190a56013)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * initializes drm_device display related structures, based on the information
181  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
182  * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int disp_idx - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream ==  NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: ignored
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as that one incremented at start of this vblank
443 	 * of pageflip completion, so last_flip_vblank is the forbidden count
444 	 * for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
488 		/* Core vblank handling is done here after end of front-porch in
489 		 * vrr mode, as vblank timestamping will give valid results
490 		 * while now done after front-porch. This will also deliver
491 		 * page-flip completion events that have been queued to us
492 		 * if a pageflip happened inside front-porch.
493 		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
520  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
540 	/**
541 	 * Core vblank handling at start of front-porch is only possible
542 	 * in non-vrr mode, as only there vblank timestamping will give
543 	 * valid results while done in front-porch. Otherwise defer it
544 	 * to dm_vupdate_high_irq after end of front-porch.
545 	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
549 	/**
550 	 * Following stuff must happen at start of vblank, for crc
551 	 * computation and below-the-range btr support in vrr mode.
552 	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
574 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 #define DMUB_TRACE_MAX_READ 64
622 /**
623  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
624  * @interrupt_params: used for determining the Outbox instance
625  *
626  * Handles the Outbox Interrupt
627  * event handler.
628  */
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
648 			// TODO : HPD Implementation
649 
650 		} else {
651 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
652 		}
653 	}
654 
655 
656 	do {
657 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 							entry.param0, entry.param1);
660 
661 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
663 		} else
664 			break;
665 
666 		count++;
667 
668 	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
687 static int dm_early_init(void* handle);
688 
689 /* Allocate memory for FBC compressed data  */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
705 	if (compressor->bo_ptr)
706 		return;
707 
708 
709 	list_for_each_entry(mode, &connector->modes, head) {
710 		if (max_size < mode->htotal * mode->vtotal)
711 			max_size = mode->htotal * mode->vtotal;
712 	}
713 
714 	if (max_size) {
715 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 			    &compressor->gpu_addr, &compressor->cpu_addr);
718 
719 		if (r)
720 			DRM_ERROR("DM: Failed to initialize FBC\n");
721 		else {
722 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
724 		}
725 
726 	}
727 
728 }
729 
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 					  int pipe, bool *enabled,
732 					  unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 				       struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 					  struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 #ifdef notyet
797 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
798 	.bind	= amdgpu_dm_audio_component_bind,
799 	.unbind	= amdgpu_dm_audio_component_unbind,
800 };
801 #endif
802 
803 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
804 {
805 	int i, ret;
806 
807 	if (!amdgpu_audio)
808 		return 0;
809 
810 	adev->mode_info.audio.enabled = true;
811 
812 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
813 
814 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
815 		adev->mode_info.audio.pin[i].channels = -1;
816 		adev->mode_info.audio.pin[i].rate = -1;
817 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
818 		adev->mode_info.audio.pin[i].status_bits = 0;
819 		adev->mode_info.audio.pin[i].category_code = 0;
820 		adev->mode_info.audio.pin[i].connected = false;
821 		adev->mode_info.audio.pin[i].id =
822 			adev->dm.dc->res_pool->audios[i]->inst;
823 		adev->mode_info.audio.pin[i].offset = 0;
824 	}
825 
826 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
827 	if (ret < 0)
828 		return ret;
829 
830 	adev->dm.audio_registered = true;
831 
832 	return 0;
833 }
834 
835 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
836 {
837 	if (!amdgpu_audio)
838 		return;
839 
840 	if (!adev->mode_info.audio.enabled)
841 		return;
842 
843 	if (adev->dm.audio_registered) {
844 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
845 		adev->dm.audio_registered = false;
846 	}
847 
848 	/* TODO: Disable audio? */
849 
850 	adev->mode_info.audio.enabled = false;
851 }
852 
853 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
854 {
855 	struct drm_audio_component *acomp = adev->dm.audio_component;
856 
857 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
858 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
859 
860 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
861 						 pin, -1);
862 	}
863 }
864 
865 static int dm_dmub_hw_init(struct amdgpu_device *adev)
866 {
867 	const struct dmcub_firmware_header_v1_0 *hdr;
868 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
869 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
870 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
871 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
872 	struct abm *abm = adev->dm.dc->res_pool->abm;
873 	struct dmub_srv_hw_params hw_params;
874 	enum dmub_status status;
875 	const unsigned char *fw_inst_const, *fw_bss_data;
876 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
877 	bool has_hw_support;
878 
879 	if (!dmub_srv)
880 		/* DMUB isn't supported on the ASIC. */
881 		return 0;
882 
883 	if (!fb_info) {
884 		DRM_ERROR("No framebuffer info for DMUB service.\n");
885 		return -EINVAL;
886 	}
887 
888 	if (!dmub_fw) {
889 		/* Firmware required for DMUB support. */
890 		DRM_ERROR("No firmware provided for DMUB.\n");
891 		return -EINVAL;
892 	}
893 
894 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
895 	if (status != DMUB_STATUS_OK) {
896 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
897 		return -EINVAL;
898 	}
899 
900 	if (!has_hw_support) {
901 		DRM_INFO("DMUB unsupported on ASIC\n");
902 		return 0;
903 	}
904 
905 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
906 
907 	fw_inst_const = dmub_fw->data +
908 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
909 			PSP_HEADER_BYTES;
910 
911 	fw_bss_data = dmub_fw->data +
912 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
913 		      le32_to_cpu(hdr->inst_const_bytes);
914 
915 	/* Copy firmware and bios info into FB memory. */
916 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
917 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
918 
919 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
920 
921 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
922 	 * amdgpu_ucode_init_single_fw will load dmub firmware
923 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
924 	 * will be done by dm_dmub_hw_init
925 	 */
926 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
927 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
928 				fw_inst_const_size);
929 	}
930 
931 	if (fw_bss_data_size)
932 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
933 		       fw_bss_data, fw_bss_data_size);
934 
935 	/* Copy firmware bios info into FB memory. */
936 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
937 	       adev->bios_size);
938 
939 	/* Reset regions that need to be reset. */
940 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
941 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
942 
943 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
944 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
945 
946 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
947 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
948 
949 	/* Initialize hardware. */
950 	memset(&hw_params, 0, sizeof(hw_params));
951 	hw_params.fb_base = adev->gmc.fb_start;
952 	hw_params.fb_offset = adev->gmc.aper_base;
953 
954 	/* backdoor load firmware and trigger dmub running */
955 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
956 		hw_params.load_inst_const = true;
957 
958 	if (dmcu)
959 		hw_params.psp_version = dmcu->psp_version;
960 
961 	for (i = 0; i < fb_info->num_fb; ++i)
962 		hw_params.fb[i] = &fb_info->fb[i];
963 
964 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
965 	if (status != DMUB_STATUS_OK) {
966 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
967 		return -EINVAL;
968 	}
969 
970 	/* Wait for firmware load to finish. */
971 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
972 	if (status != DMUB_STATUS_OK)
973 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
974 
975 	/* Init DMCU and ABM if available. */
976 	if (dmcu && abm) {
977 		dmcu->funcs->dmcu_init(dmcu);
978 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
979 	}
980 
981 	if (!adev->dm.dc->ctx->dmub_srv)
982 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 	if (!adev->dm.dc->ctx->dmub_srv) {
984 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
985 		return -ENOMEM;
986 	}
987 
988 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 		 adev->dm.dmcub_fw_version);
990 
991 	return 0;
992 }
993 
994 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
995 {
996 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
997 	enum dmub_status status;
998 	bool init;
999 
1000 	if (!dmub_srv) {
1001 		/* DMUB isn't supported on the ASIC. */
1002 		return;
1003 	}
1004 
1005 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1006 	if (status != DMUB_STATUS_OK)
1007 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1008 
1009 	if (status == DMUB_STATUS_OK && init) {
1010 		/* Wait for firmware load to finish. */
1011 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1012 		if (status != DMUB_STATUS_OK)
1013 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1014 	} else {
1015 		/* Perform the full hardware initialization. */
1016 		dm_dmub_hw_init(adev);
1017 	}
1018 }
1019 
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
1021 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1022 {
1023 	uint64_t pt_base;
1024 	uint32_t logical_addr_low;
1025 	uint32_t logical_addr_high;
1026 	uint32_t agp_base, agp_bot, agp_top;
1027 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1028 
1029 	memset(pa_config, 0, sizeof(*pa_config));
1030 
1031 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1032 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1033 
1034 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1035 		/*
1036 		 * Raven2 has a HW issue that it is unable to use the vram which
1037 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1038 		 * workaround that increase system aperture high address (add 1)
1039 		 * to get rid of the VM fault and hardware hang.
1040 		 */
1041 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1042 	else
1043 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1044 
1045 	agp_base = 0;
1046 	agp_bot = adev->gmc.agp_start >> 24;
1047 	agp_top = adev->gmc.agp_end >> 24;
1048 
1049 
1050 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1051 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1052 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1053 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1054 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1055 	page_table_base.low_part = lower_32_bits(pt_base);
1056 
1057 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1058 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1059 
1060 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1061 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1062 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1063 
1064 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1065 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1066 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1067 
1068 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1069 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1070 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1071 
1072 	pa_config->is_hvm_enabled = 0;
1073 
1074 }
1075 #endif
1076 #if defined(CONFIG_DRM_AMD_DC_DCN)
1077 static void vblank_control_worker(struct work_struct *work)
1078 {
1079 	struct vblank_control_work *vblank_work =
1080 		container_of(work, struct vblank_control_work, work);
1081 	struct amdgpu_display_manager *dm = vblank_work->dm;
1082 
1083 	mutex_lock(&dm->dc_lock);
1084 
1085 	if (vblank_work->enable)
1086 		dm->active_vblank_irq_count++;
1087 	else if(dm->active_vblank_irq_count)
1088 		dm->active_vblank_irq_count--;
1089 
1090 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1091 
1092 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1093 
1094 	/* Control PSR based on vblank requirements from OS */
1095 	if (vblank_work->stream && vblank_work->stream->link) {
1096 		if (vblank_work->enable) {
1097 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1098 				amdgpu_dm_psr_disable(vblank_work->stream);
1099 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1100 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1101 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1102 			amdgpu_dm_psr_enable(vblank_work->stream);
1103 		}
1104 	}
1105 
1106 	mutex_unlock(&dm->dc_lock);
1107 
1108 	dc_stream_release(vblank_work->stream);
1109 
1110 	kfree(vblank_work);
1111 }
1112 
1113 #endif
1114 static int amdgpu_dm_init(struct amdgpu_device *adev)
1115 {
1116 	struct dc_init_data init_data;
1117 #ifdef CONFIG_DRM_AMD_DC_HDCP
1118 	struct dc_callback_init init_params;
1119 #endif
1120 	int r;
1121 
1122 	adev->dm.ddev = adev_to_drm(adev);
1123 	adev->dm.adev = adev;
1124 
1125 	/* Zero all the fields */
1126 	memset(&init_data, 0, sizeof(init_data));
1127 #ifdef CONFIG_DRM_AMD_DC_HDCP
1128 	memset(&init_params, 0, sizeof(init_params));
1129 #endif
1130 
1131 	rw_init(&adev->dm.dc_lock, "dmdc");
1132 	rw_init(&adev->dm.audio_lock, "dmaud");
1133 #if defined(CONFIG_DRM_AMD_DC_DCN)
1134 	mtx_init(&adev->dm.vblank_lock, IPL_TTY);
1135 #endif
1136 
1137 	if(amdgpu_dm_irq_init(adev)) {
1138 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1139 		goto error;
1140 	}
1141 
1142 	init_data.asic_id.chip_family = adev->family;
1143 
1144 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1145 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1146 	init_data.asic_id.chip_id = adev->pdev->device;
1147 
1148 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1149 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1150 	init_data.asic_id.atombios_base_address =
1151 		adev->mode_info.atom_context->bios;
1152 
1153 	init_data.driver = adev;
1154 
1155 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1156 
1157 	if (!adev->dm.cgs_device) {
1158 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1159 		goto error;
1160 	}
1161 
1162 	init_data.cgs_device = adev->dm.cgs_device;
1163 
1164 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1165 
1166 	switch (adev->asic_type) {
1167 	case CHIP_CARRIZO:
1168 	case CHIP_STONEY:
1169 	case CHIP_RAVEN:
1170 	case CHIP_RENOIR:
1171 		init_data.flags.gpu_vm_support = true;
1172 		switch (adev->dm.dmcub_fw_version) {
1173 		case 0: /* development */
1174 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1175 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1176 			init_data.flags.disable_dmcu = false;
1177 			break;
1178 		default:
1179 			init_data.flags.disable_dmcu = true;
1180 		}
1181 		break;
1182 	case CHIP_VANGOGH:
1183 	case CHIP_YELLOW_CARP:
1184 		init_data.flags.gpu_vm_support = true;
1185 		break;
1186 	default:
1187 		break;
1188 	}
1189 
1190 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1191 		init_data.flags.fbc_support = true;
1192 
1193 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1194 		init_data.flags.multi_mon_pp_mclk_switch = true;
1195 
1196 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1197 		init_data.flags.disable_fractional_pwm = true;
1198 
1199 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1200 		init_data.flags.edp_no_power_sequencing = true;
1201 
1202 	init_data.flags.power_down_display_on_boot = true;
1203 
1204 	INIT_LIST_HEAD(&adev->dm.da_list);
1205 	/* Display Core create. */
1206 	adev->dm.dc = dc_create(&init_data);
1207 
1208 	if (adev->dm.dc) {
1209 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1210 	} else {
1211 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1212 		goto error;
1213 	}
1214 
1215 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1216 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1217 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1218 	}
1219 
1220 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1221 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1222 
1223 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1224 		adev->dm.dc->debug.disable_stutter = true;
1225 
1226 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1227 		adev->dm.dc->debug.disable_dsc = true;
1228 
1229 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1230 		adev->dm.dc->debug.disable_clock_gate = true;
1231 
1232 	r = dm_dmub_hw_init(adev);
1233 	if (r) {
1234 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1235 		goto error;
1236 	}
1237 
1238 	dc_hardware_init(adev->dm.dc);
1239 
1240 #if defined(CONFIG_DRM_AMD_DC_DCN)
1241 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1242 		struct dc_phy_addr_space_config pa_config;
1243 
1244 		mmhub_read_system_context(adev, &pa_config);
1245 
1246 		// Call the DC init_memory func
1247 		dc_setup_system_context(adev->dm.dc, &pa_config);
1248 	}
1249 #endif
1250 
1251 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1252 	if (!adev->dm.freesync_module) {
1253 		DRM_ERROR(
1254 		"amdgpu: failed to initialize freesync_module.\n");
1255 	} else
1256 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1257 				adev->dm.freesync_module);
1258 
1259 	amdgpu_dm_init_color_mod();
1260 
1261 #if defined(CONFIG_DRM_AMD_DC_DCN)
1262 	if (adev->dm.dc->caps.max_links > 0) {
1263 		adev->dm.vblank_control_workqueue =
1264 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1265 		if (!adev->dm.vblank_control_workqueue)
1266 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1267 	}
1268 #endif
1269 
1270 #ifdef CONFIG_DRM_AMD_DC_HDCP
1271 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1272 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1273 
1274 		if (!adev->dm.hdcp_workqueue)
1275 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1276 		else
1277 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1278 
1279 		dc_init_callbacks(adev->dm.dc, &init_params);
1280 	}
1281 #endif
1282 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1283 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1284 #endif
1285 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1286 		init_completion(&adev->dm.dmub_aux_transfer_done);
1287 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1288 		if (!adev->dm.dmub_notify) {
1289 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1290 			goto error;
1291 		}
1292 		amdgpu_dm_outbox_init(adev);
1293 	}
1294 
1295 	if (amdgpu_dm_initialize_drm_device(adev)) {
1296 		DRM_ERROR(
1297 		"amdgpu: failed to initialize sw for display support.\n");
1298 		goto error;
1299 	}
1300 
1301 	/* create fake encoders for MST */
1302 	dm_dp_create_fake_mst_encoders(adev);
1303 
1304 	/* TODO: Add_display_info? */
1305 
1306 	/* TODO use dynamic cursor width */
1307 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1308 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1309 
1310 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1311 		DRM_ERROR(
1312 		"amdgpu: failed to initialize sw for display support.\n");
1313 		goto error;
1314 	}
1315 
1316 
1317 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1318 
1319 	return 0;
1320 error:
1321 	amdgpu_dm_fini(adev);
1322 
1323 	return -EINVAL;
1324 }
1325 
1326 static int amdgpu_dm_early_fini(void *handle)
1327 {
1328 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1329 
1330 	amdgpu_dm_audio_fini(adev);
1331 
1332 	return 0;
1333 }
1334 
1335 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1336 {
1337 	int i;
1338 
1339 #if defined(CONFIG_DRM_AMD_DC_DCN)
1340 	if (adev->dm.vblank_control_workqueue) {
1341 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1342 		adev->dm.vblank_control_workqueue = NULL;
1343 	}
1344 #endif
1345 
1346 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1347 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1348 	}
1349 
1350 	amdgpu_dm_destroy_drm_device(&adev->dm);
1351 
1352 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1353 	if (adev->dm.crc_rd_wrk) {
1354 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1355 		kfree(adev->dm.crc_rd_wrk);
1356 		adev->dm.crc_rd_wrk = NULL;
1357 	}
1358 #endif
1359 #ifdef CONFIG_DRM_AMD_DC_HDCP
1360 	if (adev->dm.hdcp_workqueue) {
1361 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1362 		adev->dm.hdcp_workqueue = NULL;
1363 	}
1364 
1365 	if (adev->dm.dc)
1366 		dc_deinit_callbacks(adev->dm.dc);
1367 #endif
1368 
1369 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1370 
1371 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1372 		kfree(adev->dm.dmub_notify);
1373 		adev->dm.dmub_notify = NULL;
1374 	}
1375 
1376 	if (adev->dm.dmub_bo)
1377 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1378 				      &adev->dm.dmub_bo_gpu_addr,
1379 				      &adev->dm.dmub_bo_cpu_addr);
1380 
1381 	/* DC Destroy TODO: Replace destroy DAL */
1382 	if (adev->dm.dc)
1383 		dc_destroy(&adev->dm.dc);
1384 	/*
1385 	 * TODO: pageflip, vlank interrupt
1386 	 *
1387 	 * amdgpu_dm_irq_fini(adev);
1388 	 */
1389 
1390 	if (adev->dm.cgs_device) {
1391 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1392 		adev->dm.cgs_device = NULL;
1393 	}
1394 	if (adev->dm.freesync_module) {
1395 		mod_freesync_destroy(adev->dm.freesync_module);
1396 		adev->dm.freesync_module = NULL;
1397 	}
1398 
1399 	mutex_destroy(&adev->dm.audio_lock);
1400 	mutex_destroy(&adev->dm.dc_lock);
1401 
1402 	return;
1403 }
1404 
1405 static int load_dmcu_fw(struct amdgpu_device *adev)
1406 {
1407 	const char *fw_name_dmcu = NULL;
1408 	int r;
1409 	const struct dmcu_firmware_header_v1_0 *hdr;
1410 
1411 	switch(adev->asic_type) {
1412 #if defined(CONFIG_DRM_AMD_DC_SI)
1413 	case CHIP_TAHITI:
1414 	case CHIP_PITCAIRN:
1415 	case CHIP_VERDE:
1416 	case CHIP_OLAND:
1417 #endif
1418 	case CHIP_BONAIRE:
1419 	case CHIP_HAWAII:
1420 	case CHIP_KAVERI:
1421 	case CHIP_KABINI:
1422 	case CHIP_MULLINS:
1423 	case CHIP_TONGA:
1424 	case CHIP_FIJI:
1425 	case CHIP_CARRIZO:
1426 	case CHIP_STONEY:
1427 	case CHIP_POLARIS11:
1428 	case CHIP_POLARIS10:
1429 	case CHIP_POLARIS12:
1430 	case CHIP_VEGAM:
1431 	case CHIP_VEGA10:
1432 	case CHIP_VEGA12:
1433 	case CHIP_VEGA20:
1434 	case CHIP_NAVI10:
1435 	case CHIP_NAVI14:
1436 	case CHIP_RENOIR:
1437 	case CHIP_SIENNA_CICHLID:
1438 	case CHIP_NAVY_FLOUNDER:
1439 	case CHIP_DIMGREY_CAVEFISH:
1440 	case CHIP_BEIGE_GOBY:
1441 	case CHIP_VANGOGH:
1442 	case CHIP_YELLOW_CARP:
1443 		return 0;
1444 	case CHIP_NAVI12:
1445 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1446 		break;
1447 	case CHIP_RAVEN:
1448 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1449 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1450 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1451 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1452 		else
1453 			return 0;
1454 		break;
1455 	default:
1456 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1457 		return -EINVAL;
1458 	}
1459 
1460 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1461 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1462 		return 0;
1463 	}
1464 
1465 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1466 	if (r == -ENOENT) {
1467 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1468 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1469 		adev->dm.fw_dmcu = NULL;
1470 		return 0;
1471 	}
1472 	if (r) {
1473 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1474 			fw_name_dmcu);
1475 		return r;
1476 	}
1477 
1478 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1479 	if (r) {
1480 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1481 			fw_name_dmcu);
1482 		release_firmware(adev->dm.fw_dmcu);
1483 		adev->dm.fw_dmcu = NULL;
1484 		return r;
1485 	}
1486 
1487 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1488 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1489 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1490 	adev->firmware.fw_size +=
1491 		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1492 
1493 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1494 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1495 	adev->firmware.fw_size +=
1496 		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1497 
1498 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1499 
1500 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1501 
1502 	return 0;
1503 }
1504 
1505 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1506 {
1507 	struct amdgpu_device *adev = ctx;
1508 
1509 	return dm_read_reg(adev->dm.dc->ctx, address);
1510 }
1511 
1512 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1513 				     uint32_t value)
1514 {
1515 	struct amdgpu_device *adev = ctx;
1516 
1517 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1518 }
1519 
1520 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1521 {
1522 	struct dmub_srv_create_params create_params;
1523 	struct dmub_srv_region_params region_params;
1524 	struct dmub_srv_region_info region_info;
1525 	struct dmub_srv_fb_params fb_params;
1526 	struct dmub_srv_fb_info *fb_info;
1527 	struct dmub_srv *dmub_srv;
1528 	const struct dmcub_firmware_header_v1_0 *hdr;
1529 	const char *fw_name_dmub;
1530 	enum dmub_asic dmub_asic;
1531 	enum dmub_status status;
1532 	int r;
1533 
1534 	switch (adev->asic_type) {
1535 	case CHIP_RENOIR:
1536 		dmub_asic = DMUB_ASIC_DCN21;
1537 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1538 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1539 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1540 		break;
1541 	case CHIP_SIENNA_CICHLID:
1542 		dmub_asic = DMUB_ASIC_DCN30;
1543 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1544 		break;
1545 	case CHIP_NAVY_FLOUNDER:
1546 		dmub_asic = DMUB_ASIC_DCN30;
1547 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1548 		break;
1549 	case CHIP_VANGOGH:
1550 		dmub_asic = DMUB_ASIC_DCN301;
1551 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1552 		break;
1553 	case CHIP_DIMGREY_CAVEFISH:
1554 		dmub_asic = DMUB_ASIC_DCN302;
1555 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1556 		break;
1557 	case CHIP_BEIGE_GOBY:
1558 		dmub_asic = DMUB_ASIC_DCN303;
1559 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1560 		break;
1561 	case CHIP_YELLOW_CARP:
1562 		dmub_asic = DMUB_ASIC_DCN31;
1563 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1564 		break;
1565 
1566 	default:
1567 		/* ASIC doesn't support DMUB. */
1568 		return 0;
1569 	}
1570 
1571 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1572 	if (r) {
1573 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1574 		return 0;
1575 	}
1576 
1577 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1578 	if (r) {
1579 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1580 		return 0;
1581 	}
1582 
1583 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1584 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1585 
1586 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1587 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1588 			AMDGPU_UCODE_ID_DMCUB;
1589 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1590 			adev->dm.dmub_fw;
1591 		adev->firmware.fw_size +=
1592 			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1593 
1594 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1595 			 adev->dm.dmcub_fw_version);
1596 	}
1597 
1598 
1599 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1600 	dmub_srv = adev->dm.dmub_srv;
1601 
1602 	if (!dmub_srv) {
1603 		DRM_ERROR("Failed to allocate DMUB service!\n");
1604 		return -ENOMEM;
1605 	}
1606 
1607 	memset(&create_params, 0, sizeof(create_params));
1608 	create_params.user_ctx = adev;
1609 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1610 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1611 	create_params.asic = dmub_asic;
1612 
1613 	/* Create the DMUB service. */
1614 	status = dmub_srv_create(dmub_srv, &create_params);
1615 	if (status != DMUB_STATUS_OK) {
1616 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1617 		return -EINVAL;
1618 	}
1619 
1620 	/* Calculate the size of all the regions for the DMUB service. */
1621 	memset(&region_params, 0, sizeof(region_params));
1622 
1623 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1624 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1625 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1626 	region_params.vbios_size = adev->bios_size;
1627 	region_params.fw_bss_data = region_params.bss_data_size ?
1628 		adev->dm.dmub_fw->data +
1629 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1630 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1631 	region_params.fw_inst_const =
1632 		adev->dm.dmub_fw->data +
1633 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1634 		PSP_HEADER_BYTES;
1635 
1636 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1637 					   &region_info);
1638 
1639 	if (status != DMUB_STATUS_OK) {
1640 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1641 		return -EINVAL;
1642 	}
1643 
1644 	/*
1645 	 * Allocate a framebuffer based on the total size of all the regions.
1646 	 * TODO: Move this into GART.
1647 	 */
1648 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1649 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1650 				    &adev->dm.dmub_bo_gpu_addr,
1651 				    &adev->dm.dmub_bo_cpu_addr);
1652 	if (r)
1653 		return r;
1654 
1655 	/* Rebase the regions on the framebuffer address. */
1656 	memset(&fb_params, 0, sizeof(fb_params));
1657 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1658 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1659 	fb_params.region_info = &region_info;
1660 
1661 	adev->dm.dmub_fb_info =
1662 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1663 	fb_info = adev->dm.dmub_fb_info;
1664 
1665 	if (!fb_info) {
1666 		DRM_ERROR(
1667 			"Failed to allocate framebuffer info for DMUB service!\n");
1668 		return -ENOMEM;
1669 	}
1670 
1671 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1672 	if (status != DMUB_STATUS_OK) {
1673 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1674 		return -EINVAL;
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 static int dm_sw_init(void *handle)
1681 {
1682 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1683 	int r;
1684 
1685 	r = dm_dmub_sw_init(adev);
1686 	if (r)
1687 		return r;
1688 
1689 	return load_dmcu_fw(adev);
1690 }
1691 
1692 static int dm_sw_fini(void *handle)
1693 {
1694 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1695 
1696 	kfree(adev->dm.dmub_fb_info);
1697 	adev->dm.dmub_fb_info = NULL;
1698 
1699 	if (adev->dm.dmub_srv) {
1700 		dmub_srv_destroy(adev->dm.dmub_srv);
1701 		adev->dm.dmub_srv = NULL;
1702 	}
1703 
1704 	release_firmware(adev->dm.dmub_fw);
1705 	adev->dm.dmub_fw = NULL;
1706 
1707 	release_firmware(adev->dm.fw_dmcu);
1708 	adev->dm.fw_dmcu = NULL;
1709 
1710 	return 0;
1711 }
1712 
1713 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1714 {
1715 	struct amdgpu_dm_connector *aconnector;
1716 	struct drm_connector *connector;
1717 	struct drm_connector_list_iter iter;
1718 	int ret = 0;
1719 
1720 	drm_connector_list_iter_begin(dev, &iter);
1721 	drm_for_each_connector_iter(connector, &iter) {
1722 		aconnector = to_amdgpu_dm_connector(connector);
1723 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1724 		    aconnector->mst_mgr.aux) {
1725 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1726 					 aconnector,
1727 					 aconnector->base.base.id);
1728 
1729 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1730 			if (ret < 0) {
1731 				DRM_ERROR("DM_MST: Failed to start MST\n");
1732 				aconnector->dc_link->type =
1733 					dc_connection_single;
1734 				break;
1735 			}
1736 		}
1737 	}
1738 	drm_connector_list_iter_end(&iter);
1739 
1740 	return ret;
1741 }
1742 
1743 static int dm_late_init(void *handle)
1744 {
1745 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1746 
1747 	struct dmcu_iram_parameters params;
1748 	unsigned int linear_lut[16];
1749 	int i;
1750 	struct dmcu *dmcu = NULL;
1751 
1752 	dmcu = adev->dm.dc->res_pool->dmcu;
1753 
1754 	for (i = 0; i < 16; i++)
1755 		linear_lut[i] = 0xFFFF * i / 15;
1756 
1757 	params.set = 0;
1758 	params.backlight_ramping_override = false;
1759 	params.backlight_ramping_start = 0xCCCC;
1760 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1761 	params.backlight_lut_array_size = 16;
1762 	params.backlight_lut_array = linear_lut;
1763 
1764 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1765 	 * 0xFFFF x 0.01 = 0x28F
1766 	 */
1767 	params.min_abm_backlight = 0x28F;
1768 	/* In the case where abm is implemented on dmcub,
1769 	* dmcu object will be null.
1770 	* ABM 2.4 and up are implemented on dmcub.
1771 	*/
1772 	if (dmcu) {
1773 		if (!dmcu_load_iram(dmcu, params))
1774 			return -EINVAL;
1775 	} else if (adev->dm.dc->ctx->dmub_srv) {
1776 		struct dc_link *edp_links[MAX_NUM_EDP];
1777 		int edp_num;
1778 
1779 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1780 		for (i = 0; i < edp_num; i++) {
1781 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1782 				return -EINVAL;
1783 		}
1784 	}
1785 
1786 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1787 }
1788 
1789 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1790 {
1791 	struct amdgpu_dm_connector *aconnector;
1792 	struct drm_connector *connector;
1793 	struct drm_connector_list_iter iter;
1794 	struct drm_dp_mst_topology_mgr *mgr;
1795 	int ret;
1796 	bool need_hotplug = false;
1797 
1798 	drm_connector_list_iter_begin(dev, &iter);
1799 	drm_for_each_connector_iter(connector, &iter) {
1800 		aconnector = to_amdgpu_dm_connector(connector);
1801 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1802 		    aconnector->mst_port)
1803 			continue;
1804 
1805 		mgr = &aconnector->mst_mgr;
1806 
1807 		if (suspend) {
1808 			drm_dp_mst_topology_mgr_suspend(mgr);
1809 		} else {
1810 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1811 			if (ret < 0) {
1812 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1813 				need_hotplug = true;
1814 			}
1815 		}
1816 	}
1817 	drm_connector_list_iter_end(&iter);
1818 
1819 	if (need_hotplug)
1820 		drm_kms_helper_hotplug_event(dev);
1821 }
1822 
1823 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1824 {
1825 	struct smu_context *smu = &adev->smu;
1826 	int ret = 0;
1827 
1828 	if (!is_support_sw_smu(adev))
1829 		return 0;
1830 
1831 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1832 	 * on window driver dc implementation.
1833 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1834 	 * should be passed to smu during boot up and resume from s3.
1835 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1836 	 * dcn20_resource_construct
1837 	 * then call pplib functions below to pass the settings to smu:
1838 	 * smu_set_watermarks_for_clock_ranges
1839 	 * smu_set_watermarks_table
1840 	 * navi10_set_watermarks_table
1841 	 * smu_write_watermarks_table
1842 	 *
1843 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1844 	 * dc has implemented different flow for window driver:
1845 	 * dc_hardware_init / dc_set_power_state
1846 	 * dcn10_init_hw
1847 	 * notify_wm_ranges
1848 	 * set_wm_ranges
1849 	 * -- Linux
1850 	 * smu_set_watermarks_for_clock_ranges
1851 	 * renoir_set_watermarks_table
1852 	 * smu_write_watermarks_table
1853 	 *
1854 	 * For Linux,
1855 	 * dc_hardware_init -> amdgpu_dm_init
1856 	 * dc_set_power_state --> dm_resume
1857 	 *
1858 	 * therefore, this function apply to navi10/12/14 but not Renoir
1859 	 * *
1860 	 */
1861 	switch(adev->asic_type) {
1862 	case CHIP_NAVI10:
1863 	case CHIP_NAVI14:
1864 	case CHIP_NAVI12:
1865 		break;
1866 	default:
1867 		return 0;
1868 	}
1869 
1870 	ret = smu_write_watermarks_table(smu);
1871 	if (ret) {
1872 		DRM_ERROR("Failed to update WMTABLE!\n");
1873 		return ret;
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 /**
1880  * dm_hw_init() - Initialize DC device
1881  * @handle: The base driver device containing the amdgpu_dm device.
1882  *
1883  * Initialize the &struct amdgpu_display_manager device. This involves calling
1884  * the initializers of each DM component, then populating the struct with them.
1885  *
1886  * Although the function implies hardware initialization, both hardware and
1887  * software are initialized here. Splitting them out to their relevant init
1888  * hooks is a future TODO item.
1889  *
1890  * Some notable things that are initialized here:
1891  *
1892  * - Display Core, both software and hardware
1893  * - DC modules that we need (freesync and color management)
1894  * - DRM software states
1895  * - Interrupt sources and handlers
1896  * - Vblank support
1897  * - Debug FS entries, if enabled
1898  */
1899 static int dm_hw_init(void *handle)
1900 {
1901 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1902 	/* Create DAL display manager */
1903 	amdgpu_dm_init(adev);
1904 	amdgpu_dm_hpd_init(adev);
1905 
1906 	return 0;
1907 }
1908 
1909 /**
1910  * dm_hw_fini() - Teardown DC device
1911  * @handle: The base driver device containing the amdgpu_dm device.
1912  *
1913  * Teardown components within &struct amdgpu_display_manager that require
1914  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1915  * were loaded. Also flush IRQ workqueues and disable them.
1916  */
1917 static int dm_hw_fini(void *handle)
1918 {
1919 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1920 
1921 	amdgpu_dm_hpd_fini(adev);
1922 
1923 	amdgpu_dm_irq_fini(adev);
1924 	amdgpu_dm_fini(adev);
1925 	return 0;
1926 }
1927 
1928 
1929 static int dm_enable_vblank(struct drm_crtc *crtc);
1930 static void dm_disable_vblank(struct drm_crtc *crtc);
1931 
1932 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1933 				 struct dc_state *state, bool enable)
1934 {
1935 	enum dc_irq_source irq_source;
1936 	struct amdgpu_crtc *acrtc;
1937 	int rc = -EBUSY;
1938 	int i = 0;
1939 
1940 	for (i = 0; i < state->stream_count; i++) {
1941 		acrtc = get_crtc_by_otg_inst(
1942 				adev, state->stream_status[i].primary_otg_inst);
1943 
1944 		if (acrtc && state->stream_status[i].plane_count != 0) {
1945 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1946 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1947 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1948 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1949 			if (rc)
1950 				DRM_WARN("Failed to %s pflip interrupts\n",
1951 					 enable ? "enable" : "disable");
1952 
1953 			if (enable) {
1954 				rc = dm_enable_vblank(&acrtc->base);
1955 				if (rc)
1956 					DRM_WARN("Failed to enable vblank interrupts\n");
1957 			} else {
1958 				dm_disable_vblank(&acrtc->base);
1959 			}
1960 
1961 		}
1962 	}
1963 
1964 }
1965 
1966 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1967 {
1968 	struct dc_state *context = NULL;
1969 	enum dc_status res = DC_ERROR_UNEXPECTED;
1970 	int i;
1971 	struct dc_stream_state *del_streams[MAX_PIPES];
1972 	int del_streams_count = 0;
1973 
1974 	memset(del_streams, 0, sizeof(del_streams));
1975 
1976 	context = dc_create_state(dc);
1977 	if (context == NULL)
1978 		goto context_alloc_fail;
1979 
1980 	dc_resource_state_copy_construct_current(dc, context);
1981 
1982 	/* First remove from context all streams */
1983 	for (i = 0; i < context->stream_count; i++) {
1984 		struct dc_stream_state *stream = context->streams[i];
1985 
1986 		del_streams[del_streams_count++] = stream;
1987 	}
1988 
1989 	/* Remove all planes for removed streams and then remove the streams */
1990 	for (i = 0; i < del_streams_count; i++) {
1991 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1992 			res = DC_FAIL_DETACH_SURFACES;
1993 			goto fail;
1994 		}
1995 
1996 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1997 		if (res != DC_OK)
1998 			goto fail;
1999 	}
2000 
2001 
2002 	res = dc_validate_global_state(dc, context, false);
2003 
2004 	if (res != DC_OK) {
2005 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2006 		goto fail;
2007 	}
2008 
2009 	res = dc_commit_state(dc, context);
2010 
2011 fail:
2012 	dc_release_state(context);
2013 
2014 context_alloc_fail:
2015 	return res;
2016 }
2017 
2018 static int dm_suspend(void *handle)
2019 {
2020 	struct amdgpu_device *adev = handle;
2021 	struct amdgpu_display_manager *dm = &adev->dm;
2022 	int ret = 0;
2023 
2024 	if (amdgpu_in_reset(adev)) {
2025 		mutex_lock(&dm->dc_lock);
2026 
2027 #if defined(CONFIG_DRM_AMD_DC_DCN)
2028 		dc_allow_idle_optimizations(adev->dm.dc, false);
2029 #endif
2030 
2031 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2032 
2033 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2034 
2035 		amdgpu_dm_commit_zero_streams(dm->dc);
2036 
2037 		amdgpu_dm_irq_suspend(adev);
2038 
2039 		return ret;
2040 	}
2041 
2042 	WARN_ON(adev->dm.cached_state);
2043 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2044 
2045 	s3_handle_mst(adev_to_drm(adev), true);
2046 
2047 	amdgpu_dm_irq_suspend(adev);
2048 
2049 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2050 
2051 	return 0;
2052 }
2053 
2054 static struct amdgpu_dm_connector *
2055 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2056 					     struct drm_crtc *crtc)
2057 {
2058 	uint32_t i;
2059 	struct drm_connector_state *new_con_state;
2060 	struct drm_connector *connector;
2061 	struct drm_crtc *crtc_from_state;
2062 
2063 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2064 		crtc_from_state = new_con_state->crtc;
2065 
2066 		if (crtc_from_state == crtc)
2067 			return to_amdgpu_dm_connector(connector);
2068 	}
2069 
2070 	return NULL;
2071 }
2072 
2073 static void emulated_link_detect(struct dc_link *link)
2074 {
2075 	struct dc_sink_init_data sink_init_data = { 0 };
2076 	struct display_sink_capability sink_caps = { 0 };
2077 	enum dc_edid_status edid_status;
2078 	struct dc_context *dc_ctx = link->ctx;
2079 	struct dc_sink *sink = NULL;
2080 	struct dc_sink *prev_sink = NULL;
2081 
2082 	link->type = dc_connection_none;
2083 	prev_sink = link->local_sink;
2084 
2085 	if (prev_sink)
2086 		dc_sink_release(prev_sink);
2087 
2088 	switch (link->connector_signal) {
2089 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2090 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2091 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2092 		break;
2093 	}
2094 
2095 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2096 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2097 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2098 		break;
2099 	}
2100 
2101 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2102 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2103 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2104 		break;
2105 	}
2106 
2107 	case SIGNAL_TYPE_LVDS: {
2108 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2109 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2110 		break;
2111 	}
2112 
2113 	case SIGNAL_TYPE_EDP: {
2114 		sink_caps.transaction_type =
2115 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2116 		sink_caps.signal = SIGNAL_TYPE_EDP;
2117 		break;
2118 	}
2119 
2120 	case SIGNAL_TYPE_DISPLAY_PORT: {
2121 		sink_caps.transaction_type =
2122 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2123 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2124 		break;
2125 	}
2126 
2127 	default:
2128 		DC_ERROR("Invalid connector type! signal:%d\n",
2129 			link->connector_signal);
2130 		return;
2131 	}
2132 
2133 	sink_init_data.link = link;
2134 	sink_init_data.sink_signal = sink_caps.signal;
2135 
2136 	sink = dc_sink_create(&sink_init_data);
2137 	if (!sink) {
2138 		DC_ERROR("Failed to create sink!\n");
2139 		return;
2140 	}
2141 
2142 	/* dc_sink_create returns a new reference */
2143 	link->local_sink = sink;
2144 
2145 	edid_status = dm_helpers_read_local_edid(
2146 			link->ctx,
2147 			link,
2148 			sink);
2149 
2150 	if (edid_status != EDID_OK)
2151 		DC_ERROR("Failed to read EDID");
2152 
2153 }
2154 
2155 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2156 				     struct amdgpu_display_manager *dm)
2157 {
2158 	struct {
2159 		struct dc_surface_update surface_updates[MAX_SURFACES];
2160 		struct dc_plane_info plane_infos[MAX_SURFACES];
2161 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2162 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2163 		struct dc_stream_update stream_update;
2164 	} * bundle;
2165 	int k, m;
2166 
2167 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2168 
2169 	if (!bundle) {
2170 		dm_error("Failed to allocate update bundle\n");
2171 		goto cleanup;
2172 	}
2173 
2174 	for (k = 0; k < dc_state->stream_count; k++) {
2175 		bundle->stream_update.stream = dc_state->streams[k];
2176 
2177 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2178 			bundle->surface_updates[m].surface =
2179 				dc_state->stream_status->plane_states[m];
2180 			bundle->surface_updates[m].surface->force_full_update =
2181 				true;
2182 		}
2183 		dc_commit_updates_for_stream(
2184 			dm->dc, bundle->surface_updates,
2185 			dc_state->stream_status->plane_count,
2186 			dc_state->streams[k], &bundle->stream_update, dc_state);
2187 	}
2188 
2189 cleanup:
2190 	kfree(bundle);
2191 
2192 	return;
2193 }
2194 
2195 static void dm_set_dpms_off(struct dc_link *link)
2196 {
2197 	struct dc_stream_state *stream_state;
2198 	struct amdgpu_dm_connector *aconnector = link->priv;
2199 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2200 	struct dc_stream_update stream_update;
2201 	bool dpms_off = true;
2202 
2203 	memset(&stream_update, 0, sizeof(stream_update));
2204 	stream_update.dpms_off = &dpms_off;
2205 
2206 	mutex_lock(&adev->dm.dc_lock);
2207 	stream_state = dc_stream_find_from_link(link);
2208 
2209 	if (stream_state == NULL) {
2210 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2211 		mutex_unlock(&adev->dm.dc_lock);
2212 		return;
2213 	}
2214 
2215 	stream_update.stream = stream_state;
2216 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2217 				     stream_state, &stream_update,
2218 				     stream_state->ctx->dc->current_state);
2219 	mutex_unlock(&adev->dm.dc_lock);
2220 }
2221 
2222 static int dm_resume(void *handle)
2223 {
2224 	struct amdgpu_device *adev = handle;
2225 	struct drm_device *ddev = adev_to_drm(adev);
2226 	struct amdgpu_display_manager *dm = &adev->dm;
2227 	struct amdgpu_dm_connector *aconnector;
2228 	struct drm_connector *connector;
2229 	struct drm_connector_list_iter iter;
2230 	struct drm_crtc *crtc;
2231 	struct drm_crtc_state *new_crtc_state;
2232 	struct dm_crtc_state *dm_new_crtc_state;
2233 	struct drm_plane *plane;
2234 	struct drm_plane_state *new_plane_state;
2235 	struct dm_plane_state *dm_new_plane_state;
2236 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2237 	enum dc_connection_type new_connection_type = dc_connection_none;
2238 	struct dc_state *dc_state;
2239 	int i, r, j;
2240 
2241 	if (amdgpu_in_reset(adev)) {
2242 		dc_state = dm->cached_dc_state;
2243 
2244 		if (dc_enable_dmub_notifications(adev->dm.dc))
2245 			amdgpu_dm_outbox_init(adev);
2246 
2247 		r = dm_dmub_hw_init(adev);
2248 		if (r)
2249 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2250 
2251 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2252 		dc_resume(dm->dc);
2253 
2254 		amdgpu_dm_irq_resume_early(adev);
2255 
2256 		for (i = 0; i < dc_state->stream_count; i++) {
2257 			dc_state->streams[i]->mode_changed = true;
2258 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2259 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2260 					= 0xffffffff;
2261 			}
2262 		}
2263 #if defined(CONFIG_DRM_AMD_DC_DCN)
2264 		/*
2265 		 * Resource allocation happens for link encoders for newer ASIC in
2266 		 * dc_validate_global_state, so we need to revalidate it.
2267 		 *
2268 		 * This shouldn't fail (it passed once before), so warn if it does.
2269 		 */
2270 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2271 #endif
2272 
2273 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2274 
2275 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2276 
2277 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2278 
2279 		dc_release_state(dm->cached_dc_state);
2280 		dm->cached_dc_state = NULL;
2281 
2282 		amdgpu_dm_irq_resume_late(adev);
2283 
2284 		mutex_unlock(&dm->dc_lock);
2285 
2286 		return 0;
2287 	}
2288 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2289 	dc_release_state(dm_state->context);
2290 	dm_state->context = dc_create_state(dm->dc);
2291 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2292 	dc_resource_state_construct(dm->dc, dm_state->context);
2293 
2294 	/* Re-enable outbox interrupts for DPIA. */
2295 	if (dc_enable_dmub_notifications(adev->dm.dc))
2296 		amdgpu_dm_outbox_init(adev);
2297 
2298 	/* Before powering on DC we need to re-initialize DMUB. */
2299 	dm_dmub_hw_resume(adev);
2300 
2301 	/* power on hardware */
2302 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2303 
2304 	/* program HPD filter */
2305 	dc_resume(dm->dc);
2306 
2307 	/*
2308 	 * early enable HPD Rx IRQ, should be done before set mode as short
2309 	 * pulse interrupts are used for MST
2310 	 */
2311 	amdgpu_dm_irq_resume_early(adev);
2312 
2313 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2314 	s3_handle_mst(ddev, false);
2315 
2316 	/* Do detection*/
2317 	drm_connector_list_iter_begin(ddev, &iter);
2318 	drm_for_each_connector_iter(connector, &iter) {
2319 		aconnector = to_amdgpu_dm_connector(connector);
2320 
2321 		/*
2322 		 * this is the case when traversing through already created
2323 		 * MST connectors, should be skipped
2324 		 */
2325 		if (aconnector->dc_link &&
2326 		    aconnector->dc_link->type == dc_connection_mst_branch)
2327 			continue;
2328 
2329 		mutex_lock(&aconnector->hpd_lock);
2330 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2331 			DRM_ERROR("KMS: Failed to detect connector\n");
2332 
2333 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2334 			emulated_link_detect(aconnector->dc_link);
2335 		else
2336 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2337 
2338 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2339 			aconnector->fake_enable = false;
2340 
2341 		if (aconnector->dc_sink)
2342 			dc_sink_release(aconnector->dc_sink);
2343 		aconnector->dc_sink = NULL;
2344 		amdgpu_dm_update_connector_after_detect(aconnector);
2345 		mutex_unlock(&aconnector->hpd_lock);
2346 	}
2347 	drm_connector_list_iter_end(&iter);
2348 
2349 	/* Force mode set in atomic commit */
2350 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2351 		new_crtc_state->active_changed = true;
2352 
2353 	/*
2354 	 * atomic_check is expected to create the dc states. We need to release
2355 	 * them here, since they were duplicated as part of the suspend
2356 	 * procedure.
2357 	 */
2358 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2359 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2360 		if (dm_new_crtc_state->stream) {
2361 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2362 			dc_stream_release(dm_new_crtc_state->stream);
2363 			dm_new_crtc_state->stream = NULL;
2364 		}
2365 	}
2366 
2367 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2368 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2369 		if (dm_new_plane_state->dc_state) {
2370 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2371 			dc_plane_state_release(dm_new_plane_state->dc_state);
2372 			dm_new_plane_state->dc_state = NULL;
2373 		}
2374 	}
2375 
2376 	drm_atomic_helper_resume(ddev, dm->cached_state);
2377 
2378 	dm->cached_state = NULL;
2379 
2380 	amdgpu_dm_irq_resume_late(adev);
2381 
2382 	amdgpu_dm_smu_write_watermarks_table(adev);
2383 
2384 	return 0;
2385 }
2386 
2387 /**
2388  * DOC: DM Lifecycle
2389  *
2390  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2391  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2392  * the base driver's device list to be initialized and torn down accordingly.
2393  *
2394  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2395  */
2396 
2397 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2398 	.name = "dm",
2399 	.early_init = dm_early_init,
2400 	.late_init = dm_late_init,
2401 	.sw_init = dm_sw_init,
2402 	.sw_fini = dm_sw_fini,
2403 	.early_fini = amdgpu_dm_early_fini,
2404 	.hw_init = dm_hw_init,
2405 	.hw_fini = dm_hw_fini,
2406 	.suspend = dm_suspend,
2407 	.resume = dm_resume,
2408 	.is_idle = dm_is_idle,
2409 	.wait_for_idle = dm_wait_for_idle,
2410 	.check_soft_reset = dm_check_soft_reset,
2411 	.soft_reset = dm_soft_reset,
2412 	.set_clockgating_state = dm_set_clockgating_state,
2413 	.set_powergating_state = dm_set_powergating_state,
2414 };
2415 
2416 const struct amdgpu_ip_block_version dm_ip_block =
2417 {
2418 	.type = AMD_IP_BLOCK_TYPE_DCE,
2419 	.major = 1,
2420 	.minor = 0,
2421 	.rev = 0,
2422 	.funcs = &amdgpu_dm_funcs,
2423 };
2424 
2425 
2426 /**
2427  * DOC: atomic
2428  *
2429  * *WIP*
2430  */
2431 
2432 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2433 	.fb_create = amdgpu_display_user_framebuffer_create,
2434 	.get_format_info = amd_get_format_info,
2435 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2436 	.atomic_check = amdgpu_dm_atomic_check,
2437 	.atomic_commit = drm_atomic_helper_commit,
2438 };
2439 
2440 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2441 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2442 };
2443 
2444 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2445 {
2446 	u32 max_avg, min_cll, max, min, q, r;
2447 	struct amdgpu_dm_backlight_caps *caps;
2448 	struct amdgpu_display_manager *dm;
2449 	struct drm_connector *conn_base;
2450 	struct amdgpu_device *adev;
2451 	struct dc_link *link = NULL;
2452 	static const u8 pre_computed_values[] = {
2453 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2454 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2455 	int i;
2456 
2457 	if (!aconnector || !aconnector->dc_link)
2458 		return;
2459 
2460 	link = aconnector->dc_link;
2461 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2462 		return;
2463 
2464 	conn_base = &aconnector->base;
2465 	adev = drm_to_adev(conn_base->dev);
2466 	dm = &adev->dm;
2467 	for (i = 0; i < dm->num_of_edps; i++) {
2468 		if (link == dm->backlight_link[i])
2469 			break;
2470 	}
2471 	if (i >= dm->num_of_edps)
2472 		return;
2473 	caps = &dm->backlight_caps[i];
2474 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2475 	caps->aux_support = false;
2476 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2477 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2478 
2479 	if (caps->ext_caps->bits.oled == 1 /*||
2480 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2481 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2482 		caps->aux_support = true;
2483 
2484 	if (amdgpu_backlight == 0)
2485 		caps->aux_support = false;
2486 	else if (amdgpu_backlight == 1)
2487 		caps->aux_support = true;
2488 
2489 	/* From the specification (CTA-861-G), for calculating the maximum
2490 	 * luminance we need to use:
2491 	 *	Luminance = 50*2**(CV/32)
2492 	 * Where CV is a one-byte value.
2493 	 * For calculating this expression we may need float point precision;
2494 	 * to avoid this complexity level, we take advantage that CV is divided
2495 	 * by a constant. From the Euclids division algorithm, we know that CV
2496 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2497 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2498 	 * need to pre-compute the value of r/32. For pre-computing the values
2499 	 * We just used the following Ruby line:
2500 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2501 	 * The results of the above expressions can be verified at
2502 	 * pre_computed_values.
2503 	 */
2504 	q = max_avg >> 5;
2505 	r = max_avg % 32;
2506 	max = (1 << q) * pre_computed_values[r];
2507 
2508 	// min luminance: maxLum * (CV/255)^2 / 100
2509 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2510 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2511 
2512 	caps->aux_max_input_signal = max;
2513 	caps->aux_min_input_signal = min;
2514 }
2515 
2516 void amdgpu_dm_update_connector_after_detect(
2517 		struct amdgpu_dm_connector *aconnector)
2518 {
2519 	struct drm_connector *connector = &aconnector->base;
2520 	struct drm_device *dev = connector->dev;
2521 	struct dc_sink *sink;
2522 
2523 	/* MST handled by drm_mst framework */
2524 	if (aconnector->mst_mgr.mst_state == true)
2525 		return;
2526 
2527 	sink = aconnector->dc_link->local_sink;
2528 	if (sink)
2529 		dc_sink_retain(sink);
2530 
2531 	/*
2532 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2533 	 * the connector sink is set to either fake or physical sink depends on link status.
2534 	 * Skip if already done during boot.
2535 	 */
2536 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2537 			&& aconnector->dc_em_sink) {
2538 
2539 		/*
2540 		 * For S3 resume with headless use eml_sink to fake stream
2541 		 * because on resume connector->sink is set to NULL
2542 		 */
2543 		mutex_lock(&dev->mode_config.mutex);
2544 
2545 		if (sink) {
2546 			if (aconnector->dc_sink) {
2547 				amdgpu_dm_update_freesync_caps(connector, NULL);
2548 				/*
2549 				 * retain and release below are used to
2550 				 * bump up refcount for sink because the link doesn't point
2551 				 * to it anymore after disconnect, so on next crtc to connector
2552 				 * reshuffle by UMD we will get into unwanted dc_sink release
2553 				 */
2554 				dc_sink_release(aconnector->dc_sink);
2555 			}
2556 			aconnector->dc_sink = sink;
2557 			dc_sink_retain(aconnector->dc_sink);
2558 			amdgpu_dm_update_freesync_caps(connector,
2559 					aconnector->edid);
2560 		} else {
2561 			amdgpu_dm_update_freesync_caps(connector, NULL);
2562 			if (!aconnector->dc_sink) {
2563 				aconnector->dc_sink = aconnector->dc_em_sink;
2564 				dc_sink_retain(aconnector->dc_sink);
2565 			}
2566 		}
2567 
2568 		mutex_unlock(&dev->mode_config.mutex);
2569 
2570 		if (sink)
2571 			dc_sink_release(sink);
2572 		return;
2573 	}
2574 
2575 	/*
2576 	 * TODO: temporary guard to look for proper fix
2577 	 * if this sink is MST sink, we should not do anything
2578 	 */
2579 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2580 		dc_sink_release(sink);
2581 		return;
2582 	}
2583 
2584 	if (aconnector->dc_sink == sink) {
2585 		/*
2586 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2587 		 * Do nothing!!
2588 		 */
2589 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2590 				aconnector->connector_id);
2591 		if (sink)
2592 			dc_sink_release(sink);
2593 		return;
2594 	}
2595 
2596 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2597 		aconnector->connector_id, aconnector->dc_sink, sink);
2598 
2599 	mutex_lock(&dev->mode_config.mutex);
2600 
2601 	/*
2602 	 * 1. Update status of the drm connector
2603 	 * 2. Send an event and let userspace tell us what to do
2604 	 */
2605 	if (sink) {
2606 		/*
2607 		 * TODO: check if we still need the S3 mode update workaround.
2608 		 * If yes, put it here.
2609 		 */
2610 		if (aconnector->dc_sink) {
2611 			amdgpu_dm_update_freesync_caps(connector, NULL);
2612 			dc_sink_release(aconnector->dc_sink);
2613 		}
2614 
2615 		aconnector->dc_sink = sink;
2616 		dc_sink_retain(aconnector->dc_sink);
2617 		if (sink->dc_edid.length == 0) {
2618 			aconnector->edid = NULL;
2619 			if (aconnector->dc_link->aux_mode) {
2620 				drm_dp_cec_unset_edid(
2621 					&aconnector->dm_dp_aux.aux);
2622 			}
2623 		} else {
2624 			aconnector->edid =
2625 				(struct edid *)sink->dc_edid.raw_edid;
2626 
2627 			drm_connector_update_edid_property(connector,
2628 							   aconnector->edid);
2629 			if (aconnector->dc_link->aux_mode)
2630 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2631 						    aconnector->edid);
2632 		}
2633 
2634 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2635 		update_connector_ext_caps(aconnector);
2636 	} else {
2637 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2638 		amdgpu_dm_update_freesync_caps(connector, NULL);
2639 		drm_connector_update_edid_property(connector, NULL);
2640 		aconnector->num_modes = 0;
2641 		dc_sink_release(aconnector->dc_sink);
2642 		aconnector->dc_sink = NULL;
2643 		aconnector->edid = NULL;
2644 #ifdef CONFIG_DRM_AMD_DC_HDCP
2645 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2646 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2647 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2648 #endif
2649 	}
2650 
2651 	mutex_unlock(&dev->mode_config.mutex);
2652 
2653 	update_subconnector_property(aconnector);
2654 
2655 	if (sink)
2656 		dc_sink_release(sink);
2657 }
2658 
2659 static void handle_hpd_irq(void *param)
2660 {
2661 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2662 	struct drm_connector *connector = &aconnector->base;
2663 	struct drm_device *dev = connector->dev;
2664 	enum dc_connection_type new_connection_type = dc_connection_none;
2665 	struct amdgpu_device *adev = drm_to_adev(dev);
2666 #ifdef CONFIG_DRM_AMD_DC_HDCP
2667 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2668 #endif
2669 
2670 	if (adev->dm.disable_hpd_irq)
2671 		return;
2672 
2673 	/*
2674 	 * In case of failure or MST no need to update connector status or notify the OS
2675 	 * since (for MST case) MST does this in its own context.
2676 	 */
2677 	mutex_lock(&aconnector->hpd_lock);
2678 
2679 #ifdef CONFIG_DRM_AMD_DC_HDCP
2680 	if (adev->dm.hdcp_workqueue) {
2681 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2682 		dm_con_state->update_hdcp = true;
2683 	}
2684 #endif
2685 	if (aconnector->fake_enable)
2686 		aconnector->fake_enable = false;
2687 
2688 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2689 		DRM_ERROR("KMS: Failed to detect connector\n");
2690 
2691 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2692 		emulated_link_detect(aconnector->dc_link);
2693 
2694 
2695 		drm_modeset_lock_all(dev);
2696 		dm_restore_drm_connector_state(dev, connector);
2697 		drm_modeset_unlock_all(dev);
2698 
2699 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2700 			drm_kms_helper_hotplug_event(dev);
2701 
2702 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2703 		if (new_connection_type == dc_connection_none &&
2704 		    aconnector->dc_link->type == dc_connection_none)
2705 			dm_set_dpms_off(aconnector->dc_link);
2706 
2707 		amdgpu_dm_update_connector_after_detect(aconnector);
2708 
2709 		drm_modeset_lock_all(dev);
2710 		dm_restore_drm_connector_state(dev, connector);
2711 		drm_modeset_unlock_all(dev);
2712 
2713 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2714 			drm_kms_helper_hotplug_event(dev);
2715 	}
2716 	mutex_unlock(&aconnector->hpd_lock);
2717 
2718 }
2719 
2720 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2721 {
2722 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2723 	uint8_t dret;
2724 	bool new_irq_handled = false;
2725 	int dpcd_addr;
2726 	int dpcd_bytes_to_read;
2727 
2728 	const int max_process_count = 30;
2729 	int process_count = 0;
2730 
2731 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2732 
2733 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2734 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2735 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2736 		dpcd_addr = DP_SINK_COUNT;
2737 	} else {
2738 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2739 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2740 		dpcd_addr = DP_SINK_COUNT_ESI;
2741 	}
2742 
2743 	dret = drm_dp_dpcd_read(
2744 		&aconnector->dm_dp_aux.aux,
2745 		dpcd_addr,
2746 		esi,
2747 		dpcd_bytes_to_read);
2748 
2749 	while (dret == dpcd_bytes_to_read &&
2750 		process_count < max_process_count) {
2751 		uint8_t retry;
2752 		dret = 0;
2753 
2754 		process_count++;
2755 
2756 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2757 		/* handle HPD short pulse irq */
2758 		if (aconnector->mst_mgr.mst_state)
2759 			drm_dp_mst_hpd_irq(
2760 				&aconnector->mst_mgr,
2761 				esi,
2762 				&new_irq_handled);
2763 
2764 		if (new_irq_handled) {
2765 			/* ACK at DPCD to notify down stream */
2766 			const int ack_dpcd_bytes_to_write =
2767 				dpcd_bytes_to_read - 1;
2768 
2769 			for (retry = 0; retry < 3; retry++) {
2770 				uint8_t wret;
2771 
2772 				wret = drm_dp_dpcd_write(
2773 					&aconnector->dm_dp_aux.aux,
2774 					dpcd_addr + 1,
2775 					&esi[1],
2776 					ack_dpcd_bytes_to_write);
2777 				if (wret == ack_dpcd_bytes_to_write)
2778 					break;
2779 			}
2780 
2781 			/* check if there is new irq to be handled */
2782 			dret = drm_dp_dpcd_read(
2783 				&aconnector->dm_dp_aux.aux,
2784 				dpcd_addr,
2785 				esi,
2786 				dpcd_bytes_to_read);
2787 
2788 			new_irq_handled = false;
2789 		} else {
2790 			break;
2791 		}
2792 	}
2793 
2794 	if (process_count == max_process_count)
2795 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2796 }
2797 
2798 static void handle_hpd_rx_irq(void *param)
2799 {
2800 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2801 	struct drm_connector *connector = &aconnector->base;
2802 	struct drm_device *dev = connector->dev;
2803 	struct dc_link *dc_link = aconnector->dc_link;
2804 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2805 	bool result = false;
2806 	enum dc_connection_type new_connection_type = dc_connection_none;
2807 	struct amdgpu_device *adev = drm_to_adev(dev);
2808 	union hpd_irq_data hpd_irq_data;
2809 	bool lock_flag = 0;
2810 
2811 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2812 
2813 	if (adev->dm.disable_hpd_irq)
2814 		return;
2815 
2816 
2817 	/*
2818 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2819 	 * conflict, after implement i2c helper, this mutex should be
2820 	 * retired.
2821 	 */
2822 	mutex_lock(&aconnector->hpd_lock);
2823 
2824 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2825 
2826 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2827 		(dc_link->type == dc_connection_mst_branch)) {
2828 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2829 			result = true;
2830 			dm_handle_hpd_rx_irq(aconnector);
2831 			goto out;
2832 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2833 			result = false;
2834 			dm_handle_hpd_rx_irq(aconnector);
2835 			goto out;
2836 		}
2837 	}
2838 
2839 	/*
2840 	 * TODO: We need the lock to avoid touching DC state while it's being
2841 	 * modified during automated compliance testing, or when link loss
2842 	 * happens. While this should be split into subhandlers and proper
2843 	 * interfaces to avoid having to conditionally lock like this in the
2844 	 * outer layer, we need this workaround temporarily to allow MST
2845 	 * lightup in some scenarios to avoid timeout.
2846 	 */
2847 	if (!amdgpu_in_reset(adev) &&
2848 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2849 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2850 		mutex_lock(&adev->dm.dc_lock);
2851 		lock_flag = 1;
2852 	}
2853 
2854 #ifdef CONFIG_DRM_AMD_DC_HDCP
2855 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2856 #else
2857 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2858 #endif
2859 	if (!amdgpu_in_reset(adev) && lock_flag)
2860 		mutex_unlock(&adev->dm.dc_lock);
2861 
2862 out:
2863 	if (result && !is_mst_root_connector) {
2864 		/* Downstream Port status changed. */
2865 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2866 			DRM_ERROR("KMS: Failed to detect connector\n");
2867 
2868 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2869 			emulated_link_detect(dc_link);
2870 
2871 			if (aconnector->fake_enable)
2872 				aconnector->fake_enable = false;
2873 
2874 			amdgpu_dm_update_connector_after_detect(aconnector);
2875 
2876 
2877 			drm_modeset_lock_all(dev);
2878 			dm_restore_drm_connector_state(dev, connector);
2879 			drm_modeset_unlock_all(dev);
2880 
2881 			drm_kms_helper_hotplug_event(dev);
2882 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2883 
2884 			if (aconnector->fake_enable)
2885 				aconnector->fake_enable = false;
2886 
2887 			amdgpu_dm_update_connector_after_detect(aconnector);
2888 
2889 
2890 			drm_modeset_lock_all(dev);
2891 			dm_restore_drm_connector_state(dev, connector);
2892 			drm_modeset_unlock_all(dev);
2893 
2894 			drm_kms_helper_hotplug_event(dev);
2895 		}
2896 	}
2897 #ifdef CONFIG_DRM_AMD_DC_HDCP
2898 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2899 		if (adev->dm.hdcp_workqueue)
2900 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2901 	}
2902 #endif
2903 
2904 	if (dc_link->type != dc_connection_mst_branch)
2905 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2906 
2907 	mutex_unlock(&aconnector->hpd_lock);
2908 }
2909 
2910 static void register_hpd_handlers(struct amdgpu_device *adev)
2911 {
2912 	struct drm_device *dev = adev_to_drm(adev);
2913 	struct drm_connector *connector;
2914 	struct amdgpu_dm_connector *aconnector;
2915 	const struct dc_link *dc_link;
2916 	struct dc_interrupt_params int_params = {0};
2917 
2918 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2919 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2920 
2921 	list_for_each_entry(connector,
2922 			&dev->mode_config.connector_list, head)	{
2923 
2924 		aconnector = to_amdgpu_dm_connector(connector);
2925 		dc_link = aconnector->dc_link;
2926 
2927 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2928 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2929 			int_params.irq_source = dc_link->irq_source_hpd;
2930 
2931 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2932 					handle_hpd_irq,
2933 					(void *) aconnector);
2934 		}
2935 
2936 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2937 
2938 			/* Also register for DP short pulse (hpd_rx). */
2939 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2940 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2941 
2942 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2943 					handle_hpd_rx_irq,
2944 					(void *) aconnector);
2945 		}
2946 	}
2947 }
2948 
2949 #if defined(CONFIG_DRM_AMD_DC_SI)
2950 /* Register IRQ sources and initialize IRQ callbacks */
2951 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2952 {
2953 	struct dc *dc = adev->dm.dc;
2954 	struct common_irq_params *c_irq_params;
2955 	struct dc_interrupt_params int_params = {0};
2956 	int r;
2957 	int i;
2958 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2959 
2960 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2961 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2962 
2963 	/*
2964 	 * Actions of amdgpu_irq_add_id():
2965 	 * 1. Register a set() function with base driver.
2966 	 *    Base driver will call set() function to enable/disable an
2967 	 *    interrupt in DC hardware.
2968 	 * 2. Register amdgpu_dm_irq_handler().
2969 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2970 	 *    coming from DC hardware.
2971 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2972 	 *    for acknowledging and handling. */
2973 
2974 	/* Use VBLANK interrupt */
2975 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2976 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2977 		if (r) {
2978 			DRM_ERROR("Failed to add crtc irq id!\n");
2979 			return r;
2980 		}
2981 
2982 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2983 		int_params.irq_source =
2984 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2985 
2986 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2987 
2988 		c_irq_params->adev = adev;
2989 		c_irq_params->irq_src = int_params.irq_source;
2990 
2991 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2992 				dm_crtc_high_irq, c_irq_params);
2993 	}
2994 
2995 	/* Use GRPH_PFLIP interrupt */
2996 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2997 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2998 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2999 		if (r) {
3000 			DRM_ERROR("Failed to add page flip irq id!\n");
3001 			return r;
3002 		}
3003 
3004 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3005 		int_params.irq_source =
3006 			dc_interrupt_to_irq_source(dc, i, 0);
3007 
3008 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3009 
3010 		c_irq_params->adev = adev;
3011 		c_irq_params->irq_src = int_params.irq_source;
3012 
3013 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3014 				dm_pflip_high_irq, c_irq_params);
3015 
3016 	}
3017 
3018 	/* HPD */
3019 	r = amdgpu_irq_add_id(adev, client_id,
3020 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3021 	if (r) {
3022 		DRM_ERROR("Failed to add hpd irq id!\n");
3023 		return r;
3024 	}
3025 
3026 	register_hpd_handlers(adev);
3027 
3028 	return 0;
3029 }
3030 #endif
3031 
3032 /* Register IRQ sources and initialize IRQ callbacks */
3033 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3034 {
3035 	struct dc *dc = adev->dm.dc;
3036 	struct common_irq_params *c_irq_params;
3037 	struct dc_interrupt_params int_params = {0};
3038 	int r;
3039 	int i;
3040 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3041 
3042 	if (adev->asic_type >= CHIP_VEGA10)
3043 		client_id = SOC15_IH_CLIENTID_DCE;
3044 
3045 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3046 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3047 
3048 	/*
3049 	 * Actions of amdgpu_irq_add_id():
3050 	 * 1. Register a set() function with base driver.
3051 	 *    Base driver will call set() function to enable/disable an
3052 	 *    interrupt in DC hardware.
3053 	 * 2. Register amdgpu_dm_irq_handler().
3054 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3055 	 *    coming from DC hardware.
3056 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3057 	 *    for acknowledging and handling. */
3058 
3059 	/* Use VBLANK interrupt */
3060 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3061 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3062 		if (r) {
3063 			DRM_ERROR("Failed to add crtc irq id!\n");
3064 			return r;
3065 		}
3066 
3067 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3068 		int_params.irq_source =
3069 			dc_interrupt_to_irq_source(dc, i, 0);
3070 
3071 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3072 
3073 		c_irq_params->adev = adev;
3074 		c_irq_params->irq_src = int_params.irq_source;
3075 
3076 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3077 				dm_crtc_high_irq, c_irq_params);
3078 	}
3079 
3080 	/* Use VUPDATE interrupt */
3081 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3082 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3083 		if (r) {
3084 			DRM_ERROR("Failed to add vupdate irq id!\n");
3085 			return r;
3086 		}
3087 
3088 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3089 		int_params.irq_source =
3090 			dc_interrupt_to_irq_source(dc, i, 0);
3091 
3092 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3093 
3094 		c_irq_params->adev = adev;
3095 		c_irq_params->irq_src = int_params.irq_source;
3096 
3097 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3098 				dm_vupdate_high_irq, c_irq_params);
3099 	}
3100 
3101 	/* Use GRPH_PFLIP interrupt */
3102 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3103 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3104 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3105 		if (r) {
3106 			DRM_ERROR("Failed to add page flip irq id!\n");
3107 			return r;
3108 		}
3109 
3110 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 		int_params.irq_source =
3112 			dc_interrupt_to_irq_source(dc, i, 0);
3113 
3114 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3115 
3116 		c_irq_params->adev = adev;
3117 		c_irq_params->irq_src = int_params.irq_source;
3118 
3119 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120 				dm_pflip_high_irq, c_irq_params);
3121 
3122 	}
3123 
3124 	/* HPD */
3125 	r = amdgpu_irq_add_id(adev, client_id,
3126 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3127 	if (r) {
3128 		DRM_ERROR("Failed to add hpd irq id!\n");
3129 		return r;
3130 	}
3131 
3132 	register_hpd_handlers(adev);
3133 
3134 	return 0;
3135 }
3136 
3137 #if defined(CONFIG_DRM_AMD_DC_DCN)
3138 /* Register IRQ sources and initialize IRQ callbacks */
3139 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3140 {
3141 	struct dc *dc = adev->dm.dc;
3142 	struct common_irq_params *c_irq_params;
3143 	struct dc_interrupt_params int_params = {0};
3144 	int r;
3145 	int i;
3146 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3147 	static const unsigned int vrtl_int_srcid[] = {
3148 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3149 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3150 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3151 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3152 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3153 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3154 	};
3155 #endif
3156 
3157 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3158 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3159 
3160 	/*
3161 	 * Actions of amdgpu_irq_add_id():
3162 	 * 1. Register a set() function with base driver.
3163 	 *    Base driver will call set() function to enable/disable an
3164 	 *    interrupt in DC hardware.
3165 	 * 2. Register amdgpu_dm_irq_handler().
3166 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3167 	 *    coming from DC hardware.
3168 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3169 	 *    for acknowledging and handling.
3170 	 */
3171 
3172 	/* Use VSTARTUP interrupt */
3173 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3174 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3175 			i++) {
3176 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3177 
3178 		if (r) {
3179 			DRM_ERROR("Failed to add crtc irq id!\n");
3180 			return r;
3181 		}
3182 
3183 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3184 		int_params.irq_source =
3185 			dc_interrupt_to_irq_source(dc, i, 0);
3186 
3187 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3188 
3189 		c_irq_params->adev = adev;
3190 		c_irq_params->irq_src = int_params.irq_source;
3191 
3192 		amdgpu_dm_irq_register_interrupt(
3193 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3194 	}
3195 
3196 	/* Use otg vertical line interrupt */
3197 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3198 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3199 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3200 				vrtl_int_srcid[i], &adev->vline0_irq);
3201 
3202 		if (r) {
3203 			DRM_ERROR("Failed to add vline0 irq id!\n");
3204 			return r;
3205 		}
3206 
3207 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3208 		int_params.irq_source =
3209 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3210 
3211 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3212 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3213 			break;
3214 		}
3215 
3216 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3217 					- DC_IRQ_SOURCE_DC1_VLINE0];
3218 
3219 		c_irq_params->adev = adev;
3220 		c_irq_params->irq_src = int_params.irq_source;
3221 
3222 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3223 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3224 	}
3225 #endif
3226 
3227 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3228 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3229 	 * to trigger at end of each vblank, regardless of state of the lock,
3230 	 * matching DCE behaviour.
3231 	 */
3232 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3233 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3234 	     i++) {
3235 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3236 
3237 		if (r) {
3238 			DRM_ERROR("Failed to add vupdate irq id!\n");
3239 			return r;
3240 		}
3241 
3242 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3243 		int_params.irq_source =
3244 			dc_interrupt_to_irq_source(dc, i, 0);
3245 
3246 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3247 
3248 		c_irq_params->adev = adev;
3249 		c_irq_params->irq_src = int_params.irq_source;
3250 
3251 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3252 				dm_vupdate_high_irq, c_irq_params);
3253 	}
3254 
3255 	/* Use GRPH_PFLIP interrupt */
3256 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3257 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3258 			i++) {
3259 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3260 		if (r) {
3261 			DRM_ERROR("Failed to add page flip irq id!\n");
3262 			return r;
3263 		}
3264 
3265 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3266 		int_params.irq_source =
3267 			dc_interrupt_to_irq_source(dc, i, 0);
3268 
3269 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3270 
3271 		c_irq_params->adev = adev;
3272 		c_irq_params->irq_src = int_params.irq_source;
3273 
3274 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3275 				dm_pflip_high_irq, c_irq_params);
3276 
3277 	}
3278 
3279 	/* HPD */
3280 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3281 			&adev->hpd_irq);
3282 	if (r) {
3283 		DRM_ERROR("Failed to add hpd irq id!\n");
3284 		return r;
3285 	}
3286 
3287 	register_hpd_handlers(adev);
3288 
3289 	return 0;
3290 }
3291 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3292 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3293 {
3294 	struct dc *dc = adev->dm.dc;
3295 	struct common_irq_params *c_irq_params;
3296 	struct dc_interrupt_params int_params = {0};
3297 	int r, i;
3298 
3299 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3300 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3301 
3302 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3303 			&adev->dmub_outbox_irq);
3304 	if (r) {
3305 		DRM_ERROR("Failed to add outbox irq id!\n");
3306 		return r;
3307 	}
3308 
3309 	if (dc->ctx->dmub_srv) {
3310 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3311 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3312 		int_params.irq_source =
3313 		dc_interrupt_to_irq_source(dc, i, 0);
3314 
3315 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3316 
3317 		c_irq_params->adev = adev;
3318 		c_irq_params->irq_src = int_params.irq_source;
3319 
3320 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3321 				dm_dmub_outbox1_low_irq, c_irq_params);
3322 	}
3323 
3324 	return 0;
3325 }
3326 #endif
3327 
3328 /*
3329  * Acquires the lock for the atomic state object and returns
3330  * the new atomic state.
3331  *
3332  * This should only be called during atomic check.
3333  */
3334 static int dm_atomic_get_state(struct drm_atomic_state *state,
3335 			       struct dm_atomic_state **dm_state)
3336 {
3337 	struct drm_device *dev = state->dev;
3338 	struct amdgpu_device *adev = drm_to_adev(dev);
3339 	struct amdgpu_display_manager *dm = &adev->dm;
3340 	struct drm_private_state *priv_state;
3341 
3342 	if (*dm_state)
3343 		return 0;
3344 
3345 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3346 	if (IS_ERR(priv_state))
3347 		return PTR_ERR(priv_state);
3348 
3349 	*dm_state = to_dm_atomic_state(priv_state);
3350 
3351 	return 0;
3352 }
3353 
3354 static struct dm_atomic_state *
3355 dm_atomic_get_new_state(struct drm_atomic_state *state)
3356 {
3357 	struct drm_device *dev = state->dev;
3358 	struct amdgpu_device *adev = drm_to_adev(dev);
3359 	struct amdgpu_display_manager *dm = &adev->dm;
3360 	struct drm_private_obj *obj;
3361 	struct drm_private_state *new_obj_state;
3362 	int i;
3363 
3364 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3365 		if (obj->funcs == dm->atomic_obj.funcs)
3366 			return to_dm_atomic_state(new_obj_state);
3367 	}
3368 
3369 	return NULL;
3370 }
3371 
3372 static struct drm_private_state *
3373 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3374 {
3375 	struct dm_atomic_state *old_state, *new_state;
3376 
3377 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3378 	if (!new_state)
3379 		return NULL;
3380 
3381 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3382 
3383 	old_state = to_dm_atomic_state(obj->state);
3384 
3385 	if (old_state && old_state->context)
3386 		new_state->context = dc_copy_state(old_state->context);
3387 
3388 	if (!new_state->context) {
3389 		kfree(new_state);
3390 		return NULL;
3391 	}
3392 
3393 	return &new_state->base;
3394 }
3395 
3396 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3397 				    struct drm_private_state *state)
3398 {
3399 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3400 
3401 	if (dm_state && dm_state->context)
3402 		dc_release_state(dm_state->context);
3403 
3404 	kfree(dm_state);
3405 }
3406 
3407 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3408 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3409 	.atomic_destroy_state = dm_atomic_destroy_state,
3410 };
3411 
3412 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3413 {
3414 	struct dm_atomic_state *state;
3415 	int r;
3416 
3417 	adev->mode_info.mode_config_initialized = true;
3418 
3419 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3420 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3421 
3422 	adev_to_drm(adev)->mode_config.max_width = 16384;
3423 	adev_to_drm(adev)->mode_config.max_height = 16384;
3424 
3425 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3426 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3427 	/* indicates support for immediate flip */
3428 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3429 
3430 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3431 
3432 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3433 	if (!state)
3434 		return -ENOMEM;
3435 
3436 	state->context = dc_create_state(adev->dm.dc);
3437 	if (!state->context) {
3438 		kfree(state);
3439 		return -ENOMEM;
3440 	}
3441 
3442 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3443 
3444 	drm_atomic_private_obj_init(adev_to_drm(adev),
3445 				    &adev->dm.atomic_obj,
3446 				    &state->base,
3447 				    &dm_atomic_state_funcs);
3448 
3449 	r = amdgpu_display_modeset_create_props(adev);
3450 	if (r) {
3451 		dc_release_state(state->context);
3452 		kfree(state);
3453 		return r;
3454 	}
3455 
3456 	r = amdgpu_dm_audio_init(adev);
3457 	if (r) {
3458 		dc_release_state(state->context);
3459 		kfree(state);
3460 		return r;
3461 	}
3462 
3463 	return 0;
3464 }
3465 
3466 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3467 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3468 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3469 
3470 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3471 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3472 
3473 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3474 					    int bl_idx)
3475 {
3476 #if defined(CONFIG_ACPI)
3477 	struct amdgpu_dm_backlight_caps caps;
3478 
3479 	memset(&caps, 0, sizeof(caps));
3480 
3481 	if (dm->backlight_caps[bl_idx].caps_valid)
3482 		return;
3483 
3484 	amdgpu_acpi_get_backlight_caps(&caps);
3485 	if (caps.caps_valid) {
3486 		dm->backlight_caps[bl_idx].caps_valid = true;
3487 		if (caps.aux_support)
3488 			return;
3489 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3490 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3491 	} else {
3492 		dm->backlight_caps[bl_idx].min_input_signal =
3493 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3494 		dm->backlight_caps[bl_idx].max_input_signal =
3495 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3496 	}
3497 #else
3498 	if (dm->backlight_caps[bl_idx].aux_support)
3499 		return;
3500 
3501 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3502 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3503 #endif
3504 }
3505 
3506 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3507 				unsigned *min, unsigned *max)
3508 {
3509 	if (!caps)
3510 		return 0;
3511 
3512 	if (caps->aux_support) {
3513 		// Firmware limits are in nits, DC API wants millinits.
3514 		*max = 1000 * caps->aux_max_input_signal;
3515 		*min = 1000 * caps->aux_min_input_signal;
3516 	} else {
3517 		// Firmware limits are 8-bit, PWM control is 16-bit.
3518 		*max = 0x101 * caps->max_input_signal;
3519 		*min = 0x101 * caps->min_input_signal;
3520 	}
3521 	return 1;
3522 }
3523 
3524 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3525 					uint32_t brightness)
3526 {
3527 	unsigned min, max;
3528 
3529 	if (!get_brightness_range(caps, &min, &max))
3530 		return brightness;
3531 
3532 	// Rescale 0..255 to min..max
3533 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3534 				       AMDGPU_MAX_BL_LEVEL);
3535 }
3536 
3537 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3538 				      uint32_t brightness)
3539 {
3540 	unsigned min, max;
3541 
3542 	if (!get_brightness_range(caps, &min, &max))
3543 		return brightness;
3544 
3545 	if (brightness < min)
3546 		return 0;
3547 	// Rescale min..max to 0..255
3548 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3549 				 max - min);
3550 }
3551 
3552 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3553 					 int bl_idx,
3554 					 u32 user_brightness)
3555 {
3556 	struct amdgpu_dm_backlight_caps caps;
3557 	struct dc_link *link;
3558 	u32 brightness;
3559 	bool rc;
3560 
3561 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3562 	caps = dm->backlight_caps[bl_idx];
3563 
3564 	dm->brightness[bl_idx] = user_brightness;
3565 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3566 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3567 
3568 	/* Change brightness based on AUX property */
3569 	if (caps.aux_support) {
3570 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3571 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3572 		if (!rc)
3573 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3574 	} else {
3575 		rc = dc_link_set_backlight_level(link, brightness, 0);
3576 		if (!rc)
3577 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3578 	}
3579 
3580 	if (rc)
3581 		dm->actual_brightness[bl_idx] = user_brightness;
3582 }
3583 
3584 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3585 {
3586 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3587 	int i;
3588 
3589 	for (i = 0; i < dm->num_of_edps; i++) {
3590 		if (bd == dm->backlight_dev[i])
3591 			break;
3592 	}
3593 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3594 		i = 0;
3595 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3596 
3597 	return 0;
3598 }
3599 
3600 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3601 					 int bl_idx)
3602 {
3603 	struct amdgpu_dm_backlight_caps caps;
3604 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3605 
3606 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3607 	caps = dm->backlight_caps[bl_idx];
3608 
3609 	if (caps.aux_support) {
3610 		u32 avg, peak;
3611 		bool rc;
3612 
3613 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3614 		if (!rc)
3615 			return dm->brightness[bl_idx];
3616 		return convert_brightness_to_user(&caps, avg);
3617 	} else {
3618 		int ret = dc_link_get_backlight_level(link);
3619 
3620 		if (ret == DC_ERROR_UNEXPECTED)
3621 			return dm->brightness[bl_idx];
3622 		return convert_brightness_to_user(&caps, ret);
3623 	}
3624 }
3625 
3626 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3627 {
3628 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3629 	int i;
3630 
3631 	for (i = 0; i < dm->num_of_edps; i++) {
3632 		if (bd == dm->backlight_dev[i])
3633 			break;
3634 	}
3635 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3636 		i = 0;
3637 	return amdgpu_dm_backlight_get_level(dm, i);
3638 }
3639 
3640 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3641 	.options = BL_CORE_SUSPENDRESUME,
3642 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3643 	.update_status	= amdgpu_dm_backlight_update_status,
3644 };
3645 
3646 static void
3647 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3648 {
3649 	char bl_name[16];
3650 	struct backlight_properties props = { 0 };
3651 
3652 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3653 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3654 
3655 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3656 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3657 	props.type = BACKLIGHT_RAW;
3658 
3659 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3660 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3661 
3662 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3663 								       adev_to_drm(dm->adev)->dev,
3664 								       dm,
3665 								       &amdgpu_dm_backlight_ops,
3666 								       &props);
3667 
3668 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3669 		DRM_ERROR("DM: Backlight registration failed!\n");
3670 	else
3671 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3672 }
3673 #endif
3674 
3675 static int initialize_plane(struct amdgpu_display_manager *dm,
3676 			    struct amdgpu_mode_info *mode_info, int plane_id,
3677 			    enum drm_plane_type plane_type,
3678 			    const struct dc_plane_cap *plane_cap)
3679 {
3680 	struct drm_plane *plane;
3681 	unsigned long possible_crtcs;
3682 	int ret = 0;
3683 
3684 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3685 	if (!plane) {
3686 		DRM_ERROR("KMS: Failed to allocate plane\n");
3687 		return -ENOMEM;
3688 	}
3689 	plane->type = plane_type;
3690 
3691 	/*
3692 	 * HACK: IGT tests expect that the primary plane for a CRTC
3693 	 * can only have one possible CRTC. Only expose support for
3694 	 * any CRTC if they're not going to be used as a primary plane
3695 	 * for a CRTC - like overlay or underlay planes.
3696 	 */
3697 	possible_crtcs = 1 << plane_id;
3698 	if (plane_id >= dm->dc->caps.max_streams)
3699 		possible_crtcs = 0xff;
3700 
3701 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3702 
3703 	if (ret) {
3704 		DRM_ERROR("KMS: Failed to initialize plane\n");
3705 		kfree(plane);
3706 		return ret;
3707 	}
3708 
3709 	if (mode_info)
3710 		mode_info->planes[plane_id] = plane;
3711 
3712 	return ret;
3713 }
3714 
3715 
3716 static void register_backlight_device(struct amdgpu_display_manager *dm,
3717 				      struct dc_link *link)
3718 {
3719 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3720 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3721 
3722 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3723 	    link->type != dc_connection_none) {
3724 		/*
3725 		 * Event if registration failed, we should continue with
3726 		 * DM initialization because not having a backlight control
3727 		 * is better then a black screen.
3728 		 */
3729 		if (!dm->backlight_dev[dm->num_of_edps])
3730 			amdgpu_dm_register_backlight_device(dm);
3731 
3732 		if (dm->backlight_dev[dm->num_of_edps]) {
3733 			dm->backlight_link[dm->num_of_edps] = link;
3734 			dm->num_of_edps++;
3735 		}
3736 	}
3737 #endif
3738 }
3739 
3740 
3741 /*
3742  * In this architecture, the association
3743  * connector -> encoder -> crtc
3744  * id not really requried. The crtc and connector will hold the
3745  * display_index as an abstraction to use with DAL component
3746  *
3747  * Returns 0 on success
3748  */
3749 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3750 {
3751 	struct amdgpu_display_manager *dm = &adev->dm;
3752 	int32_t i;
3753 	struct amdgpu_dm_connector *aconnector = NULL;
3754 	struct amdgpu_encoder *aencoder = NULL;
3755 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3756 	uint32_t link_cnt;
3757 	int32_t primary_planes;
3758 	enum dc_connection_type new_connection_type = dc_connection_none;
3759 	const struct dc_plane_cap *plane;
3760 
3761 	dm->display_indexes_num = dm->dc->caps.max_streams;
3762 	/* Update the actual used number of crtc */
3763 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3764 
3765 	link_cnt = dm->dc->caps.max_links;
3766 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3767 		DRM_ERROR("DM: Failed to initialize mode config\n");
3768 		return -EINVAL;
3769 	}
3770 
3771 	/* There is one primary plane per CRTC */
3772 	primary_planes = dm->dc->caps.max_streams;
3773 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3774 
3775 	/*
3776 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3777 	 * Order is reversed to match iteration order in atomic check.
3778 	 */
3779 	for (i = (primary_planes - 1); i >= 0; i--) {
3780 		plane = &dm->dc->caps.planes[i];
3781 
3782 		if (initialize_plane(dm, mode_info, i,
3783 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3784 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3785 			goto fail;
3786 		}
3787 	}
3788 
3789 	/*
3790 	 * Initialize overlay planes, index starting after primary planes.
3791 	 * These planes have a higher DRM index than the primary planes since
3792 	 * they should be considered as having a higher z-order.
3793 	 * Order is reversed to match iteration order in atomic check.
3794 	 *
3795 	 * Only support DCN for now, and only expose one so we don't encourage
3796 	 * userspace to use up all the pipes.
3797 	 */
3798 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3799 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3800 
3801 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3802 			continue;
3803 
3804 		if (!plane->blends_with_above || !plane->blends_with_below)
3805 			continue;
3806 
3807 		if (!plane->pixel_format_support.argb8888)
3808 			continue;
3809 
3810 		if (initialize_plane(dm, NULL, primary_planes + i,
3811 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3812 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3813 			goto fail;
3814 		}
3815 
3816 		/* Only create one overlay plane. */
3817 		break;
3818 	}
3819 
3820 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3821 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3822 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3823 			goto fail;
3824 		}
3825 
3826 #if defined(CONFIG_DRM_AMD_DC_DCN)
3827 	/* Use Outbox interrupt */
3828 	switch (adev->asic_type) {
3829 	case CHIP_SIENNA_CICHLID:
3830 	case CHIP_NAVY_FLOUNDER:
3831 	case CHIP_YELLOW_CARP:
3832 	case CHIP_RENOIR:
3833 		if (register_outbox_irq_handlers(dm->adev)) {
3834 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3835 			goto fail;
3836 		}
3837 		break;
3838 	default:
3839 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3840 	}
3841 #endif
3842 
3843 	/* loops over all connectors on the board */
3844 	for (i = 0; i < link_cnt; i++) {
3845 		struct dc_link *link = NULL;
3846 
3847 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3848 			DRM_ERROR(
3849 				"KMS: Cannot support more than %d display indexes\n",
3850 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3851 			continue;
3852 		}
3853 
3854 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3855 		if (!aconnector)
3856 			goto fail;
3857 
3858 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3859 		if (!aencoder)
3860 			goto fail;
3861 
3862 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3863 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3864 			goto fail;
3865 		}
3866 
3867 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3868 			DRM_ERROR("KMS: Failed to initialize connector\n");
3869 			goto fail;
3870 		}
3871 
3872 		link = dc_get_link_at_index(dm->dc, i);
3873 
3874 		if (!dc_link_detect_sink(link, &new_connection_type))
3875 			DRM_ERROR("KMS: Failed to detect connector\n");
3876 
3877 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3878 			emulated_link_detect(link);
3879 			amdgpu_dm_update_connector_after_detect(aconnector);
3880 
3881 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3882 			amdgpu_dm_update_connector_after_detect(aconnector);
3883 			register_backlight_device(dm, link);
3884 
3885 			if (dm->num_of_edps)
3886 				update_connector_ext_caps(aconnector);
3887 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3888 				amdgpu_dm_set_psr_caps(link);
3889 
3890 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
3891 			 * PSR is also supported.
3892 			 */
3893 			if (link->psr_settings.psr_feature_enabled)
3894 				adev_to_drm(adev)->vblank_disable_immediate = false;
3895 		}
3896 
3897 
3898 	}
3899 
3900 	/* Software is initialized. Now we can register interrupt handlers. */
3901 	switch (adev->asic_type) {
3902 #if defined(CONFIG_DRM_AMD_DC_SI)
3903 	case CHIP_TAHITI:
3904 	case CHIP_PITCAIRN:
3905 	case CHIP_VERDE:
3906 	case CHIP_OLAND:
3907 		if (dce60_register_irq_handlers(dm->adev)) {
3908 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3909 			goto fail;
3910 		}
3911 		break;
3912 #endif
3913 	case CHIP_BONAIRE:
3914 	case CHIP_HAWAII:
3915 	case CHIP_KAVERI:
3916 	case CHIP_KABINI:
3917 	case CHIP_MULLINS:
3918 	case CHIP_TONGA:
3919 	case CHIP_FIJI:
3920 	case CHIP_CARRIZO:
3921 	case CHIP_STONEY:
3922 	case CHIP_POLARIS11:
3923 	case CHIP_POLARIS10:
3924 	case CHIP_POLARIS12:
3925 	case CHIP_VEGAM:
3926 	case CHIP_VEGA10:
3927 	case CHIP_VEGA12:
3928 	case CHIP_VEGA20:
3929 		if (dce110_register_irq_handlers(dm->adev)) {
3930 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3931 			goto fail;
3932 		}
3933 		break;
3934 #if defined(CONFIG_DRM_AMD_DC_DCN)
3935 	case CHIP_RAVEN:
3936 	case CHIP_NAVI12:
3937 	case CHIP_NAVI10:
3938 	case CHIP_NAVI14:
3939 	case CHIP_RENOIR:
3940 	case CHIP_SIENNA_CICHLID:
3941 	case CHIP_NAVY_FLOUNDER:
3942 	case CHIP_DIMGREY_CAVEFISH:
3943 	case CHIP_BEIGE_GOBY:
3944 	case CHIP_VANGOGH:
3945 	case CHIP_YELLOW_CARP:
3946 		if (dcn10_register_irq_handlers(dm->adev)) {
3947 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3948 			goto fail;
3949 		}
3950 		break;
3951 #endif
3952 	default:
3953 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3954 		goto fail;
3955 	}
3956 
3957 	return 0;
3958 fail:
3959 	kfree(aencoder);
3960 	kfree(aconnector);
3961 
3962 	return -EINVAL;
3963 }
3964 
3965 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3966 {
3967 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3968 	return;
3969 }
3970 
3971 /******************************************************************************
3972  * amdgpu_display_funcs functions
3973  *****************************************************************************/
3974 
3975 /*
3976  * dm_bandwidth_update - program display watermarks
3977  *
3978  * @adev: amdgpu_device pointer
3979  *
3980  * Calculate and program the display watermarks and line buffer allocation.
3981  */
3982 static void dm_bandwidth_update(struct amdgpu_device *adev)
3983 {
3984 	/* TODO: implement later */
3985 }
3986 
3987 static const struct amdgpu_display_funcs dm_display_funcs = {
3988 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3989 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3990 	.backlight_set_level = NULL, /* never called for DC */
3991 	.backlight_get_level = NULL, /* never called for DC */
3992 	.hpd_sense = NULL,/* called unconditionally */
3993 	.hpd_set_polarity = NULL, /* called unconditionally */
3994 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3995 	.page_flip_get_scanoutpos =
3996 		dm_crtc_get_scanoutpos,/* called unconditionally */
3997 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3998 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3999 };
4000 
4001 #if defined(CONFIG_DEBUG_KERNEL_DC)
4002 
4003 static ssize_t s3_debug_store(struct device *device,
4004 			      struct device_attribute *attr,
4005 			      const char *buf,
4006 			      size_t count)
4007 {
4008 	int ret;
4009 	int s3_state;
4010 	struct drm_device *drm_dev = dev_get_drvdata(device);
4011 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4012 
4013 	ret = kstrtoint(buf, 0, &s3_state);
4014 
4015 	if (ret == 0) {
4016 		if (s3_state) {
4017 			dm_resume(adev);
4018 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4019 		} else
4020 			dm_suspend(adev);
4021 	}
4022 
4023 	return ret == 0 ? count : 0;
4024 }
4025 
4026 DEVICE_ATTR_WO(s3_debug);
4027 
4028 #endif
4029 
4030 static int dm_early_init(void *handle)
4031 {
4032 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4033 
4034 	switch (adev->asic_type) {
4035 #if defined(CONFIG_DRM_AMD_DC_SI)
4036 	case CHIP_TAHITI:
4037 	case CHIP_PITCAIRN:
4038 	case CHIP_VERDE:
4039 		adev->mode_info.num_crtc = 6;
4040 		adev->mode_info.num_hpd = 6;
4041 		adev->mode_info.num_dig = 6;
4042 		break;
4043 	case CHIP_OLAND:
4044 		adev->mode_info.num_crtc = 2;
4045 		adev->mode_info.num_hpd = 2;
4046 		adev->mode_info.num_dig = 2;
4047 		break;
4048 #endif
4049 	case CHIP_BONAIRE:
4050 	case CHIP_HAWAII:
4051 		adev->mode_info.num_crtc = 6;
4052 		adev->mode_info.num_hpd = 6;
4053 		adev->mode_info.num_dig = 6;
4054 		break;
4055 	case CHIP_KAVERI:
4056 		adev->mode_info.num_crtc = 4;
4057 		adev->mode_info.num_hpd = 6;
4058 		adev->mode_info.num_dig = 7;
4059 		break;
4060 	case CHIP_KABINI:
4061 	case CHIP_MULLINS:
4062 		adev->mode_info.num_crtc = 2;
4063 		adev->mode_info.num_hpd = 6;
4064 		adev->mode_info.num_dig = 6;
4065 		break;
4066 	case CHIP_FIJI:
4067 	case CHIP_TONGA:
4068 		adev->mode_info.num_crtc = 6;
4069 		adev->mode_info.num_hpd = 6;
4070 		adev->mode_info.num_dig = 7;
4071 		break;
4072 	case CHIP_CARRIZO:
4073 		adev->mode_info.num_crtc = 3;
4074 		adev->mode_info.num_hpd = 6;
4075 		adev->mode_info.num_dig = 9;
4076 		break;
4077 	case CHIP_STONEY:
4078 		adev->mode_info.num_crtc = 2;
4079 		adev->mode_info.num_hpd = 6;
4080 		adev->mode_info.num_dig = 9;
4081 		break;
4082 	case CHIP_POLARIS11:
4083 	case CHIP_POLARIS12:
4084 		adev->mode_info.num_crtc = 5;
4085 		adev->mode_info.num_hpd = 5;
4086 		adev->mode_info.num_dig = 5;
4087 		break;
4088 	case CHIP_POLARIS10:
4089 	case CHIP_VEGAM:
4090 		adev->mode_info.num_crtc = 6;
4091 		adev->mode_info.num_hpd = 6;
4092 		adev->mode_info.num_dig = 6;
4093 		break;
4094 	case CHIP_VEGA10:
4095 	case CHIP_VEGA12:
4096 	case CHIP_VEGA20:
4097 		adev->mode_info.num_crtc = 6;
4098 		adev->mode_info.num_hpd = 6;
4099 		adev->mode_info.num_dig = 6;
4100 		break;
4101 #if defined(CONFIG_DRM_AMD_DC_DCN)
4102 	case CHIP_RAVEN:
4103 	case CHIP_RENOIR:
4104 	case CHIP_VANGOGH:
4105 		adev->mode_info.num_crtc = 4;
4106 		adev->mode_info.num_hpd = 4;
4107 		adev->mode_info.num_dig = 4;
4108 		break;
4109 	case CHIP_NAVI10:
4110 	case CHIP_NAVI12:
4111 	case CHIP_SIENNA_CICHLID:
4112 	case CHIP_NAVY_FLOUNDER:
4113 		adev->mode_info.num_crtc = 6;
4114 		adev->mode_info.num_hpd = 6;
4115 		adev->mode_info.num_dig = 6;
4116 		break;
4117 	case CHIP_YELLOW_CARP:
4118 		adev->mode_info.num_crtc = 4;
4119 		adev->mode_info.num_hpd = 4;
4120 		adev->mode_info.num_dig = 4;
4121 		break;
4122 	case CHIP_NAVI14:
4123 	case CHIP_DIMGREY_CAVEFISH:
4124 		adev->mode_info.num_crtc = 5;
4125 		adev->mode_info.num_hpd = 5;
4126 		adev->mode_info.num_dig = 5;
4127 		break;
4128 	case CHIP_BEIGE_GOBY:
4129 		adev->mode_info.num_crtc = 2;
4130 		adev->mode_info.num_hpd = 2;
4131 		adev->mode_info.num_dig = 2;
4132 		break;
4133 #endif
4134 	default:
4135 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4136 		return -EINVAL;
4137 	}
4138 
4139 	amdgpu_dm_set_irq_funcs(adev);
4140 
4141 	if (adev->mode_info.funcs == NULL)
4142 		adev->mode_info.funcs = &dm_display_funcs;
4143 
4144 	/*
4145 	 * Note: Do NOT change adev->audio_endpt_rreg and
4146 	 * adev->audio_endpt_wreg because they are initialised in
4147 	 * amdgpu_device_init()
4148 	 */
4149 #if defined(CONFIG_DEBUG_KERNEL_DC)
4150 	device_create_file(
4151 		adev_to_drm(adev)->dev,
4152 		&dev_attr_s3_debug);
4153 #endif
4154 
4155 	return 0;
4156 }
4157 
4158 static bool modeset_required(struct drm_crtc_state *crtc_state,
4159 			     struct dc_stream_state *new_stream,
4160 			     struct dc_stream_state *old_stream)
4161 {
4162 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4163 }
4164 
4165 static bool modereset_required(struct drm_crtc_state *crtc_state)
4166 {
4167 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4168 }
4169 
4170 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4171 {
4172 	drm_encoder_cleanup(encoder);
4173 	kfree(encoder);
4174 }
4175 
4176 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4177 	.destroy = amdgpu_dm_encoder_destroy,
4178 };
4179 
4180 
4181 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4182 					 struct drm_framebuffer *fb,
4183 					 int *min_downscale, int *max_upscale)
4184 {
4185 	struct amdgpu_device *adev = drm_to_adev(dev);
4186 	struct dc *dc = adev->dm.dc;
4187 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4188 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4189 
4190 	switch (fb->format->format) {
4191 	case DRM_FORMAT_P010:
4192 	case DRM_FORMAT_NV12:
4193 	case DRM_FORMAT_NV21:
4194 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4195 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4196 		break;
4197 
4198 	case DRM_FORMAT_XRGB16161616F:
4199 	case DRM_FORMAT_ARGB16161616F:
4200 	case DRM_FORMAT_XBGR16161616F:
4201 	case DRM_FORMAT_ABGR16161616F:
4202 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4203 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4204 		break;
4205 
4206 	default:
4207 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4208 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4209 		break;
4210 	}
4211 
4212 	/*
4213 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4214 	 * scaling factor of 1.0 == 1000 units.
4215 	 */
4216 	if (*max_upscale == 1)
4217 		*max_upscale = 1000;
4218 
4219 	if (*min_downscale == 1)
4220 		*min_downscale = 1000;
4221 }
4222 
4223 
4224 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4225 				struct dc_scaling_info *scaling_info)
4226 {
4227 	int scale_w, scale_h, min_downscale, max_upscale;
4228 
4229 	memset(scaling_info, 0, sizeof(*scaling_info));
4230 
4231 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4232 	scaling_info->src_rect.x = state->src_x >> 16;
4233 	scaling_info->src_rect.y = state->src_y >> 16;
4234 
4235 	/*
4236 	 * For reasons we don't (yet) fully understand a non-zero
4237 	 * src_y coordinate into an NV12 buffer can cause a
4238 	 * system hang. To avoid hangs (and maybe be overly cautious)
4239 	 * let's reject both non-zero src_x and src_y.
4240 	 *
4241 	 * We currently know of only one use-case to reproduce a
4242 	 * scenario with non-zero src_x and src_y for NV12, which
4243 	 * is to gesture the YouTube Android app into full screen
4244 	 * on ChromeOS.
4245 	 */
4246 	if (state->fb &&
4247 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4248 	    (scaling_info->src_rect.x != 0 ||
4249 	     scaling_info->src_rect.y != 0))
4250 		return -EINVAL;
4251 
4252 	scaling_info->src_rect.width = state->src_w >> 16;
4253 	if (scaling_info->src_rect.width == 0)
4254 		return -EINVAL;
4255 
4256 	scaling_info->src_rect.height = state->src_h >> 16;
4257 	if (scaling_info->src_rect.height == 0)
4258 		return -EINVAL;
4259 
4260 	scaling_info->dst_rect.x = state->crtc_x;
4261 	scaling_info->dst_rect.y = state->crtc_y;
4262 
4263 	if (state->crtc_w == 0)
4264 		return -EINVAL;
4265 
4266 	scaling_info->dst_rect.width = state->crtc_w;
4267 
4268 	if (state->crtc_h == 0)
4269 		return -EINVAL;
4270 
4271 	scaling_info->dst_rect.height = state->crtc_h;
4272 
4273 	/* DRM doesn't specify clipping on destination output. */
4274 	scaling_info->clip_rect = scaling_info->dst_rect;
4275 
4276 	/* Validate scaling per-format with DC plane caps */
4277 	if (state->plane && state->plane->dev && state->fb) {
4278 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4279 					     &min_downscale, &max_upscale);
4280 	} else {
4281 		min_downscale = 250;
4282 		max_upscale = 16000;
4283 	}
4284 
4285 	scale_w = scaling_info->dst_rect.width * 1000 /
4286 		  scaling_info->src_rect.width;
4287 
4288 	if (scale_w < min_downscale || scale_w > max_upscale)
4289 		return -EINVAL;
4290 
4291 	scale_h = scaling_info->dst_rect.height * 1000 /
4292 		  scaling_info->src_rect.height;
4293 
4294 	if (scale_h < min_downscale || scale_h > max_upscale)
4295 		return -EINVAL;
4296 
4297 	/*
4298 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4299 	 * assume reasonable defaults based on the format.
4300 	 */
4301 
4302 	return 0;
4303 }
4304 
4305 static void
4306 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4307 				 uint64_t tiling_flags)
4308 {
4309 	/* Fill GFX8 params */
4310 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4311 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4312 
4313 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4314 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4315 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4316 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4317 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4318 
4319 		/* XXX fix me for VI */
4320 		tiling_info->gfx8.num_banks = num_banks;
4321 		tiling_info->gfx8.array_mode =
4322 				DC_ARRAY_2D_TILED_THIN1;
4323 		tiling_info->gfx8.tile_split = tile_split;
4324 		tiling_info->gfx8.bank_width = bankw;
4325 		tiling_info->gfx8.bank_height = bankh;
4326 		tiling_info->gfx8.tile_aspect = mtaspect;
4327 		tiling_info->gfx8.tile_mode =
4328 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4329 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4330 			== DC_ARRAY_1D_TILED_THIN1) {
4331 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4332 	}
4333 
4334 	tiling_info->gfx8.pipe_config =
4335 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4336 }
4337 
4338 static void
4339 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4340 				  union dc_tiling_info *tiling_info)
4341 {
4342 	tiling_info->gfx9.num_pipes =
4343 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4344 	tiling_info->gfx9.num_banks =
4345 		adev->gfx.config.gb_addr_config_fields.num_banks;
4346 	tiling_info->gfx9.pipe_interleave =
4347 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4348 	tiling_info->gfx9.num_shader_engines =
4349 		adev->gfx.config.gb_addr_config_fields.num_se;
4350 	tiling_info->gfx9.max_compressed_frags =
4351 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4352 	tiling_info->gfx9.num_rb_per_se =
4353 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4354 	tiling_info->gfx9.shaderEnable = 1;
4355 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4356 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4357 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4358 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4359 	    adev->asic_type == CHIP_YELLOW_CARP ||
4360 	    adev->asic_type == CHIP_VANGOGH)
4361 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4362 }
4363 
4364 static int
4365 validate_dcc(struct amdgpu_device *adev,
4366 	     const enum surface_pixel_format format,
4367 	     const enum dc_rotation_angle rotation,
4368 	     const union dc_tiling_info *tiling_info,
4369 	     const struct dc_plane_dcc_param *dcc,
4370 	     const struct dc_plane_address *address,
4371 	     const struct plane_size *plane_size)
4372 {
4373 	struct dc *dc = adev->dm.dc;
4374 	struct dc_dcc_surface_param input;
4375 	struct dc_surface_dcc_cap output;
4376 
4377 	memset(&input, 0, sizeof(input));
4378 	memset(&output, 0, sizeof(output));
4379 
4380 	if (!dcc->enable)
4381 		return 0;
4382 
4383 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4384 	    !dc->cap_funcs.get_dcc_compression_cap)
4385 		return -EINVAL;
4386 
4387 	input.format = format;
4388 	input.surface_size.width = plane_size->surface_size.width;
4389 	input.surface_size.height = plane_size->surface_size.height;
4390 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4391 
4392 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4393 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4394 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4395 		input.scan = SCAN_DIRECTION_VERTICAL;
4396 
4397 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4398 		return -EINVAL;
4399 
4400 	if (!output.capable)
4401 		return -EINVAL;
4402 
4403 	if (dcc->independent_64b_blks == 0 &&
4404 	    output.grph.rgb.independent_64b_blks != 0)
4405 		return -EINVAL;
4406 
4407 	return 0;
4408 }
4409 
4410 static bool
4411 modifier_has_dcc(uint64_t modifier)
4412 {
4413 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4414 }
4415 
4416 static unsigned
4417 modifier_gfx9_swizzle_mode(uint64_t modifier)
4418 {
4419 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4420 		return 0;
4421 
4422 	return AMD_FMT_MOD_GET(TILE, modifier);
4423 }
4424 
4425 static const struct drm_format_info *
4426 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4427 {
4428 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4429 }
4430 
4431 static void
4432 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4433 				    union dc_tiling_info *tiling_info,
4434 				    uint64_t modifier)
4435 {
4436 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4437 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4438 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4439 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4440 
4441 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4442 
4443 	if (!IS_AMD_FMT_MOD(modifier))
4444 		return;
4445 
4446 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4447 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4448 
4449 	if (adev->family >= AMDGPU_FAMILY_NV) {
4450 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4451 	} else {
4452 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4453 
4454 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4455 	}
4456 }
4457 
4458 enum dm_micro_swizzle {
4459 	MICRO_SWIZZLE_Z = 0,
4460 	MICRO_SWIZZLE_S = 1,
4461 	MICRO_SWIZZLE_D = 2,
4462 	MICRO_SWIZZLE_R = 3
4463 };
4464 
4465 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4466 					  uint32_t format,
4467 					  uint64_t modifier)
4468 {
4469 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4470 	const struct drm_format_info *info = drm_format_info(format);
4471 	int i;
4472 
4473 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4474 
4475 	if (!info)
4476 		return false;
4477 
4478 	/*
4479 	 * We always have to allow these modifiers:
4480 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4481 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4482 	 */
4483 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4484 	    modifier == DRM_FORMAT_MOD_INVALID) {
4485 		return true;
4486 	}
4487 
4488 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4489 	for (i = 0; i < plane->modifier_count; i++) {
4490 		if (modifier == plane->modifiers[i])
4491 			break;
4492 	}
4493 	if (i == plane->modifier_count)
4494 		return false;
4495 
4496 	/*
4497 	 * For D swizzle the canonical modifier depends on the bpp, so check
4498 	 * it here.
4499 	 */
4500 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4501 	    adev->family >= AMDGPU_FAMILY_NV) {
4502 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4503 			return false;
4504 	}
4505 
4506 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4507 	    info->cpp[0] < 8)
4508 		return false;
4509 
4510 	if (modifier_has_dcc(modifier)) {
4511 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4512 		if (info->cpp[0] != 4)
4513 			return false;
4514 		/* We support multi-planar formats, but not when combined with
4515 		 * additional DCC metadata planes. */
4516 		if (info->num_planes > 1)
4517 			return false;
4518 	}
4519 
4520 	return true;
4521 }
4522 
4523 static void
4524 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4525 {
4526 	if (!*mods)
4527 		return;
4528 
4529 	if (*cap - *size < 1) {
4530 		uint64_t new_cap = *cap * 2;
4531 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4532 
4533 		if (!new_mods) {
4534 			kfree(*mods);
4535 			*mods = NULL;
4536 			return;
4537 		}
4538 
4539 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4540 		kfree(*mods);
4541 		*mods = new_mods;
4542 		*cap = new_cap;
4543 	}
4544 
4545 	(*mods)[*size] = mod;
4546 	*size += 1;
4547 }
4548 
4549 static void
4550 add_gfx9_modifiers(const struct amdgpu_device *adev,
4551 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4552 {
4553 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4554 	int pipe_xor_bits = min(8, pipes +
4555 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4556 	int bank_xor_bits = min(8 - pipe_xor_bits,
4557 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4558 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4559 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4560 
4561 
4562 	if (adev->family == AMDGPU_FAMILY_RV) {
4563 		/* Raven2 and later */
4564 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4565 
4566 		/*
4567 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4568 		 * doesn't support _D on DCN
4569 		 */
4570 
4571 		if (has_constant_encode) {
4572 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4573 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4574 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4575 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4576 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4577 				    AMD_FMT_MOD_SET(DCC, 1) |
4578 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4579 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4580 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4581 		}
4582 
4583 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4584 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4585 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4586 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4587 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4588 			    AMD_FMT_MOD_SET(DCC, 1) |
4589 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4590 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4591 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4592 
4593 		if (has_constant_encode) {
4594 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4595 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4596 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4597 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4598 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4599 				    AMD_FMT_MOD_SET(DCC, 1) |
4600 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4601 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4602 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4603 
4604 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4605 				    AMD_FMT_MOD_SET(RB, rb) |
4606 				    AMD_FMT_MOD_SET(PIPE, pipes));
4607 		}
4608 
4609 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4610 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4611 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4612 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4613 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4614 			    AMD_FMT_MOD_SET(DCC, 1) |
4615 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4616 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4617 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4618 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4619 			    AMD_FMT_MOD_SET(RB, rb) |
4620 			    AMD_FMT_MOD_SET(PIPE, pipes));
4621 	}
4622 
4623 	/*
4624 	 * Only supported for 64bpp on Raven, will be filtered on format in
4625 	 * dm_plane_format_mod_supported.
4626 	 */
4627 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4628 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4629 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4630 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4631 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4632 
4633 	if (adev->family == AMDGPU_FAMILY_RV) {
4634 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4635 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4636 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4637 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4638 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4639 	}
4640 
4641 	/*
4642 	 * Only supported for 64bpp on Raven, will be filtered on format in
4643 	 * dm_plane_format_mod_supported.
4644 	 */
4645 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4646 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4647 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4648 
4649 	if (adev->family == AMDGPU_FAMILY_RV) {
4650 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4651 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4652 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4653 	}
4654 }
4655 
4656 static void
4657 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4658 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4659 {
4660 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4661 
4662 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4663 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4664 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4665 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4666 		    AMD_FMT_MOD_SET(DCC, 1) |
4667 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4668 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4669 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4670 
4671 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4672 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4673 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4674 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4675 		    AMD_FMT_MOD_SET(DCC, 1) |
4676 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4677 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4678 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4679 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4680 
4681 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4682 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4683 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4684 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4685 
4686 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4687 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4688 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4689 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4690 
4691 
4692 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4693 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4694 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4695 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4696 
4697 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4698 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4699 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4700 }
4701 
4702 static void
4703 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4704 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4705 {
4706 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4707 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4708 
4709 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4710 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4711 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4712 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4713 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4714 		    AMD_FMT_MOD_SET(DCC, 1) |
4715 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4716 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4717 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4718 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4719 
4720 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4721 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4722 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4723 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4724 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4725 		    AMD_FMT_MOD_SET(DCC, 1) |
4726 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4727 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4728 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4729 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4730 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4731 
4732 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4733 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4734 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4735 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4736 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4737 
4738 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4739 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4740 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4741 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4742 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4743 
4744 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4745 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4746 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4747 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4748 
4749 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4750 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4751 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4752 }
4753 
4754 static int
4755 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4756 {
4757 	uint64_t size = 0, capacity = 128;
4758 	*mods = NULL;
4759 
4760 	/* We have not hooked up any pre-GFX9 modifiers. */
4761 	if (adev->family < AMDGPU_FAMILY_AI)
4762 		return 0;
4763 
4764 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4765 
4766 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4767 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4768 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4769 		return *mods ? 0 : -ENOMEM;
4770 	}
4771 
4772 	switch (adev->family) {
4773 	case AMDGPU_FAMILY_AI:
4774 	case AMDGPU_FAMILY_RV:
4775 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4776 		break;
4777 	case AMDGPU_FAMILY_NV:
4778 	case AMDGPU_FAMILY_VGH:
4779 	case AMDGPU_FAMILY_YC:
4780 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4781 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4782 		else
4783 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4784 		break;
4785 	}
4786 
4787 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4788 
4789 	/* INVALID marks the end of the list. */
4790 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4791 
4792 	if (!*mods)
4793 		return -ENOMEM;
4794 
4795 	return 0;
4796 }
4797 
4798 static int
4799 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4800 					  const struct amdgpu_framebuffer *afb,
4801 					  const enum surface_pixel_format format,
4802 					  const enum dc_rotation_angle rotation,
4803 					  const struct plane_size *plane_size,
4804 					  union dc_tiling_info *tiling_info,
4805 					  struct dc_plane_dcc_param *dcc,
4806 					  struct dc_plane_address *address,
4807 					  const bool force_disable_dcc)
4808 {
4809 	const uint64_t modifier = afb->base.modifier;
4810 	int ret = 0;
4811 
4812 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4813 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4814 
4815 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4816 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4817 
4818 		dcc->enable = 1;
4819 		dcc->meta_pitch = afb->base.pitches[1];
4820 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4821 
4822 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4823 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4824 	}
4825 
4826 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4827 	if (ret)
4828 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4829 
4830 	return ret;
4831 }
4832 
4833 static int
4834 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4835 			     const struct amdgpu_framebuffer *afb,
4836 			     const enum surface_pixel_format format,
4837 			     const enum dc_rotation_angle rotation,
4838 			     const uint64_t tiling_flags,
4839 			     union dc_tiling_info *tiling_info,
4840 			     struct plane_size *plane_size,
4841 			     struct dc_plane_dcc_param *dcc,
4842 			     struct dc_plane_address *address,
4843 			     bool tmz_surface,
4844 			     bool force_disable_dcc)
4845 {
4846 	const struct drm_framebuffer *fb = &afb->base;
4847 	int ret;
4848 
4849 	memset(tiling_info, 0, sizeof(*tiling_info));
4850 	memset(plane_size, 0, sizeof(*plane_size));
4851 	memset(dcc, 0, sizeof(*dcc));
4852 	memset(address, 0, sizeof(*address));
4853 
4854 	address->tmz_surface = tmz_surface;
4855 
4856 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4857 		uint64_t addr = afb->address + fb->offsets[0];
4858 
4859 		plane_size->surface_size.x = 0;
4860 		plane_size->surface_size.y = 0;
4861 		plane_size->surface_size.width = fb->width;
4862 		plane_size->surface_size.height = fb->height;
4863 		plane_size->surface_pitch =
4864 			fb->pitches[0] / fb->format->cpp[0];
4865 
4866 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4867 		address->grph.addr.low_part = lower_32_bits(addr);
4868 		address->grph.addr.high_part = upper_32_bits(addr);
4869 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4870 		uint64_t luma_addr = afb->address + fb->offsets[0];
4871 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4872 
4873 		plane_size->surface_size.x = 0;
4874 		plane_size->surface_size.y = 0;
4875 		plane_size->surface_size.width = fb->width;
4876 		plane_size->surface_size.height = fb->height;
4877 		plane_size->surface_pitch =
4878 			fb->pitches[0] / fb->format->cpp[0];
4879 
4880 		plane_size->chroma_size.x = 0;
4881 		plane_size->chroma_size.y = 0;
4882 		/* TODO: set these based on surface format */
4883 		plane_size->chroma_size.width = fb->width / 2;
4884 		plane_size->chroma_size.height = fb->height / 2;
4885 
4886 		plane_size->chroma_pitch =
4887 			fb->pitches[1] / fb->format->cpp[1];
4888 
4889 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4890 		address->video_progressive.luma_addr.low_part =
4891 			lower_32_bits(luma_addr);
4892 		address->video_progressive.luma_addr.high_part =
4893 			upper_32_bits(luma_addr);
4894 		address->video_progressive.chroma_addr.low_part =
4895 			lower_32_bits(chroma_addr);
4896 		address->video_progressive.chroma_addr.high_part =
4897 			upper_32_bits(chroma_addr);
4898 	}
4899 
4900 	if (adev->family >= AMDGPU_FAMILY_AI) {
4901 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4902 								rotation, plane_size,
4903 								tiling_info, dcc,
4904 								address,
4905 								force_disable_dcc);
4906 		if (ret)
4907 			return ret;
4908 	} else {
4909 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4910 	}
4911 
4912 	return 0;
4913 }
4914 
4915 static void
4916 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4917 			       bool *per_pixel_alpha, bool *global_alpha,
4918 			       int *global_alpha_value)
4919 {
4920 	*per_pixel_alpha = false;
4921 	*global_alpha = false;
4922 	*global_alpha_value = 0xff;
4923 
4924 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4925 		return;
4926 
4927 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4928 		static const uint32_t alpha_formats[] = {
4929 			DRM_FORMAT_ARGB8888,
4930 			DRM_FORMAT_RGBA8888,
4931 			DRM_FORMAT_ABGR8888,
4932 		};
4933 		uint32_t format = plane_state->fb->format->format;
4934 		unsigned int i;
4935 
4936 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4937 			if (format == alpha_formats[i]) {
4938 				*per_pixel_alpha = true;
4939 				break;
4940 			}
4941 		}
4942 	}
4943 
4944 	if (plane_state->alpha < 0xffff) {
4945 		*global_alpha = true;
4946 		*global_alpha_value = plane_state->alpha >> 8;
4947 	}
4948 }
4949 
4950 static int
4951 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4952 			    const enum surface_pixel_format format,
4953 			    enum dc_color_space *color_space)
4954 {
4955 	bool full_range;
4956 
4957 	*color_space = COLOR_SPACE_SRGB;
4958 
4959 	/* DRM color properties only affect non-RGB formats. */
4960 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4961 		return 0;
4962 
4963 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4964 
4965 	switch (plane_state->color_encoding) {
4966 	case DRM_COLOR_YCBCR_BT601:
4967 		if (full_range)
4968 			*color_space = COLOR_SPACE_YCBCR601;
4969 		else
4970 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4971 		break;
4972 
4973 	case DRM_COLOR_YCBCR_BT709:
4974 		if (full_range)
4975 			*color_space = COLOR_SPACE_YCBCR709;
4976 		else
4977 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4978 		break;
4979 
4980 	case DRM_COLOR_YCBCR_BT2020:
4981 		if (full_range)
4982 			*color_space = COLOR_SPACE_2020_YCBCR;
4983 		else
4984 			return -EINVAL;
4985 		break;
4986 
4987 	default:
4988 		return -EINVAL;
4989 	}
4990 
4991 	return 0;
4992 }
4993 
4994 static int
4995 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4996 			    const struct drm_plane_state *plane_state,
4997 			    const uint64_t tiling_flags,
4998 			    struct dc_plane_info *plane_info,
4999 			    struct dc_plane_address *address,
5000 			    bool tmz_surface,
5001 			    bool force_disable_dcc)
5002 {
5003 	const struct drm_framebuffer *fb = plane_state->fb;
5004 	const struct amdgpu_framebuffer *afb =
5005 		to_amdgpu_framebuffer(plane_state->fb);
5006 	int ret;
5007 
5008 	memset(plane_info, 0, sizeof(*plane_info));
5009 
5010 	switch (fb->format->format) {
5011 	case DRM_FORMAT_C8:
5012 		plane_info->format =
5013 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5014 		break;
5015 	case DRM_FORMAT_RGB565:
5016 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5017 		break;
5018 	case DRM_FORMAT_XRGB8888:
5019 	case DRM_FORMAT_ARGB8888:
5020 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5021 		break;
5022 	case DRM_FORMAT_XRGB2101010:
5023 	case DRM_FORMAT_ARGB2101010:
5024 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5025 		break;
5026 	case DRM_FORMAT_XBGR2101010:
5027 	case DRM_FORMAT_ABGR2101010:
5028 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5029 		break;
5030 	case DRM_FORMAT_XBGR8888:
5031 	case DRM_FORMAT_ABGR8888:
5032 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5033 		break;
5034 	case DRM_FORMAT_NV21:
5035 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5036 		break;
5037 	case DRM_FORMAT_NV12:
5038 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5039 		break;
5040 	case DRM_FORMAT_P010:
5041 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5042 		break;
5043 	case DRM_FORMAT_XRGB16161616F:
5044 	case DRM_FORMAT_ARGB16161616F:
5045 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5046 		break;
5047 	case DRM_FORMAT_XBGR16161616F:
5048 	case DRM_FORMAT_ABGR16161616F:
5049 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5050 		break;
5051 	case DRM_FORMAT_XRGB16161616:
5052 	case DRM_FORMAT_ARGB16161616:
5053 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5054 		break;
5055 	case DRM_FORMAT_XBGR16161616:
5056 	case DRM_FORMAT_ABGR16161616:
5057 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5058 		break;
5059 	default:
5060 		DRM_ERROR(
5061 			"Unsupported screen format %p4cc\n",
5062 			&fb->format->format);
5063 		return -EINVAL;
5064 	}
5065 
5066 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5067 	case DRM_MODE_ROTATE_0:
5068 		plane_info->rotation = ROTATION_ANGLE_0;
5069 		break;
5070 	case DRM_MODE_ROTATE_90:
5071 		plane_info->rotation = ROTATION_ANGLE_90;
5072 		break;
5073 	case DRM_MODE_ROTATE_180:
5074 		plane_info->rotation = ROTATION_ANGLE_180;
5075 		break;
5076 	case DRM_MODE_ROTATE_270:
5077 		plane_info->rotation = ROTATION_ANGLE_270;
5078 		break;
5079 	default:
5080 		plane_info->rotation = ROTATION_ANGLE_0;
5081 		break;
5082 	}
5083 
5084 	plane_info->visible = true;
5085 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5086 
5087 	plane_info->layer_index = 0;
5088 
5089 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5090 					  &plane_info->color_space);
5091 	if (ret)
5092 		return ret;
5093 
5094 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5095 					   plane_info->rotation, tiling_flags,
5096 					   &plane_info->tiling_info,
5097 					   &plane_info->plane_size,
5098 					   &plane_info->dcc, address, tmz_surface,
5099 					   force_disable_dcc);
5100 	if (ret)
5101 		return ret;
5102 
5103 	fill_blending_from_plane_state(
5104 		plane_state, &plane_info->per_pixel_alpha,
5105 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5106 
5107 	return 0;
5108 }
5109 
5110 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5111 				    struct dc_plane_state *dc_plane_state,
5112 				    struct drm_plane_state *plane_state,
5113 				    struct drm_crtc_state *crtc_state)
5114 {
5115 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5116 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5117 	struct dc_scaling_info scaling_info;
5118 	struct dc_plane_info plane_info;
5119 	int ret;
5120 	bool force_disable_dcc = false;
5121 
5122 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5123 	if (ret)
5124 		return ret;
5125 
5126 	dc_plane_state->src_rect = scaling_info.src_rect;
5127 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5128 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5129 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5130 
5131 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5132 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5133 					  afb->tiling_flags,
5134 					  &plane_info,
5135 					  &dc_plane_state->address,
5136 					  afb->tmz_surface,
5137 					  force_disable_dcc);
5138 	if (ret)
5139 		return ret;
5140 
5141 	dc_plane_state->format = plane_info.format;
5142 	dc_plane_state->color_space = plane_info.color_space;
5143 	dc_plane_state->format = plane_info.format;
5144 	dc_plane_state->plane_size = plane_info.plane_size;
5145 	dc_plane_state->rotation = plane_info.rotation;
5146 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5147 	dc_plane_state->stereo_format = plane_info.stereo_format;
5148 	dc_plane_state->tiling_info = plane_info.tiling_info;
5149 	dc_plane_state->visible = plane_info.visible;
5150 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5151 	dc_plane_state->global_alpha = plane_info.global_alpha;
5152 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5153 	dc_plane_state->dcc = plane_info.dcc;
5154 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5155 	dc_plane_state->flip_int_enabled = true;
5156 
5157 	/*
5158 	 * Always set input transfer function, since plane state is refreshed
5159 	 * every time.
5160 	 */
5161 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5162 	if (ret)
5163 		return ret;
5164 
5165 	return 0;
5166 }
5167 
5168 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5169 					   const struct dm_connector_state *dm_state,
5170 					   struct dc_stream_state *stream)
5171 {
5172 	enum amdgpu_rmx_type rmx_type;
5173 
5174 	struct rect src = { 0 }; /* viewport in composition space*/
5175 	struct rect dst = { 0 }; /* stream addressable area */
5176 
5177 	/* no mode. nothing to be done */
5178 	if (!mode)
5179 		return;
5180 
5181 	/* Full screen scaling by default */
5182 	src.width = mode->hdisplay;
5183 	src.height = mode->vdisplay;
5184 	dst.width = stream->timing.h_addressable;
5185 	dst.height = stream->timing.v_addressable;
5186 
5187 	if (dm_state) {
5188 		rmx_type = dm_state->scaling;
5189 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5190 			if (src.width * dst.height <
5191 					src.height * dst.width) {
5192 				/* height needs less upscaling/more downscaling */
5193 				dst.width = src.width *
5194 						dst.height / src.height;
5195 			} else {
5196 				/* width needs less upscaling/more downscaling */
5197 				dst.height = src.height *
5198 						dst.width / src.width;
5199 			}
5200 		} else if (rmx_type == RMX_CENTER) {
5201 			dst = src;
5202 		}
5203 
5204 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5205 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5206 
5207 		if (dm_state->underscan_enable) {
5208 			dst.x += dm_state->underscan_hborder / 2;
5209 			dst.y += dm_state->underscan_vborder / 2;
5210 			dst.width -= dm_state->underscan_hborder;
5211 			dst.height -= dm_state->underscan_vborder;
5212 		}
5213 	}
5214 
5215 	stream->src = src;
5216 	stream->dst = dst;
5217 
5218 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5219 		      dst.x, dst.y, dst.width, dst.height);
5220 
5221 }
5222 
5223 static enum dc_color_depth
5224 convert_color_depth_from_display_info(const struct drm_connector *connector,
5225 				      bool is_y420, int requested_bpc)
5226 {
5227 	uint8_t bpc;
5228 
5229 	if (is_y420) {
5230 		bpc = 8;
5231 
5232 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5233 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5234 			bpc = 16;
5235 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5236 			bpc = 12;
5237 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5238 			bpc = 10;
5239 	} else {
5240 		bpc = (uint8_t)connector->display_info.bpc;
5241 		/* Assume 8 bpc by default if no bpc is specified. */
5242 		bpc = bpc ? bpc : 8;
5243 	}
5244 
5245 	if (requested_bpc > 0) {
5246 		/*
5247 		 * Cap display bpc based on the user requested value.
5248 		 *
5249 		 * The value for state->max_bpc may not correctly updated
5250 		 * depending on when the connector gets added to the state
5251 		 * or if this was called outside of atomic check, so it
5252 		 * can't be used directly.
5253 		 */
5254 		bpc = min_t(u8, bpc, requested_bpc);
5255 
5256 		/* Round down to the nearest even number. */
5257 		bpc = bpc - (bpc & 1);
5258 	}
5259 
5260 	switch (bpc) {
5261 	case 0:
5262 		/*
5263 		 * Temporary Work around, DRM doesn't parse color depth for
5264 		 * EDID revision before 1.4
5265 		 * TODO: Fix edid parsing
5266 		 */
5267 		return COLOR_DEPTH_888;
5268 	case 6:
5269 		return COLOR_DEPTH_666;
5270 	case 8:
5271 		return COLOR_DEPTH_888;
5272 	case 10:
5273 		return COLOR_DEPTH_101010;
5274 	case 12:
5275 		return COLOR_DEPTH_121212;
5276 	case 14:
5277 		return COLOR_DEPTH_141414;
5278 	case 16:
5279 		return COLOR_DEPTH_161616;
5280 	default:
5281 		return COLOR_DEPTH_UNDEFINED;
5282 	}
5283 }
5284 
5285 static enum dc_aspect_ratio
5286 get_aspect_ratio(const struct drm_display_mode *mode_in)
5287 {
5288 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5289 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5290 }
5291 
5292 static enum dc_color_space
5293 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5294 {
5295 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5296 
5297 	switch (dc_crtc_timing->pixel_encoding)	{
5298 	case PIXEL_ENCODING_YCBCR422:
5299 	case PIXEL_ENCODING_YCBCR444:
5300 	case PIXEL_ENCODING_YCBCR420:
5301 	{
5302 		/*
5303 		 * 27030khz is the separation point between HDTV and SDTV
5304 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5305 		 * respectively
5306 		 */
5307 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5308 			if (dc_crtc_timing->flags.Y_ONLY)
5309 				color_space =
5310 					COLOR_SPACE_YCBCR709_LIMITED;
5311 			else
5312 				color_space = COLOR_SPACE_YCBCR709;
5313 		} else {
5314 			if (dc_crtc_timing->flags.Y_ONLY)
5315 				color_space =
5316 					COLOR_SPACE_YCBCR601_LIMITED;
5317 			else
5318 				color_space = COLOR_SPACE_YCBCR601;
5319 		}
5320 
5321 	}
5322 	break;
5323 	case PIXEL_ENCODING_RGB:
5324 		color_space = COLOR_SPACE_SRGB;
5325 		break;
5326 
5327 	default:
5328 		WARN_ON(1);
5329 		break;
5330 	}
5331 
5332 	return color_space;
5333 }
5334 
5335 static bool adjust_colour_depth_from_display_info(
5336 	struct dc_crtc_timing *timing_out,
5337 	const struct drm_display_info *info)
5338 {
5339 	enum dc_color_depth depth = timing_out->display_color_depth;
5340 	int normalized_clk;
5341 	do {
5342 		normalized_clk = timing_out->pix_clk_100hz / 10;
5343 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5344 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5345 			normalized_clk /= 2;
5346 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5347 		switch (depth) {
5348 		case COLOR_DEPTH_888:
5349 			break;
5350 		case COLOR_DEPTH_101010:
5351 			normalized_clk = (normalized_clk * 30) / 24;
5352 			break;
5353 		case COLOR_DEPTH_121212:
5354 			normalized_clk = (normalized_clk * 36) / 24;
5355 			break;
5356 		case COLOR_DEPTH_161616:
5357 			normalized_clk = (normalized_clk * 48) / 24;
5358 			break;
5359 		default:
5360 			/* The above depths are the only ones valid for HDMI. */
5361 			return false;
5362 		}
5363 		if (normalized_clk <= info->max_tmds_clock) {
5364 			timing_out->display_color_depth = depth;
5365 			return true;
5366 		}
5367 	} while (--depth > COLOR_DEPTH_666);
5368 	return false;
5369 }
5370 
5371 static void fill_stream_properties_from_drm_display_mode(
5372 	struct dc_stream_state *stream,
5373 	const struct drm_display_mode *mode_in,
5374 	const struct drm_connector *connector,
5375 	const struct drm_connector_state *connector_state,
5376 	const struct dc_stream_state *old_stream,
5377 	int requested_bpc)
5378 {
5379 	struct dc_crtc_timing *timing_out = &stream->timing;
5380 	const struct drm_display_info *info = &connector->display_info;
5381 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5382 	struct hdmi_vendor_infoframe hv_frame;
5383 	struct hdmi_avi_infoframe avi_frame;
5384 
5385 	memset(&hv_frame, 0, sizeof(hv_frame));
5386 	memset(&avi_frame, 0, sizeof(avi_frame));
5387 
5388 	timing_out->h_border_left = 0;
5389 	timing_out->h_border_right = 0;
5390 	timing_out->v_border_top = 0;
5391 	timing_out->v_border_bottom = 0;
5392 	/* TODO: un-hardcode */
5393 	if (drm_mode_is_420_only(info, mode_in)
5394 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5395 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5396 	else if (drm_mode_is_420_also(info, mode_in)
5397 			&& aconnector->force_yuv420_output)
5398 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5399 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5400 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5401 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5402 	else
5403 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5404 
5405 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5406 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5407 		connector,
5408 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5409 		requested_bpc);
5410 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5411 	timing_out->hdmi_vic = 0;
5412 
5413 	if(old_stream) {
5414 		timing_out->vic = old_stream->timing.vic;
5415 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5416 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5417 	} else {
5418 		timing_out->vic = drm_match_cea_mode(mode_in);
5419 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5420 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5421 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5422 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5423 	}
5424 
5425 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5426 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5427 		timing_out->vic = avi_frame.video_code;
5428 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5429 		timing_out->hdmi_vic = hv_frame.vic;
5430 	}
5431 
5432 	if (is_freesync_video_mode(mode_in, aconnector)) {
5433 		timing_out->h_addressable = mode_in->hdisplay;
5434 		timing_out->h_total = mode_in->htotal;
5435 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5436 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5437 		timing_out->v_total = mode_in->vtotal;
5438 		timing_out->v_addressable = mode_in->vdisplay;
5439 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5440 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5441 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5442 	} else {
5443 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5444 		timing_out->h_total = mode_in->crtc_htotal;
5445 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5446 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5447 		timing_out->v_total = mode_in->crtc_vtotal;
5448 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5449 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5450 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5451 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5452 	}
5453 
5454 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5455 
5456 	stream->output_color_space = get_output_color_space(timing_out);
5457 
5458 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5459 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5460 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5461 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5462 		    drm_mode_is_420_also(info, mode_in) &&
5463 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5464 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5465 			adjust_colour_depth_from_display_info(timing_out, info);
5466 		}
5467 	}
5468 }
5469 
5470 static void fill_audio_info(struct audio_info *audio_info,
5471 			    const struct drm_connector *drm_connector,
5472 			    const struct dc_sink *dc_sink)
5473 {
5474 	int i = 0;
5475 	int cea_revision = 0;
5476 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5477 
5478 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5479 	audio_info->product_id = edid_caps->product_id;
5480 
5481 	cea_revision = drm_connector->display_info.cea_rev;
5482 
5483 #ifdef __linux__
5484 	strscpy(audio_info->display_name,
5485 		edid_caps->display_name,
5486 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5487 #else
5488 	strncpy(audio_info->display_name,
5489 		edid_caps->display_name,
5490 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
5491 #endif
5492 
5493 	if (cea_revision >= 3) {
5494 		audio_info->mode_count = edid_caps->audio_mode_count;
5495 
5496 		for (i = 0; i < audio_info->mode_count; ++i) {
5497 			audio_info->modes[i].format_code =
5498 					(enum audio_format_code)
5499 					(edid_caps->audio_modes[i].format_code);
5500 			audio_info->modes[i].channel_count =
5501 					edid_caps->audio_modes[i].channel_count;
5502 			audio_info->modes[i].sample_rates.all =
5503 					edid_caps->audio_modes[i].sample_rate;
5504 			audio_info->modes[i].sample_size =
5505 					edid_caps->audio_modes[i].sample_size;
5506 		}
5507 	}
5508 
5509 	audio_info->flags.all = edid_caps->speaker_flags;
5510 
5511 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5512 	if (drm_connector->latency_present[0]) {
5513 		audio_info->video_latency = drm_connector->video_latency[0];
5514 		audio_info->audio_latency = drm_connector->audio_latency[0];
5515 	}
5516 
5517 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5518 
5519 }
5520 
5521 static void
5522 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5523 				      struct drm_display_mode *dst_mode)
5524 {
5525 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5526 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5527 	dst_mode->crtc_clock = src_mode->crtc_clock;
5528 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5529 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5530 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5531 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5532 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5533 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5534 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5535 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5536 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5537 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5538 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5539 }
5540 
5541 static void
5542 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5543 					const struct drm_display_mode *native_mode,
5544 					bool scale_enabled)
5545 {
5546 	if (scale_enabled) {
5547 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5548 	} else if (native_mode->clock == drm_mode->clock &&
5549 			native_mode->htotal == drm_mode->htotal &&
5550 			native_mode->vtotal == drm_mode->vtotal) {
5551 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5552 	} else {
5553 		/* no scaling nor amdgpu inserted, no need to patch */
5554 	}
5555 }
5556 
5557 static struct dc_sink *
5558 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5559 {
5560 	struct dc_sink_init_data sink_init_data = { 0 };
5561 	struct dc_sink *sink = NULL;
5562 	sink_init_data.link = aconnector->dc_link;
5563 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5564 
5565 	sink = dc_sink_create(&sink_init_data);
5566 	if (!sink) {
5567 		DRM_ERROR("Failed to create sink!\n");
5568 		return NULL;
5569 	}
5570 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5571 
5572 	return sink;
5573 }
5574 
5575 static void set_multisync_trigger_params(
5576 		struct dc_stream_state *stream)
5577 {
5578 	struct dc_stream_state *master = NULL;
5579 
5580 	if (stream->triggered_crtc_reset.enabled) {
5581 		master = stream->triggered_crtc_reset.event_source;
5582 		stream->triggered_crtc_reset.event =
5583 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5584 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5585 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5586 	}
5587 }
5588 
5589 static void set_master_stream(struct dc_stream_state *stream_set[],
5590 			      int stream_count)
5591 {
5592 	int j, highest_rfr = 0, master_stream = 0;
5593 
5594 	for (j = 0;  j < stream_count; j++) {
5595 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5596 			int refresh_rate = 0;
5597 
5598 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5599 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5600 			if (refresh_rate > highest_rfr) {
5601 				highest_rfr = refresh_rate;
5602 				master_stream = j;
5603 			}
5604 		}
5605 	}
5606 	for (j = 0;  j < stream_count; j++) {
5607 		if (stream_set[j])
5608 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5609 	}
5610 }
5611 
5612 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5613 {
5614 	int i = 0;
5615 	struct dc_stream_state *stream;
5616 
5617 	if (context->stream_count < 2)
5618 		return;
5619 	for (i = 0; i < context->stream_count ; i++) {
5620 		if (!context->streams[i])
5621 			continue;
5622 		/*
5623 		 * TODO: add a function to read AMD VSDB bits and set
5624 		 * crtc_sync_master.multi_sync_enabled flag
5625 		 * For now it's set to false
5626 		 */
5627 	}
5628 
5629 	set_master_stream(context->streams, context->stream_count);
5630 
5631 	for (i = 0; i < context->stream_count ; i++) {
5632 		stream = context->streams[i];
5633 
5634 		if (!stream)
5635 			continue;
5636 
5637 		set_multisync_trigger_params(stream);
5638 	}
5639 }
5640 
5641 #if defined(CONFIG_DRM_AMD_DC_DCN)
5642 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5643 							struct dc_sink *sink, struct dc_stream_state *stream,
5644 							struct dsc_dec_dpcd_caps *dsc_caps)
5645 {
5646 	stream->timing.flags.DSC = 0;
5647 	dsc_caps->is_dsc_supported = false;
5648 
5649 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5650 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5651 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5652 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5653 				      dsc_caps);
5654 	}
5655 }
5656 
5657 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5658 										struct dc_sink *sink, struct dc_stream_state *stream,
5659 										struct dsc_dec_dpcd_caps *dsc_caps)
5660 {
5661 	struct drm_connector *drm_connector = &aconnector->base;
5662 	uint32_t link_bandwidth_kbps;
5663 
5664 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5665 							dc_link_get_link_cap(aconnector->dc_link));
5666 	/* Set DSC policy according to dsc_clock_en */
5667 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5668 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5669 
5670 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5671 
5672 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5673 						dsc_caps,
5674 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5675 						0,
5676 						link_bandwidth_kbps,
5677 						&stream->timing,
5678 						&stream->timing.dsc_cfg)) {
5679 			stream->timing.flags.DSC = 1;
5680 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5681 		}
5682 	}
5683 
5684 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5685 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5686 		stream->timing.flags.DSC = 1;
5687 
5688 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5689 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5690 
5691 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5692 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5693 
5694 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5695 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5696 }
5697 #endif
5698 
5699 /**
5700  * DOC: FreeSync Video
5701  *
5702  * When a userspace application wants to play a video, the content follows a
5703  * standard format definition that usually specifies the FPS for that format.
5704  * The below list illustrates some video format and the expected FPS,
5705  * respectively:
5706  *
5707  * - TV/NTSC (23.976 FPS)
5708  * - Cinema (24 FPS)
5709  * - TV/PAL (25 FPS)
5710  * - TV/NTSC (29.97 FPS)
5711  * - TV/NTSC (30 FPS)
5712  * - Cinema HFR (48 FPS)
5713  * - TV/PAL (50 FPS)
5714  * - Commonly used (60 FPS)
5715  * - Multiples of 24 (48,72,96 FPS)
5716  *
5717  * The list of standards video format is not huge and can be added to the
5718  * connector modeset list beforehand. With that, userspace can leverage
5719  * FreeSync to extends the front porch in order to attain the target refresh
5720  * rate. Such a switch will happen seamlessly, without screen blanking or
5721  * reprogramming of the output in any other way. If the userspace requests a
5722  * modesetting change compatible with FreeSync modes that only differ in the
5723  * refresh rate, DC will skip the full update and avoid blink during the
5724  * transition. For example, the video player can change the modesetting from
5725  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5726  * causing any display blink. This same concept can be applied to a mode
5727  * setting change.
5728  */
5729 static struct drm_display_mode *
5730 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5731 			  bool use_probed_modes)
5732 {
5733 	struct drm_display_mode *m, *m_pref = NULL;
5734 	u16 current_refresh, highest_refresh;
5735 	struct list_head *list_head = use_probed_modes ?
5736 						    &aconnector->base.probed_modes :
5737 						    &aconnector->base.modes;
5738 
5739 	if (aconnector->freesync_vid_base.clock != 0)
5740 		return &aconnector->freesync_vid_base;
5741 
5742 	/* Find the preferred mode */
5743 	list_for_each_entry (m, list_head, head) {
5744 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5745 			m_pref = m;
5746 			break;
5747 		}
5748 	}
5749 
5750 	if (!m_pref) {
5751 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5752 		m_pref = list_first_entry_or_null(
5753 			&aconnector->base.modes, struct drm_display_mode, head);
5754 		if (!m_pref) {
5755 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5756 			return NULL;
5757 		}
5758 	}
5759 
5760 	highest_refresh = drm_mode_vrefresh(m_pref);
5761 
5762 	/*
5763 	 * Find the mode with highest refresh rate with same resolution.
5764 	 * For some monitors, preferred mode is not the mode with highest
5765 	 * supported refresh rate.
5766 	 */
5767 	list_for_each_entry (m, list_head, head) {
5768 		current_refresh  = drm_mode_vrefresh(m);
5769 
5770 		if (m->hdisplay == m_pref->hdisplay &&
5771 		    m->vdisplay == m_pref->vdisplay &&
5772 		    highest_refresh < current_refresh) {
5773 			highest_refresh = current_refresh;
5774 			m_pref = m;
5775 		}
5776 	}
5777 
5778 	aconnector->freesync_vid_base = *m_pref;
5779 	return m_pref;
5780 }
5781 
5782 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5783 				   struct amdgpu_dm_connector *aconnector)
5784 {
5785 	struct drm_display_mode *high_mode;
5786 	int timing_diff;
5787 
5788 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5789 	if (!high_mode || !mode)
5790 		return false;
5791 
5792 	timing_diff = high_mode->vtotal - mode->vtotal;
5793 
5794 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5795 	    high_mode->hdisplay != mode->hdisplay ||
5796 	    high_mode->vdisplay != mode->vdisplay ||
5797 	    high_mode->hsync_start != mode->hsync_start ||
5798 	    high_mode->hsync_end != mode->hsync_end ||
5799 	    high_mode->htotal != mode->htotal ||
5800 	    high_mode->hskew != mode->hskew ||
5801 	    high_mode->vscan != mode->vscan ||
5802 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5803 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5804 		return false;
5805 	else
5806 		return true;
5807 }
5808 
5809 static struct dc_stream_state *
5810 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5811 		       const struct drm_display_mode *drm_mode,
5812 		       const struct dm_connector_state *dm_state,
5813 		       const struct dc_stream_state *old_stream,
5814 		       int requested_bpc)
5815 {
5816 	struct drm_display_mode *preferred_mode = NULL;
5817 	struct drm_connector *drm_connector;
5818 	const struct drm_connector_state *con_state =
5819 		dm_state ? &dm_state->base : NULL;
5820 	struct dc_stream_state *stream = NULL;
5821 	struct drm_display_mode mode = *drm_mode;
5822 	struct drm_display_mode saved_mode;
5823 	struct drm_display_mode *freesync_mode = NULL;
5824 	bool native_mode_found = false;
5825 	bool recalculate_timing = false;
5826 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5827 	int mode_refresh;
5828 	int preferred_refresh = 0;
5829 #if defined(CONFIG_DRM_AMD_DC_DCN)
5830 	struct dsc_dec_dpcd_caps dsc_caps;
5831 #endif
5832 	struct dc_sink *sink = NULL;
5833 
5834 	memset(&saved_mode, 0, sizeof(saved_mode));
5835 
5836 	if (aconnector == NULL) {
5837 		DRM_ERROR("aconnector is NULL!\n");
5838 		return stream;
5839 	}
5840 
5841 	drm_connector = &aconnector->base;
5842 
5843 	if (!aconnector->dc_sink) {
5844 		sink = create_fake_sink(aconnector);
5845 		if (!sink)
5846 			return stream;
5847 	} else {
5848 		sink = aconnector->dc_sink;
5849 		dc_sink_retain(sink);
5850 	}
5851 
5852 	stream = dc_create_stream_for_sink(sink);
5853 
5854 	if (stream == NULL) {
5855 		DRM_ERROR("Failed to create stream for sink!\n");
5856 		goto finish;
5857 	}
5858 
5859 	stream->dm_stream_context = aconnector;
5860 
5861 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5862 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5863 
5864 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5865 		/* Search for preferred mode */
5866 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5867 			native_mode_found = true;
5868 			break;
5869 		}
5870 	}
5871 	if (!native_mode_found)
5872 		preferred_mode = list_first_entry_or_null(
5873 				&aconnector->base.modes,
5874 				struct drm_display_mode,
5875 				head);
5876 
5877 	mode_refresh = drm_mode_vrefresh(&mode);
5878 
5879 	if (preferred_mode == NULL) {
5880 		/*
5881 		 * This may not be an error, the use case is when we have no
5882 		 * usermode calls to reset and set mode upon hotplug. In this
5883 		 * case, we call set mode ourselves to restore the previous mode
5884 		 * and the modelist may not be filled in in time.
5885 		 */
5886 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5887 	} else {
5888 		recalculate_timing = amdgpu_freesync_vid_mode &&
5889 				 is_freesync_video_mode(&mode, aconnector);
5890 		if (recalculate_timing) {
5891 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5892 			saved_mode = mode;
5893 			mode = *freesync_mode;
5894 		} else {
5895 			decide_crtc_timing_for_drm_display_mode(
5896 				&mode, preferred_mode, scale);
5897 
5898 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5899 		}
5900 	}
5901 
5902 	if (recalculate_timing)
5903 		drm_mode_set_crtcinfo(&saved_mode, 0);
5904 	else if (!dm_state)
5905 		drm_mode_set_crtcinfo(&mode, 0);
5906 
5907        /*
5908 	* If scaling is enabled and refresh rate didn't change
5909 	* we copy the vic and polarities of the old timings
5910 	*/
5911 	if (!scale || mode_refresh != preferred_refresh)
5912 		fill_stream_properties_from_drm_display_mode(
5913 			stream, &mode, &aconnector->base, con_state, NULL,
5914 			requested_bpc);
5915 	else
5916 		fill_stream_properties_from_drm_display_mode(
5917 			stream, &mode, &aconnector->base, con_state, old_stream,
5918 			requested_bpc);
5919 
5920 #if defined(CONFIG_DRM_AMD_DC_DCN)
5921 	/* SST DSC determination policy */
5922 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5923 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5924 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5925 #endif
5926 
5927 	update_stream_scaling_settings(&mode, dm_state, stream);
5928 
5929 	fill_audio_info(
5930 		&stream->audio_info,
5931 		drm_connector,
5932 		sink);
5933 
5934 	update_stream_signal(stream, sink);
5935 
5936 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5937 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5938 
5939 	if (stream->link->psr_settings.psr_feature_enabled) {
5940 		//
5941 		// should decide stream support vsc sdp colorimetry capability
5942 		// before building vsc info packet
5943 		//
5944 		stream->use_vsc_sdp_for_colorimetry = false;
5945 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5946 			stream->use_vsc_sdp_for_colorimetry =
5947 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5948 		} else {
5949 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5950 				stream->use_vsc_sdp_for_colorimetry = true;
5951 		}
5952 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5953 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5954 
5955 	}
5956 finish:
5957 	dc_sink_release(sink);
5958 
5959 	return stream;
5960 }
5961 
5962 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5963 {
5964 	drm_crtc_cleanup(crtc);
5965 	kfree(crtc);
5966 }
5967 
5968 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5969 				  struct drm_crtc_state *state)
5970 {
5971 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5972 
5973 	/* TODO Destroy dc_stream objects are stream object is flattened */
5974 	if (cur->stream)
5975 		dc_stream_release(cur->stream);
5976 
5977 
5978 	__drm_atomic_helper_crtc_destroy_state(state);
5979 
5980 
5981 	kfree(state);
5982 }
5983 
5984 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5985 {
5986 	struct dm_crtc_state *state;
5987 
5988 	if (crtc->state)
5989 		dm_crtc_destroy_state(crtc, crtc->state);
5990 
5991 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5992 	if (WARN_ON(!state))
5993 		return;
5994 
5995 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5996 }
5997 
5998 static struct drm_crtc_state *
5999 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6000 {
6001 	struct dm_crtc_state *state, *cur;
6002 
6003 	cur = to_dm_crtc_state(crtc->state);
6004 
6005 	if (WARN_ON(!crtc->state))
6006 		return NULL;
6007 
6008 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6009 	if (!state)
6010 		return NULL;
6011 
6012 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6013 
6014 	if (cur->stream) {
6015 		state->stream = cur->stream;
6016 		dc_stream_retain(state->stream);
6017 	}
6018 
6019 	state->active_planes = cur->active_planes;
6020 	state->vrr_infopacket = cur->vrr_infopacket;
6021 	state->abm_level = cur->abm_level;
6022 	state->vrr_supported = cur->vrr_supported;
6023 	state->freesync_config = cur->freesync_config;
6024 	state->cm_has_degamma = cur->cm_has_degamma;
6025 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6026 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6027 
6028 	return &state->base;
6029 }
6030 
6031 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6032 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6033 {
6034 	crtc_debugfs_init(crtc);
6035 
6036 	return 0;
6037 }
6038 #endif
6039 
6040 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6041 {
6042 	enum dc_irq_source irq_source;
6043 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6044 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6045 	int rc;
6046 
6047 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6048 
6049 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6050 
6051 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6052 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6053 	return rc;
6054 }
6055 
6056 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6057 {
6058 	enum dc_irq_source irq_source;
6059 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6060 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6061 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6062 #if defined(CONFIG_DRM_AMD_DC_DCN)
6063 	struct amdgpu_display_manager *dm = &adev->dm;
6064 	struct vblank_control_work *work;
6065 #endif
6066 	int rc = 0;
6067 
6068 	if (enable) {
6069 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6070 		if (amdgpu_dm_vrr_active(acrtc_state))
6071 			rc = dm_set_vupdate_irq(crtc, true);
6072 	} else {
6073 		/* vblank irq off -> vupdate irq off */
6074 		rc = dm_set_vupdate_irq(crtc, false);
6075 	}
6076 
6077 	if (rc)
6078 		return rc;
6079 
6080 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6081 
6082 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6083 		return -EBUSY;
6084 
6085 	if (amdgpu_in_reset(adev))
6086 		return 0;
6087 
6088 #if defined(CONFIG_DRM_AMD_DC_DCN)
6089 	if (dm->vblank_control_workqueue) {
6090 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6091 		if (!work)
6092 			return -ENOMEM;
6093 
6094 		INIT_WORK(&work->work, vblank_control_worker);
6095 		work->dm = dm;
6096 		work->acrtc = acrtc;
6097 		work->enable = enable;
6098 
6099 		if (acrtc_state->stream) {
6100 			dc_stream_retain(acrtc_state->stream);
6101 			work->stream = acrtc_state->stream;
6102 		}
6103 
6104 		queue_work(dm->vblank_control_workqueue, &work->work);
6105 	}
6106 #endif
6107 
6108 	return 0;
6109 }
6110 
6111 static int dm_enable_vblank(struct drm_crtc *crtc)
6112 {
6113 	return dm_set_vblank(crtc, true);
6114 }
6115 
6116 static void dm_disable_vblank(struct drm_crtc *crtc)
6117 {
6118 	dm_set_vblank(crtc, false);
6119 }
6120 
6121 /* Implemented only the options currently availible for the driver */
6122 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6123 	.reset = dm_crtc_reset_state,
6124 	.destroy = amdgpu_dm_crtc_destroy,
6125 	.set_config = drm_atomic_helper_set_config,
6126 	.page_flip = drm_atomic_helper_page_flip,
6127 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6128 	.atomic_destroy_state = dm_crtc_destroy_state,
6129 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6130 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6131 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6132 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6133 	.enable_vblank = dm_enable_vblank,
6134 	.disable_vblank = dm_disable_vblank,
6135 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6136 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6137 	.late_register = amdgpu_dm_crtc_late_register,
6138 #endif
6139 };
6140 
6141 static enum drm_connector_status
6142 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6143 {
6144 	bool connected;
6145 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6146 
6147 	/*
6148 	 * Notes:
6149 	 * 1. This interface is NOT called in context of HPD irq.
6150 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6151 	 * makes it a bad place for *any* MST-related activity.
6152 	 */
6153 
6154 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6155 	    !aconnector->fake_enable)
6156 		connected = (aconnector->dc_sink != NULL);
6157 	else
6158 		connected = (aconnector->base.force == DRM_FORCE_ON);
6159 
6160 	update_subconnector_property(aconnector);
6161 
6162 	return (connected ? connector_status_connected :
6163 			connector_status_disconnected);
6164 }
6165 
6166 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6167 					    struct drm_connector_state *connector_state,
6168 					    struct drm_property *property,
6169 					    uint64_t val)
6170 {
6171 	struct drm_device *dev = connector->dev;
6172 	struct amdgpu_device *adev = drm_to_adev(dev);
6173 	struct dm_connector_state *dm_old_state =
6174 		to_dm_connector_state(connector->state);
6175 	struct dm_connector_state *dm_new_state =
6176 		to_dm_connector_state(connector_state);
6177 
6178 	int ret = -EINVAL;
6179 
6180 	if (property == dev->mode_config.scaling_mode_property) {
6181 		enum amdgpu_rmx_type rmx_type;
6182 
6183 		switch (val) {
6184 		case DRM_MODE_SCALE_CENTER:
6185 			rmx_type = RMX_CENTER;
6186 			break;
6187 		case DRM_MODE_SCALE_ASPECT:
6188 			rmx_type = RMX_ASPECT;
6189 			break;
6190 		case DRM_MODE_SCALE_FULLSCREEN:
6191 			rmx_type = RMX_FULL;
6192 			break;
6193 		case DRM_MODE_SCALE_NONE:
6194 		default:
6195 			rmx_type = RMX_OFF;
6196 			break;
6197 		}
6198 
6199 		if (dm_old_state->scaling == rmx_type)
6200 			return 0;
6201 
6202 		dm_new_state->scaling = rmx_type;
6203 		ret = 0;
6204 	} else if (property == adev->mode_info.underscan_hborder_property) {
6205 		dm_new_state->underscan_hborder = val;
6206 		ret = 0;
6207 	} else if (property == adev->mode_info.underscan_vborder_property) {
6208 		dm_new_state->underscan_vborder = val;
6209 		ret = 0;
6210 	} else if (property == adev->mode_info.underscan_property) {
6211 		dm_new_state->underscan_enable = val;
6212 		ret = 0;
6213 	} else if (property == adev->mode_info.abm_level_property) {
6214 		dm_new_state->abm_level = val;
6215 		ret = 0;
6216 	}
6217 
6218 	return ret;
6219 }
6220 
6221 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6222 					    const struct drm_connector_state *state,
6223 					    struct drm_property *property,
6224 					    uint64_t *val)
6225 {
6226 	struct drm_device *dev = connector->dev;
6227 	struct amdgpu_device *adev = drm_to_adev(dev);
6228 	struct dm_connector_state *dm_state =
6229 		to_dm_connector_state(state);
6230 	int ret = -EINVAL;
6231 
6232 	if (property == dev->mode_config.scaling_mode_property) {
6233 		switch (dm_state->scaling) {
6234 		case RMX_CENTER:
6235 			*val = DRM_MODE_SCALE_CENTER;
6236 			break;
6237 		case RMX_ASPECT:
6238 			*val = DRM_MODE_SCALE_ASPECT;
6239 			break;
6240 		case RMX_FULL:
6241 			*val = DRM_MODE_SCALE_FULLSCREEN;
6242 			break;
6243 		case RMX_OFF:
6244 		default:
6245 			*val = DRM_MODE_SCALE_NONE;
6246 			break;
6247 		}
6248 		ret = 0;
6249 	} else if (property == adev->mode_info.underscan_hborder_property) {
6250 		*val = dm_state->underscan_hborder;
6251 		ret = 0;
6252 	} else if (property == adev->mode_info.underscan_vborder_property) {
6253 		*val = dm_state->underscan_vborder;
6254 		ret = 0;
6255 	} else if (property == adev->mode_info.underscan_property) {
6256 		*val = dm_state->underscan_enable;
6257 		ret = 0;
6258 	} else if (property == adev->mode_info.abm_level_property) {
6259 		*val = dm_state->abm_level;
6260 		ret = 0;
6261 	}
6262 
6263 	return ret;
6264 }
6265 
6266 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6267 {
6268 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6269 
6270 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6271 }
6272 
6273 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6274 {
6275 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6276 	const struct dc_link *link = aconnector->dc_link;
6277 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6278 	struct amdgpu_display_manager *dm = &adev->dm;
6279 	int i;
6280 
6281 	/*
6282 	 * Call only if mst_mgr was iniitalized before since it's not done
6283 	 * for all connector types.
6284 	 */
6285 	if (aconnector->mst_mgr.dev)
6286 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6287 
6288 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6289 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6290 	for (i = 0; i < dm->num_of_edps; i++) {
6291 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6292 			backlight_device_unregister(dm->backlight_dev[i]);
6293 			dm->backlight_dev[i] = NULL;
6294 		}
6295 	}
6296 #endif
6297 
6298 	if (aconnector->dc_em_sink)
6299 		dc_sink_release(aconnector->dc_em_sink);
6300 	aconnector->dc_em_sink = NULL;
6301 	if (aconnector->dc_sink)
6302 		dc_sink_release(aconnector->dc_sink);
6303 	aconnector->dc_sink = NULL;
6304 
6305 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6306 	drm_connector_unregister(connector);
6307 	drm_connector_cleanup(connector);
6308 	if (aconnector->i2c) {
6309 		i2c_del_adapter(&aconnector->i2c->base);
6310 		kfree(aconnector->i2c);
6311 	}
6312 	kfree(aconnector->dm_dp_aux.aux.name);
6313 
6314 	kfree(connector);
6315 }
6316 
6317 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6318 {
6319 	struct dm_connector_state *state =
6320 		to_dm_connector_state(connector->state);
6321 
6322 	if (connector->state)
6323 		__drm_atomic_helper_connector_destroy_state(connector->state);
6324 
6325 	kfree(state);
6326 
6327 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6328 
6329 	if (state) {
6330 		state->scaling = RMX_OFF;
6331 		state->underscan_enable = false;
6332 		state->underscan_hborder = 0;
6333 		state->underscan_vborder = 0;
6334 		state->base.max_requested_bpc = 8;
6335 		state->vcpi_slots = 0;
6336 		state->pbn = 0;
6337 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6338 			state->abm_level = amdgpu_dm_abm_level;
6339 
6340 		__drm_atomic_helper_connector_reset(connector, &state->base);
6341 	}
6342 }
6343 
6344 struct drm_connector_state *
6345 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6346 {
6347 	struct dm_connector_state *state =
6348 		to_dm_connector_state(connector->state);
6349 
6350 	struct dm_connector_state *new_state =
6351 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6352 
6353 	if (!new_state)
6354 		return NULL;
6355 
6356 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6357 
6358 	new_state->freesync_capable = state->freesync_capable;
6359 	new_state->abm_level = state->abm_level;
6360 	new_state->scaling = state->scaling;
6361 	new_state->underscan_enable = state->underscan_enable;
6362 	new_state->underscan_hborder = state->underscan_hborder;
6363 	new_state->underscan_vborder = state->underscan_vborder;
6364 	new_state->vcpi_slots = state->vcpi_slots;
6365 	new_state->pbn = state->pbn;
6366 	return &new_state->base;
6367 }
6368 
6369 static int
6370 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6371 {
6372 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6373 		to_amdgpu_dm_connector(connector);
6374 	int r;
6375 
6376 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6377 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6378 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6379 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6380 		if (r)
6381 			return r;
6382 	}
6383 
6384 #if defined(CONFIG_DEBUG_FS)
6385 	connector_debugfs_init(amdgpu_dm_connector);
6386 #endif
6387 
6388 	return 0;
6389 }
6390 
6391 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6392 	.reset = amdgpu_dm_connector_funcs_reset,
6393 	.detect = amdgpu_dm_connector_detect,
6394 	.fill_modes = drm_helper_probe_single_connector_modes,
6395 	.destroy = amdgpu_dm_connector_destroy,
6396 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6397 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6398 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6399 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6400 	.late_register = amdgpu_dm_connector_late_register,
6401 	.early_unregister = amdgpu_dm_connector_unregister
6402 };
6403 
6404 static int get_modes(struct drm_connector *connector)
6405 {
6406 	return amdgpu_dm_connector_get_modes(connector);
6407 }
6408 
6409 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6410 {
6411 	struct dc_sink_init_data init_params = {
6412 			.link = aconnector->dc_link,
6413 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6414 	};
6415 	struct edid *edid;
6416 
6417 	if (!aconnector->base.edid_blob_ptr) {
6418 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6419 				aconnector->base.name);
6420 
6421 		aconnector->base.force = DRM_FORCE_OFF;
6422 		aconnector->base.override_edid = false;
6423 		return;
6424 	}
6425 
6426 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6427 
6428 	aconnector->edid = edid;
6429 
6430 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6431 		aconnector->dc_link,
6432 		(uint8_t *)edid,
6433 		(edid->extensions + 1) * EDID_LENGTH,
6434 		&init_params);
6435 
6436 	if (aconnector->base.force == DRM_FORCE_ON) {
6437 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6438 		aconnector->dc_link->local_sink :
6439 		aconnector->dc_em_sink;
6440 		dc_sink_retain(aconnector->dc_sink);
6441 	}
6442 }
6443 
6444 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6445 {
6446 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6447 
6448 	/*
6449 	 * In case of headless boot with force on for DP managed connector
6450 	 * Those settings have to be != 0 to get initial modeset
6451 	 */
6452 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6453 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6454 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6455 	}
6456 
6457 
6458 	aconnector->base.override_edid = true;
6459 	create_eml_sink(aconnector);
6460 }
6461 
6462 static struct dc_stream_state *
6463 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6464 				const struct drm_display_mode *drm_mode,
6465 				const struct dm_connector_state *dm_state,
6466 				const struct dc_stream_state *old_stream)
6467 {
6468 	struct drm_connector *connector = &aconnector->base;
6469 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6470 	struct dc_stream_state *stream;
6471 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6472 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6473 	enum dc_status dc_result = DC_OK;
6474 
6475 	do {
6476 		stream = create_stream_for_sink(aconnector, drm_mode,
6477 						dm_state, old_stream,
6478 						requested_bpc);
6479 		if (stream == NULL) {
6480 			DRM_ERROR("Failed to create stream for sink!\n");
6481 			break;
6482 		}
6483 
6484 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6485 
6486 		if (dc_result != DC_OK) {
6487 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6488 				      drm_mode->hdisplay,
6489 				      drm_mode->vdisplay,
6490 				      drm_mode->clock,
6491 				      dc_result,
6492 				      dc_status_to_str(dc_result));
6493 
6494 			dc_stream_release(stream);
6495 			stream = NULL;
6496 			requested_bpc -= 2; /* lower bpc to retry validation */
6497 		}
6498 
6499 	} while (stream == NULL && requested_bpc >= 6);
6500 
6501 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6502 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6503 
6504 		aconnector->force_yuv420_output = true;
6505 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6506 						dm_state, old_stream);
6507 		aconnector->force_yuv420_output = false;
6508 	}
6509 
6510 	return stream;
6511 }
6512 
6513 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6514 				   struct drm_display_mode *mode)
6515 {
6516 	int result = MODE_ERROR;
6517 	struct dc_sink *dc_sink;
6518 	/* TODO: Unhardcode stream count */
6519 	struct dc_stream_state *stream;
6520 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6521 
6522 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6523 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6524 		return result;
6525 
6526 	/*
6527 	 * Only run this the first time mode_valid is called to initilialize
6528 	 * EDID mgmt
6529 	 */
6530 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6531 		!aconnector->dc_em_sink)
6532 		handle_edid_mgmt(aconnector);
6533 
6534 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6535 
6536 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6537 				aconnector->base.force != DRM_FORCE_ON) {
6538 		DRM_ERROR("dc_sink is NULL!\n");
6539 		goto fail;
6540 	}
6541 
6542 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6543 	if (stream) {
6544 		dc_stream_release(stream);
6545 		result = MODE_OK;
6546 	}
6547 
6548 fail:
6549 	/* TODO: error handling*/
6550 	return result;
6551 }
6552 
6553 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6554 				struct dc_info_packet *out)
6555 {
6556 	struct hdmi_drm_infoframe frame;
6557 	unsigned char buf[30]; /* 26 + 4 */
6558 	ssize_t len;
6559 	int ret, i;
6560 
6561 	memset(out, 0, sizeof(*out));
6562 
6563 	if (!state->hdr_output_metadata)
6564 		return 0;
6565 
6566 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6567 	if (ret)
6568 		return ret;
6569 
6570 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6571 	if (len < 0)
6572 		return (int)len;
6573 
6574 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6575 	if (len != 30)
6576 		return -EINVAL;
6577 
6578 	/* Prepare the infopacket for DC. */
6579 	switch (state->connector->connector_type) {
6580 	case DRM_MODE_CONNECTOR_HDMIA:
6581 		out->hb0 = 0x87; /* type */
6582 		out->hb1 = 0x01; /* version */
6583 		out->hb2 = 0x1A; /* length */
6584 		out->sb[0] = buf[3]; /* checksum */
6585 		i = 1;
6586 		break;
6587 
6588 	case DRM_MODE_CONNECTOR_DisplayPort:
6589 	case DRM_MODE_CONNECTOR_eDP:
6590 		out->hb0 = 0x00; /* sdp id, zero */
6591 		out->hb1 = 0x87; /* type */
6592 		out->hb2 = 0x1D; /* payload len - 1 */
6593 		out->hb3 = (0x13 << 2); /* sdp version */
6594 		out->sb[0] = 0x01; /* version */
6595 		out->sb[1] = 0x1A; /* length */
6596 		i = 2;
6597 		break;
6598 
6599 	default:
6600 		return -EINVAL;
6601 	}
6602 
6603 	memcpy(&out->sb[i], &buf[4], 26);
6604 	out->valid = true;
6605 
6606 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6607 		       sizeof(out->sb), false);
6608 
6609 	return 0;
6610 }
6611 
6612 static int
6613 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6614 				 struct drm_atomic_state *state)
6615 {
6616 	struct drm_connector_state *new_con_state =
6617 		drm_atomic_get_new_connector_state(state, conn);
6618 	struct drm_connector_state *old_con_state =
6619 		drm_atomic_get_old_connector_state(state, conn);
6620 	struct drm_crtc *crtc = new_con_state->crtc;
6621 	struct drm_crtc_state *new_crtc_state;
6622 	int ret;
6623 
6624 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6625 
6626 	if (!crtc)
6627 		return 0;
6628 
6629 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6630 		struct dc_info_packet hdr_infopacket;
6631 
6632 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6633 		if (ret)
6634 			return ret;
6635 
6636 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6637 		if (IS_ERR(new_crtc_state))
6638 			return PTR_ERR(new_crtc_state);
6639 
6640 		/*
6641 		 * DC considers the stream backends changed if the
6642 		 * static metadata changes. Forcing the modeset also
6643 		 * gives a simple way for userspace to switch from
6644 		 * 8bpc to 10bpc when setting the metadata to enter
6645 		 * or exit HDR.
6646 		 *
6647 		 * Changing the static metadata after it's been
6648 		 * set is permissible, however. So only force a
6649 		 * modeset if we're entering or exiting HDR.
6650 		 */
6651 		new_crtc_state->mode_changed =
6652 			!old_con_state->hdr_output_metadata ||
6653 			!new_con_state->hdr_output_metadata;
6654 	}
6655 
6656 	return 0;
6657 }
6658 
6659 static const struct drm_connector_helper_funcs
6660 amdgpu_dm_connector_helper_funcs = {
6661 	/*
6662 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6663 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6664 	 * are missing after user start lightdm. So we need to renew modes list.
6665 	 * in get_modes call back, not just return the modes count
6666 	 */
6667 	.get_modes = get_modes,
6668 	.mode_valid = amdgpu_dm_connector_mode_valid,
6669 	.atomic_check = amdgpu_dm_connector_atomic_check,
6670 };
6671 
6672 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6673 {
6674 }
6675 
6676 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6677 {
6678 	struct drm_atomic_state *state = new_crtc_state->state;
6679 	struct drm_plane *plane;
6680 	int num_active = 0;
6681 
6682 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6683 		struct drm_plane_state *new_plane_state;
6684 
6685 		/* Cursor planes are "fake". */
6686 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6687 			continue;
6688 
6689 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6690 
6691 		if (!new_plane_state) {
6692 			/*
6693 			 * The plane is enable on the CRTC and hasn't changed
6694 			 * state. This means that it previously passed
6695 			 * validation and is therefore enabled.
6696 			 */
6697 			num_active += 1;
6698 			continue;
6699 		}
6700 
6701 		/* We need a framebuffer to be considered enabled. */
6702 		num_active += (new_plane_state->fb != NULL);
6703 	}
6704 
6705 	return num_active;
6706 }
6707 
6708 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6709 					 struct drm_crtc_state *new_crtc_state)
6710 {
6711 	struct dm_crtc_state *dm_new_crtc_state =
6712 		to_dm_crtc_state(new_crtc_state);
6713 
6714 	dm_new_crtc_state->active_planes = 0;
6715 
6716 	if (!dm_new_crtc_state->stream)
6717 		return;
6718 
6719 	dm_new_crtc_state->active_planes =
6720 		count_crtc_active_planes(new_crtc_state);
6721 }
6722 
6723 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6724 				       struct drm_atomic_state *state)
6725 {
6726 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6727 									  crtc);
6728 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6729 	struct dc *dc = adev->dm.dc;
6730 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6731 	int ret = -EINVAL;
6732 
6733 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6734 
6735 	dm_update_crtc_active_planes(crtc, crtc_state);
6736 
6737 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6738 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6739 		return ret;
6740 	}
6741 
6742 	/*
6743 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6744 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6745 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6746 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6747 	 */
6748 	if (crtc_state->enable &&
6749 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6750 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6751 		return -EINVAL;
6752 	}
6753 
6754 	/* In some use cases, like reset, no stream is attached */
6755 	if (!dm_crtc_state->stream)
6756 		return 0;
6757 
6758 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6759 		return 0;
6760 
6761 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6762 	return ret;
6763 }
6764 
6765 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6766 				      const struct drm_display_mode *mode,
6767 				      struct drm_display_mode *adjusted_mode)
6768 {
6769 	return true;
6770 }
6771 
6772 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6773 	.disable = dm_crtc_helper_disable,
6774 	.atomic_check = dm_crtc_helper_atomic_check,
6775 	.mode_fixup = dm_crtc_helper_mode_fixup,
6776 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6777 };
6778 
6779 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6780 {
6781 
6782 }
6783 
6784 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6785 {
6786 	switch (display_color_depth) {
6787 		case COLOR_DEPTH_666:
6788 			return 6;
6789 		case COLOR_DEPTH_888:
6790 			return 8;
6791 		case COLOR_DEPTH_101010:
6792 			return 10;
6793 		case COLOR_DEPTH_121212:
6794 			return 12;
6795 		case COLOR_DEPTH_141414:
6796 			return 14;
6797 		case COLOR_DEPTH_161616:
6798 			return 16;
6799 		default:
6800 			break;
6801 		}
6802 	return 0;
6803 }
6804 
6805 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6806 					  struct drm_crtc_state *crtc_state,
6807 					  struct drm_connector_state *conn_state)
6808 {
6809 	struct drm_atomic_state *state = crtc_state->state;
6810 	struct drm_connector *connector = conn_state->connector;
6811 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6812 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6813 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6814 	struct drm_dp_mst_topology_mgr *mst_mgr;
6815 	struct drm_dp_mst_port *mst_port;
6816 	enum dc_color_depth color_depth;
6817 	int clock, bpp = 0;
6818 	bool is_y420 = false;
6819 
6820 	if (!aconnector->port || !aconnector->dc_sink)
6821 		return 0;
6822 
6823 	mst_port = aconnector->port;
6824 	mst_mgr = &aconnector->mst_port->mst_mgr;
6825 
6826 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6827 		return 0;
6828 
6829 	if (!state->duplicated) {
6830 		int max_bpc = conn_state->max_requested_bpc;
6831 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6832 				aconnector->force_yuv420_output;
6833 		color_depth = convert_color_depth_from_display_info(connector,
6834 								    is_y420,
6835 								    max_bpc);
6836 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6837 		clock = adjusted_mode->clock;
6838 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6839 	}
6840 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6841 									   mst_mgr,
6842 									   mst_port,
6843 									   dm_new_connector_state->pbn,
6844 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6845 	if (dm_new_connector_state->vcpi_slots < 0) {
6846 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6847 		return dm_new_connector_state->vcpi_slots;
6848 	}
6849 	return 0;
6850 }
6851 
6852 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6853 	.disable = dm_encoder_helper_disable,
6854 	.atomic_check = dm_encoder_helper_atomic_check
6855 };
6856 
6857 #if defined(CONFIG_DRM_AMD_DC_DCN)
6858 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6859 					    struct dc_state *dc_state,
6860 					    struct dsc_mst_fairness_vars *vars)
6861 {
6862 	struct dc_stream_state *stream = NULL;
6863 	struct drm_connector *connector;
6864 	struct drm_connector_state *new_con_state;
6865 	struct amdgpu_dm_connector *aconnector;
6866 	struct dm_connector_state *dm_conn_state;
6867 	int i, j, clock;
6868 	int vcpi, pbn_div, pbn = 0;
6869 
6870 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6871 
6872 		aconnector = to_amdgpu_dm_connector(connector);
6873 
6874 		if (!aconnector->port)
6875 			continue;
6876 
6877 		if (!new_con_state || !new_con_state->crtc)
6878 			continue;
6879 
6880 		dm_conn_state = to_dm_connector_state(new_con_state);
6881 
6882 		for (j = 0; j < dc_state->stream_count; j++) {
6883 			stream = dc_state->streams[j];
6884 			if (!stream)
6885 				continue;
6886 
6887 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6888 				break;
6889 
6890 			stream = NULL;
6891 		}
6892 
6893 		if (!stream)
6894 			continue;
6895 
6896 		if (stream->timing.flags.DSC != 1) {
6897 			drm_dp_mst_atomic_enable_dsc(state,
6898 						     aconnector->port,
6899 						     dm_conn_state->pbn,
6900 						     0,
6901 						     false);
6902 			continue;
6903 		}
6904 
6905 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6906 		clock = stream->timing.pix_clk_100hz / 10;
6907 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
6908 		for (j = 0; j < dc_state->stream_count; j++) {
6909 			if (vars[j].aconnector == aconnector) {
6910 				pbn = vars[j].pbn;
6911 				break;
6912 			}
6913 		}
6914 
6915 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6916 						    aconnector->port,
6917 						    pbn, pbn_div,
6918 						    true);
6919 		if (vcpi < 0)
6920 			return vcpi;
6921 
6922 		dm_conn_state->pbn = pbn;
6923 		dm_conn_state->vcpi_slots = vcpi;
6924 	}
6925 	return 0;
6926 }
6927 #endif
6928 
6929 static void dm_drm_plane_reset(struct drm_plane *plane)
6930 {
6931 	struct dm_plane_state *amdgpu_state = NULL;
6932 
6933 	if (plane->state)
6934 		plane->funcs->atomic_destroy_state(plane, plane->state);
6935 
6936 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6937 	WARN_ON(amdgpu_state == NULL);
6938 
6939 	if (amdgpu_state)
6940 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6941 }
6942 
6943 static struct drm_plane_state *
6944 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6945 {
6946 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6947 
6948 	old_dm_plane_state = to_dm_plane_state(plane->state);
6949 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6950 	if (!dm_plane_state)
6951 		return NULL;
6952 
6953 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6954 
6955 	if (old_dm_plane_state->dc_state) {
6956 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6957 		dc_plane_state_retain(dm_plane_state->dc_state);
6958 	}
6959 
6960 	return &dm_plane_state->base;
6961 }
6962 
6963 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6964 				struct drm_plane_state *state)
6965 {
6966 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6967 
6968 	if (dm_plane_state->dc_state)
6969 		dc_plane_state_release(dm_plane_state->dc_state);
6970 
6971 	drm_atomic_helper_plane_destroy_state(plane, state);
6972 }
6973 
6974 static const struct drm_plane_funcs dm_plane_funcs = {
6975 	.update_plane	= drm_atomic_helper_update_plane,
6976 	.disable_plane	= drm_atomic_helper_disable_plane,
6977 	.destroy	= drm_primary_helper_destroy,
6978 	.reset = dm_drm_plane_reset,
6979 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6980 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6981 	.format_mod_supported = dm_plane_format_mod_supported,
6982 };
6983 
6984 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6985 				      struct drm_plane_state *new_state)
6986 {
6987 	struct amdgpu_framebuffer *afb;
6988 	struct drm_gem_object *obj;
6989 	struct amdgpu_device *adev;
6990 	struct amdgpu_bo *rbo;
6991 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6992 	struct list_head list;
6993 	struct ttm_validate_buffer tv;
6994 	struct ww_acquire_ctx ticket;
6995 	uint32_t domain;
6996 	int r;
6997 
6998 	if (!new_state->fb) {
6999 		DRM_DEBUG_KMS("No FB bound\n");
7000 		return 0;
7001 	}
7002 
7003 	afb = to_amdgpu_framebuffer(new_state->fb);
7004 	obj = new_state->fb->obj[0];
7005 	rbo = gem_to_amdgpu_bo(obj);
7006 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7007 	INIT_LIST_HEAD(&list);
7008 
7009 	tv.bo = &rbo->tbo;
7010 	tv.num_shared = 1;
7011 	list_add(&tv.head, &list);
7012 
7013 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7014 	if (r) {
7015 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7016 		return r;
7017 	}
7018 
7019 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7020 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7021 	else
7022 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7023 
7024 	r = amdgpu_bo_pin(rbo, domain);
7025 	if (unlikely(r != 0)) {
7026 		if (r != -ERESTARTSYS)
7027 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7028 		ttm_eu_backoff_reservation(&ticket, &list);
7029 		return r;
7030 	}
7031 
7032 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7033 	if (unlikely(r != 0)) {
7034 		amdgpu_bo_unpin(rbo);
7035 		ttm_eu_backoff_reservation(&ticket, &list);
7036 		DRM_ERROR("%p bind failed\n", rbo);
7037 		return r;
7038 	}
7039 
7040 	ttm_eu_backoff_reservation(&ticket, &list);
7041 
7042 	afb->address = amdgpu_bo_gpu_offset(rbo);
7043 
7044 	amdgpu_bo_ref(rbo);
7045 
7046 	/**
7047 	 * We don't do surface updates on planes that have been newly created,
7048 	 * but we also don't have the afb->address during atomic check.
7049 	 *
7050 	 * Fill in buffer attributes depending on the address here, but only on
7051 	 * newly created planes since they're not being used by DC yet and this
7052 	 * won't modify global state.
7053 	 */
7054 	dm_plane_state_old = to_dm_plane_state(plane->state);
7055 	dm_plane_state_new = to_dm_plane_state(new_state);
7056 
7057 	if (dm_plane_state_new->dc_state &&
7058 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7059 		struct dc_plane_state *plane_state =
7060 			dm_plane_state_new->dc_state;
7061 		bool force_disable_dcc = !plane_state->dcc.enable;
7062 
7063 		fill_plane_buffer_attributes(
7064 			adev, afb, plane_state->format, plane_state->rotation,
7065 			afb->tiling_flags,
7066 			&plane_state->tiling_info, &plane_state->plane_size,
7067 			&plane_state->dcc, &plane_state->address,
7068 			afb->tmz_surface, force_disable_dcc);
7069 	}
7070 
7071 	return 0;
7072 }
7073 
7074 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7075 				       struct drm_plane_state *old_state)
7076 {
7077 	struct amdgpu_bo *rbo;
7078 	int r;
7079 
7080 	if (!old_state->fb)
7081 		return;
7082 
7083 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7084 	r = amdgpu_bo_reserve(rbo, false);
7085 	if (unlikely(r)) {
7086 		DRM_ERROR("failed to reserve rbo before unpin\n");
7087 		return;
7088 	}
7089 
7090 	amdgpu_bo_unpin(rbo);
7091 	amdgpu_bo_unreserve(rbo);
7092 	amdgpu_bo_unref(&rbo);
7093 }
7094 
7095 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7096 				       struct drm_crtc_state *new_crtc_state)
7097 {
7098 	struct drm_framebuffer *fb = state->fb;
7099 	int min_downscale, max_upscale;
7100 	int min_scale = 0;
7101 	int max_scale = INT_MAX;
7102 
7103 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7104 	if (fb && state->crtc) {
7105 		/* Validate viewport to cover the case when only the position changes */
7106 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7107 			int viewport_width = state->crtc_w;
7108 			int viewport_height = state->crtc_h;
7109 
7110 			if (state->crtc_x < 0)
7111 				viewport_width += state->crtc_x;
7112 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7113 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7114 
7115 			if (state->crtc_y < 0)
7116 				viewport_height += state->crtc_y;
7117 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7118 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7119 
7120 			if (viewport_width < 0 || viewport_height < 0) {
7121 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7122 				return -EINVAL;
7123 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7124 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7125 				return -EINVAL;
7126 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7127 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7128 				return -EINVAL;
7129 			}
7130 
7131 		}
7132 
7133 		/* Get min/max allowed scaling factors from plane caps. */
7134 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7135 					     &min_downscale, &max_upscale);
7136 		/*
7137 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7138 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7139 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7140 		 */
7141 		min_scale = (1000 << 16) / max_upscale;
7142 		max_scale = (1000 << 16) / min_downscale;
7143 	}
7144 
7145 	return drm_atomic_helper_check_plane_state(
7146 		state, new_crtc_state, min_scale, max_scale, true, true);
7147 }
7148 
7149 static int dm_plane_atomic_check(struct drm_plane *plane,
7150 				 struct drm_atomic_state *state)
7151 {
7152 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7153 										 plane);
7154 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7155 	struct dc *dc = adev->dm.dc;
7156 	struct dm_plane_state *dm_plane_state;
7157 	struct dc_scaling_info scaling_info;
7158 	struct drm_crtc_state *new_crtc_state;
7159 	int ret;
7160 
7161 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7162 
7163 	dm_plane_state = to_dm_plane_state(new_plane_state);
7164 
7165 	if (!dm_plane_state->dc_state)
7166 		return 0;
7167 
7168 	new_crtc_state =
7169 		drm_atomic_get_new_crtc_state(state,
7170 					      new_plane_state->crtc);
7171 	if (!new_crtc_state)
7172 		return -EINVAL;
7173 
7174 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7175 	if (ret)
7176 		return ret;
7177 
7178 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7179 	if (ret)
7180 		return ret;
7181 
7182 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7183 		return 0;
7184 
7185 	return -EINVAL;
7186 }
7187 
7188 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7189 				       struct drm_atomic_state *state)
7190 {
7191 	/* Only support async updates on cursor planes. */
7192 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7193 		return -EINVAL;
7194 
7195 	return 0;
7196 }
7197 
7198 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7199 					 struct drm_atomic_state *state)
7200 {
7201 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7202 									   plane);
7203 	struct drm_plane_state *old_state =
7204 		drm_atomic_get_old_plane_state(state, plane);
7205 
7206 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7207 
7208 	swap(plane->state->fb, new_state->fb);
7209 
7210 	plane->state->src_x = new_state->src_x;
7211 	plane->state->src_y = new_state->src_y;
7212 	plane->state->src_w = new_state->src_w;
7213 	plane->state->src_h = new_state->src_h;
7214 	plane->state->crtc_x = new_state->crtc_x;
7215 	plane->state->crtc_y = new_state->crtc_y;
7216 	plane->state->crtc_w = new_state->crtc_w;
7217 	plane->state->crtc_h = new_state->crtc_h;
7218 
7219 	handle_cursor_update(plane, old_state);
7220 }
7221 
7222 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7223 	.prepare_fb = dm_plane_helper_prepare_fb,
7224 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7225 	.atomic_check = dm_plane_atomic_check,
7226 	.atomic_async_check = dm_plane_atomic_async_check,
7227 	.atomic_async_update = dm_plane_atomic_async_update
7228 };
7229 
7230 /*
7231  * TODO: these are currently initialized to rgb formats only.
7232  * For future use cases we should either initialize them dynamically based on
7233  * plane capabilities, or initialize this array to all formats, so internal drm
7234  * check will succeed, and let DC implement proper check
7235  */
7236 static const uint32_t rgb_formats[] = {
7237 	DRM_FORMAT_XRGB8888,
7238 	DRM_FORMAT_ARGB8888,
7239 	DRM_FORMAT_RGBA8888,
7240 	DRM_FORMAT_XRGB2101010,
7241 	DRM_FORMAT_XBGR2101010,
7242 	DRM_FORMAT_ARGB2101010,
7243 	DRM_FORMAT_ABGR2101010,
7244 	DRM_FORMAT_XRGB16161616,
7245 	DRM_FORMAT_XBGR16161616,
7246 	DRM_FORMAT_ARGB16161616,
7247 	DRM_FORMAT_ABGR16161616,
7248 	DRM_FORMAT_XBGR8888,
7249 	DRM_FORMAT_ABGR8888,
7250 	DRM_FORMAT_RGB565,
7251 };
7252 
7253 static const uint32_t overlay_formats[] = {
7254 	DRM_FORMAT_XRGB8888,
7255 	DRM_FORMAT_ARGB8888,
7256 	DRM_FORMAT_RGBA8888,
7257 	DRM_FORMAT_XBGR8888,
7258 	DRM_FORMAT_ABGR8888,
7259 	DRM_FORMAT_RGB565
7260 };
7261 
7262 static const u32 cursor_formats[] = {
7263 	DRM_FORMAT_ARGB8888
7264 };
7265 
7266 static int get_plane_formats(const struct drm_plane *plane,
7267 			     const struct dc_plane_cap *plane_cap,
7268 			     uint32_t *formats, int max_formats)
7269 {
7270 	int i, num_formats = 0;
7271 
7272 	/*
7273 	 * TODO: Query support for each group of formats directly from
7274 	 * DC plane caps. This will require adding more formats to the
7275 	 * caps list.
7276 	 */
7277 
7278 	switch (plane->type) {
7279 	case DRM_PLANE_TYPE_PRIMARY:
7280 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7281 			if (num_formats >= max_formats)
7282 				break;
7283 
7284 			formats[num_formats++] = rgb_formats[i];
7285 		}
7286 
7287 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7288 			formats[num_formats++] = DRM_FORMAT_NV12;
7289 		if (plane_cap && plane_cap->pixel_format_support.p010)
7290 			formats[num_formats++] = DRM_FORMAT_P010;
7291 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7292 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7293 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7294 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7295 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7296 		}
7297 		break;
7298 
7299 	case DRM_PLANE_TYPE_OVERLAY:
7300 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7301 			if (num_formats >= max_formats)
7302 				break;
7303 
7304 			formats[num_formats++] = overlay_formats[i];
7305 		}
7306 		break;
7307 
7308 	case DRM_PLANE_TYPE_CURSOR:
7309 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7310 			if (num_formats >= max_formats)
7311 				break;
7312 
7313 			formats[num_formats++] = cursor_formats[i];
7314 		}
7315 		break;
7316 	}
7317 
7318 	return num_formats;
7319 }
7320 
7321 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7322 				struct drm_plane *plane,
7323 				unsigned long possible_crtcs,
7324 				const struct dc_plane_cap *plane_cap)
7325 {
7326 	uint32_t formats[32];
7327 	int num_formats;
7328 	int res = -EPERM;
7329 	unsigned int supported_rotations;
7330 	uint64_t *modifiers = NULL;
7331 
7332 	num_formats = get_plane_formats(plane, plane_cap, formats,
7333 					ARRAY_SIZE(formats));
7334 
7335 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7336 	if (res)
7337 		return res;
7338 
7339 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7340 				       &dm_plane_funcs, formats, num_formats,
7341 				       modifiers, plane->type, NULL);
7342 	kfree(modifiers);
7343 	if (res)
7344 		return res;
7345 
7346 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7347 	    plane_cap && plane_cap->per_pixel_alpha) {
7348 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7349 					  BIT(DRM_MODE_BLEND_PREMULTI);
7350 
7351 		drm_plane_create_alpha_property(plane);
7352 		drm_plane_create_blend_mode_property(plane, blend_caps);
7353 	}
7354 
7355 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7356 	    plane_cap &&
7357 	    (plane_cap->pixel_format_support.nv12 ||
7358 	     plane_cap->pixel_format_support.p010)) {
7359 		/* This only affects YUV formats. */
7360 		drm_plane_create_color_properties(
7361 			plane,
7362 			BIT(DRM_COLOR_YCBCR_BT601) |
7363 			BIT(DRM_COLOR_YCBCR_BT709) |
7364 			BIT(DRM_COLOR_YCBCR_BT2020),
7365 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7366 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7367 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7368 	}
7369 
7370 	supported_rotations =
7371 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7372 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7373 
7374 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7375 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7376 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7377 						   supported_rotations);
7378 
7379 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7380 
7381 	/* Create (reset) the plane state */
7382 	if (plane->funcs->reset)
7383 		plane->funcs->reset(plane);
7384 
7385 	return 0;
7386 }
7387 
7388 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7389 			       struct drm_plane *plane,
7390 			       uint32_t crtc_index)
7391 {
7392 	struct amdgpu_crtc *acrtc = NULL;
7393 	struct drm_plane *cursor_plane;
7394 
7395 	int res = -ENOMEM;
7396 
7397 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7398 	if (!cursor_plane)
7399 		goto fail;
7400 
7401 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7402 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7403 
7404 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7405 	if (!acrtc)
7406 		goto fail;
7407 
7408 	res = drm_crtc_init_with_planes(
7409 			dm->ddev,
7410 			&acrtc->base,
7411 			plane,
7412 			cursor_plane,
7413 			&amdgpu_dm_crtc_funcs, NULL);
7414 
7415 	if (res)
7416 		goto fail;
7417 
7418 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7419 
7420 	/* Create (reset) the plane state */
7421 	if (acrtc->base.funcs->reset)
7422 		acrtc->base.funcs->reset(&acrtc->base);
7423 
7424 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7425 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7426 
7427 	acrtc->crtc_id = crtc_index;
7428 	acrtc->base.enabled = false;
7429 	acrtc->otg_inst = -1;
7430 
7431 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7432 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7433 				   true, MAX_COLOR_LUT_ENTRIES);
7434 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7435 
7436 	return 0;
7437 
7438 fail:
7439 	kfree(acrtc);
7440 	kfree(cursor_plane);
7441 	return res;
7442 }
7443 
7444 
7445 static int to_drm_connector_type(enum amd_signal_type st)
7446 {
7447 	switch (st) {
7448 	case SIGNAL_TYPE_HDMI_TYPE_A:
7449 		return DRM_MODE_CONNECTOR_HDMIA;
7450 	case SIGNAL_TYPE_EDP:
7451 		return DRM_MODE_CONNECTOR_eDP;
7452 	case SIGNAL_TYPE_LVDS:
7453 		return DRM_MODE_CONNECTOR_LVDS;
7454 	case SIGNAL_TYPE_RGB:
7455 		return DRM_MODE_CONNECTOR_VGA;
7456 	case SIGNAL_TYPE_DISPLAY_PORT:
7457 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7458 		return DRM_MODE_CONNECTOR_DisplayPort;
7459 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7460 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7461 		return DRM_MODE_CONNECTOR_DVID;
7462 	case SIGNAL_TYPE_VIRTUAL:
7463 		return DRM_MODE_CONNECTOR_VIRTUAL;
7464 
7465 	default:
7466 		return DRM_MODE_CONNECTOR_Unknown;
7467 	}
7468 }
7469 
7470 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7471 {
7472 	struct drm_encoder *encoder;
7473 
7474 	/* There is only one encoder per connector */
7475 	drm_connector_for_each_possible_encoder(connector, encoder)
7476 		return encoder;
7477 
7478 	return NULL;
7479 }
7480 
7481 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7482 {
7483 	struct drm_encoder *encoder;
7484 	struct amdgpu_encoder *amdgpu_encoder;
7485 
7486 	encoder = amdgpu_dm_connector_to_encoder(connector);
7487 
7488 	if (encoder == NULL)
7489 		return;
7490 
7491 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7492 
7493 	amdgpu_encoder->native_mode.clock = 0;
7494 
7495 	if (!list_empty(&connector->probed_modes)) {
7496 		struct drm_display_mode *preferred_mode = NULL;
7497 
7498 		list_for_each_entry(preferred_mode,
7499 				    &connector->probed_modes,
7500 				    head) {
7501 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7502 				amdgpu_encoder->native_mode = *preferred_mode;
7503 
7504 			break;
7505 		}
7506 
7507 	}
7508 }
7509 
7510 static struct drm_display_mode *
7511 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7512 			     char *name,
7513 			     int hdisplay, int vdisplay)
7514 {
7515 	struct drm_device *dev = encoder->dev;
7516 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7517 	struct drm_display_mode *mode = NULL;
7518 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7519 
7520 	mode = drm_mode_duplicate(dev, native_mode);
7521 
7522 	if (mode == NULL)
7523 		return NULL;
7524 
7525 	mode->hdisplay = hdisplay;
7526 	mode->vdisplay = vdisplay;
7527 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7528 #ifdef __linux__
7529 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7530 #else
7531 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7532 #endif
7533 
7534 	return mode;
7535 
7536 }
7537 
7538 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7539 						 struct drm_connector *connector)
7540 {
7541 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7542 	struct drm_display_mode *mode = NULL;
7543 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7544 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7545 				to_amdgpu_dm_connector(connector);
7546 	int i;
7547 	int n;
7548 	struct mode_size {
7549 		char name[DRM_DISPLAY_MODE_LEN];
7550 		int w;
7551 		int h;
7552 	} common_modes[] = {
7553 		{  "640x480",  640,  480},
7554 		{  "800x600",  800,  600},
7555 		{ "1024x768", 1024,  768},
7556 		{ "1280x720", 1280,  720},
7557 		{ "1280x800", 1280,  800},
7558 		{"1280x1024", 1280, 1024},
7559 		{ "1440x900", 1440,  900},
7560 		{"1680x1050", 1680, 1050},
7561 		{"1600x1200", 1600, 1200},
7562 		{"1920x1080", 1920, 1080},
7563 		{"1920x1200", 1920, 1200}
7564 	};
7565 
7566 	n = ARRAY_SIZE(common_modes);
7567 
7568 	for (i = 0; i < n; i++) {
7569 		struct drm_display_mode *curmode = NULL;
7570 		bool mode_existed = false;
7571 
7572 		if (common_modes[i].w > native_mode->hdisplay ||
7573 		    common_modes[i].h > native_mode->vdisplay ||
7574 		   (common_modes[i].w == native_mode->hdisplay &&
7575 		    common_modes[i].h == native_mode->vdisplay))
7576 			continue;
7577 
7578 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7579 			if (common_modes[i].w == curmode->hdisplay &&
7580 			    common_modes[i].h == curmode->vdisplay) {
7581 				mode_existed = true;
7582 				break;
7583 			}
7584 		}
7585 
7586 		if (mode_existed)
7587 			continue;
7588 
7589 		mode = amdgpu_dm_create_common_mode(encoder,
7590 				common_modes[i].name, common_modes[i].w,
7591 				common_modes[i].h);
7592 		if (!mode)
7593 			continue;
7594 
7595 		drm_mode_probed_add(connector, mode);
7596 		amdgpu_dm_connector->num_modes++;
7597 	}
7598 }
7599 
7600 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7601 {
7602 	struct drm_encoder *encoder;
7603 	struct amdgpu_encoder *amdgpu_encoder;
7604 	const struct drm_display_mode *native_mode;
7605 
7606 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7607 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7608 		return;
7609 
7610 	encoder = amdgpu_dm_connector_to_encoder(connector);
7611 	if (!encoder)
7612 		return;
7613 
7614 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7615 
7616 	native_mode = &amdgpu_encoder->native_mode;
7617 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7618 		return;
7619 
7620 	drm_connector_set_panel_orientation_with_quirk(connector,
7621 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7622 						       native_mode->hdisplay,
7623 						       native_mode->vdisplay);
7624 }
7625 
7626 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7627 					      struct edid *edid)
7628 {
7629 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7630 			to_amdgpu_dm_connector(connector);
7631 
7632 	if (edid) {
7633 		/* empty probed_modes */
7634 		INIT_LIST_HEAD(&connector->probed_modes);
7635 		amdgpu_dm_connector->num_modes =
7636 				drm_add_edid_modes(connector, edid);
7637 
7638 		/* sorting the probed modes before calling function
7639 		 * amdgpu_dm_get_native_mode() since EDID can have
7640 		 * more than one preferred mode. The modes that are
7641 		 * later in the probed mode list could be of higher
7642 		 * and preferred resolution. For example, 3840x2160
7643 		 * resolution in base EDID preferred timing and 4096x2160
7644 		 * preferred resolution in DID extension block later.
7645 		 */
7646 		drm_mode_sort(&connector->probed_modes);
7647 		amdgpu_dm_get_native_mode(connector);
7648 
7649 		/* Freesync capabilities are reset by calling
7650 		 * drm_add_edid_modes() and need to be
7651 		 * restored here.
7652 		 */
7653 		amdgpu_dm_update_freesync_caps(connector, edid);
7654 
7655 		amdgpu_set_panel_orientation(connector);
7656 	} else {
7657 		amdgpu_dm_connector->num_modes = 0;
7658 	}
7659 }
7660 
7661 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7662 			      struct drm_display_mode *mode)
7663 {
7664 	struct drm_display_mode *m;
7665 
7666 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7667 		if (drm_mode_equal(m, mode))
7668 			return true;
7669 	}
7670 
7671 	return false;
7672 }
7673 
7674 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7675 {
7676 	const struct drm_display_mode *m;
7677 	struct drm_display_mode *new_mode;
7678 	uint i;
7679 	uint32_t new_modes_count = 0;
7680 
7681 	/* Standard FPS values
7682 	 *
7683 	 * 23.976   - TV/NTSC
7684 	 * 24 	    - Cinema
7685 	 * 25 	    - TV/PAL
7686 	 * 29.97    - TV/NTSC
7687 	 * 30 	    - TV/NTSC
7688 	 * 48 	    - Cinema HFR
7689 	 * 50 	    - TV/PAL
7690 	 * 60 	    - Commonly used
7691 	 * 48,72,96 - Multiples of 24
7692 	 */
7693 	static const uint32_t common_rates[] = {
7694 		23976, 24000, 25000, 29970, 30000,
7695 		48000, 50000, 60000, 72000, 96000
7696 	};
7697 
7698 	/*
7699 	 * Find mode with highest refresh rate with the same resolution
7700 	 * as the preferred mode. Some monitors report a preferred mode
7701 	 * with lower resolution than the highest refresh rate supported.
7702 	 */
7703 
7704 	m = get_highest_refresh_rate_mode(aconnector, true);
7705 	if (!m)
7706 		return 0;
7707 
7708 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7709 		uint64_t target_vtotal, target_vtotal_diff;
7710 		uint64_t num, den;
7711 
7712 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7713 			continue;
7714 
7715 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7716 		    common_rates[i] > aconnector->max_vfreq * 1000)
7717 			continue;
7718 
7719 		num = (unsigned long long)m->clock * 1000 * 1000;
7720 		den = common_rates[i] * (unsigned long long)m->htotal;
7721 		target_vtotal = div_u64(num, den);
7722 		target_vtotal_diff = target_vtotal - m->vtotal;
7723 
7724 		/* Check for illegal modes */
7725 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7726 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7727 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7728 			continue;
7729 
7730 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7731 		if (!new_mode)
7732 			goto out;
7733 
7734 		new_mode->vtotal += (u16)target_vtotal_diff;
7735 		new_mode->vsync_start += (u16)target_vtotal_diff;
7736 		new_mode->vsync_end += (u16)target_vtotal_diff;
7737 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7738 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7739 
7740 		if (!is_duplicate_mode(aconnector, new_mode)) {
7741 			drm_mode_probed_add(&aconnector->base, new_mode);
7742 			new_modes_count += 1;
7743 		} else
7744 			drm_mode_destroy(aconnector->base.dev, new_mode);
7745 	}
7746  out:
7747 	return new_modes_count;
7748 }
7749 
7750 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7751 						   struct edid *edid)
7752 {
7753 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7754 		to_amdgpu_dm_connector(connector);
7755 
7756 	if (!(amdgpu_freesync_vid_mode && edid))
7757 		return;
7758 
7759 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7760 		amdgpu_dm_connector->num_modes +=
7761 			add_fs_modes(amdgpu_dm_connector);
7762 }
7763 
7764 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7765 {
7766 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7767 			to_amdgpu_dm_connector(connector);
7768 	struct drm_encoder *encoder;
7769 	struct edid *edid = amdgpu_dm_connector->edid;
7770 
7771 	encoder = amdgpu_dm_connector_to_encoder(connector);
7772 
7773 	if (!drm_edid_is_valid(edid)) {
7774 		amdgpu_dm_connector->num_modes =
7775 				drm_add_modes_noedid(connector, 640, 480);
7776 	} else {
7777 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7778 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7779 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7780 	}
7781 	amdgpu_dm_fbc_init(connector);
7782 
7783 	return amdgpu_dm_connector->num_modes;
7784 }
7785 
7786 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7787 				     struct amdgpu_dm_connector *aconnector,
7788 				     int connector_type,
7789 				     struct dc_link *link,
7790 				     int link_index)
7791 {
7792 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7793 
7794 	/*
7795 	 * Some of the properties below require access to state, like bpc.
7796 	 * Allocate some default initial connector state with our reset helper.
7797 	 */
7798 	if (aconnector->base.funcs->reset)
7799 		aconnector->base.funcs->reset(&aconnector->base);
7800 
7801 	aconnector->connector_id = link_index;
7802 	aconnector->dc_link = link;
7803 	aconnector->base.interlace_allowed = false;
7804 	aconnector->base.doublescan_allowed = false;
7805 	aconnector->base.stereo_allowed = false;
7806 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7807 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7808 	aconnector->audio_inst = -1;
7809 	rw_init(&aconnector->hpd_lock, "dmhpd");
7810 
7811 	/*
7812 	 * configure support HPD hot plug connector_>polled default value is 0
7813 	 * which means HPD hot plug not supported
7814 	 */
7815 	switch (connector_type) {
7816 	case DRM_MODE_CONNECTOR_HDMIA:
7817 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7818 		aconnector->base.ycbcr_420_allowed =
7819 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7820 		break;
7821 	case DRM_MODE_CONNECTOR_DisplayPort:
7822 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7823 		aconnector->base.ycbcr_420_allowed =
7824 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7825 		break;
7826 	case DRM_MODE_CONNECTOR_DVID:
7827 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7828 		break;
7829 	default:
7830 		break;
7831 	}
7832 
7833 	drm_object_attach_property(&aconnector->base.base,
7834 				dm->ddev->mode_config.scaling_mode_property,
7835 				DRM_MODE_SCALE_NONE);
7836 
7837 	drm_object_attach_property(&aconnector->base.base,
7838 				adev->mode_info.underscan_property,
7839 				UNDERSCAN_OFF);
7840 	drm_object_attach_property(&aconnector->base.base,
7841 				adev->mode_info.underscan_hborder_property,
7842 				0);
7843 	drm_object_attach_property(&aconnector->base.base,
7844 				adev->mode_info.underscan_vborder_property,
7845 				0);
7846 
7847 	if (!aconnector->mst_port)
7848 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7849 
7850 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7851 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7852 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7853 
7854 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7855 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7856 		drm_object_attach_property(&aconnector->base.base,
7857 				adev->mode_info.abm_level_property, 0);
7858 	}
7859 
7860 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7861 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7862 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7863 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7864 
7865 		if (!aconnector->mst_port)
7866 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7867 
7868 #ifdef CONFIG_DRM_AMD_DC_HDCP
7869 		if (adev->dm.hdcp_workqueue)
7870 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7871 #endif
7872 	}
7873 }
7874 
7875 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7876 			      struct i2c_msg *msgs, int num)
7877 {
7878 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7879 	struct ddc_service *ddc_service = i2c->ddc_service;
7880 	struct i2c_command cmd;
7881 	int i;
7882 	int result = -EIO;
7883 
7884 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7885 
7886 	if (!cmd.payloads)
7887 		return result;
7888 
7889 	cmd.number_of_payloads = num;
7890 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7891 	cmd.speed = 100;
7892 
7893 	for (i = 0; i < num; i++) {
7894 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7895 		cmd.payloads[i].address = msgs[i].addr;
7896 		cmd.payloads[i].length = msgs[i].len;
7897 		cmd.payloads[i].data = msgs[i].buf;
7898 	}
7899 
7900 	if (dc_submit_i2c(
7901 			ddc_service->ctx->dc,
7902 			ddc_service->ddc_pin->hw_info.ddc_channel,
7903 			&cmd))
7904 		result = num;
7905 
7906 	kfree(cmd.payloads);
7907 	return result;
7908 }
7909 
7910 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7911 {
7912 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7913 }
7914 
7915 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7916 	.master_xfer = amdgpu_dm_i2c_xfer,
7917 	.functionality = amdgpu_dm_i2c_func,
7918 };
7919 
7920 static struct amdgpu_i2c_adapter *
7921 create_i2c(struct ddc_service *ddc_service,
7922 	   int link_index,
7923 	   int *res)
7924 {
7925 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7926 	struct amdgpu_i2c_adapter *i2c;
7927 
7928 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7929 	if (!i2c)
7930 		return NULL;
7931 #ifdef notyet
7932 	i2c->base.owner = THIS_MODULE;
7933 	i2c->base.class = I2C_CLASS_DDC;
7934 	i2c->base.dev.parent = &adev->pdev->dev;
7935 #endif
7936 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7937 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7938 	i2c_set_adapdata(&i2c->base, i2c);
7939 	i2c->ddc_service = ddc_service;
7940 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7941 
7942 	return i2c;
7943 }
7944 
7945 
7946 /*
7947  * Note: this function assumes that dc_link_detect() was called for the
7948  * dc_link which will be represented by this aconnector.
7949  */
7950 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7951 				    struct amdgpu_dm_connector *aconnector,
7952 				    uint32_t link_index,
7953 				    struct amdgpu_encoder *aencoder)
7954 {
7955 	int res = 0;
7956 	int connector_type;
7957 	struct dc *dc = dm->dc;
7958 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7959 	struct amdgpu_i2c_adapter *i2c;
7960 
7961 	link->priv = aconnector;
7962 
7963 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7964 
7965 	i2c = create_i2c(link->ddc, link->link_index, &res);
7966 	if (!i2c) {
7967 		DRM_ERROR("Failed to create i2c adapter data\n");
7968 		return -ENOMEM;
7969 	}
7970 
7971 	aconnector->i2c = i2c;
7972 	res = i2c_add_adapter(&i2c->base);
7973 
7974 	if (res) {
7975 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7976 		goto out_free;
7977 	}
7978 
7979 	connector_type = to_drm_connector_type(link->connector_signal);
7980 
7981 	res = drm_connector_init_with_ddc(
7982 			dm->ddev,
7983 			&aconnector->base,
7984 			&amdgpu_dm_connector_funcs,
7985 			connector_type,
7986 			&i2c->base);
7987 
7988 	if (res) {
7989 		DRM_ERROR("connector_init failed\n");
7990 		aconnector->connector_id = -1;
7991 		goto out_free;
7992 	}
7993 
7994 	drm_connector_helper_add(
7995 			&aconnector->base,
7996 			&amdgpu_dm_connector_helper_funcs);
7997 
7998 	amdgpu_dm_connector_init_helper(
7999 		dm,
8000 		aconnector,
8001 		connector_type,
8002 		link,
8003 		link_index);
8004 
8005 	drm_connector_attach_encoder(
8006 		&aconnector->base, &aencoder->base);
8007 
8008 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8009 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8010 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8011 
8012 out_free:
8013 	if (res) {
8014 		kfree(i2c);
8015 		aconnector->i2c = NULL;
8016 	}
8017 	return res;
8018 }
8019 
8020 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8021 {
8022 	switch (adev->mode_info.num_crtc) {
8023 	case 1:
8024 		return 0x1;
8025 	case 2:
8026 		return 0x3;
8027 	case 3:
8028 		return 0x7;
8029 	case 4:
8030 		return 0xf;
8031 	case 5:
8032 		return 0x1f;
8033 	case 6:
8034 	default:
8035 		return 0x3f;
8036 	}
8037 }
8038 
8039 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8040 				  struct amdgpu_encoder *aencoder,
8041 				  uint32_t link_index)
8042 {
8043 	struct amdgpu_device *adev = drm_to_adev(dev);
8044 
8045 	int res = drm_encoder_init(dev,
8046 				   &aencoder->base,
8047 				   &amdgpu_dm_encoder_funcs,
8048 				   DRM_MODE_ENCODER_TMDS,
8049 				   NULL);
8050 
8051 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8052 
8053 	if (!res)
8054 		aencoder->encoder_id = link_index;
8055 	else
8056 		aencoder->encoder_id = -1;
8057 
8058 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8059 
8060 	return res;
8061 }
8062 
8063 static void manage_dm_interrupts(struct amdgpu_device *adev,
8064 				 struct amdgpu_crtc *acrtc,
8065 				 bool enable)
8066 {
8067 	/*
8068 	 * We have no guarantee that the frontend index maps to the same
8069 	 * backend index - some even map to more than one.
8070 	 *
8071 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8072 	 */
8073 	int irq_type =
8074 		amdgpu_display_crtc_idx_to_irq_type(
8075 			adev,
8076 			acrtc->crtc_id);
8077 
8078 	if (enable) {
8079 		drm_crtc_vblank_on(&acrtc->base);
8080 		amdgpu_irq_get(
8081 			adev,
8082 			&adev->pageflip_irq,
8083 			irq_type);
8084 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8085 		amdgpu_irq_get(
8086 			adev,
8087 			&adev->vline0_irq,
8088 			irq_type);
8089 #endif
8090 	} else {
8091 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8092 		amdgpu_irq_put(
8093 			adev,
8094 			&adev->vline0_irq,
8095 			irq_type);
8096 #endif
8097 		amdgpu_irq_put(
8098 			adev,
8099 			&adev->pageflip_irq,
8100 			irq_type);
8101 		drm_crtc_vblank_off(&acrtc->base);
8102 	}
8103 }
8104 
8105 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8106 				      struct amdgpu_crtc *acrtc)
8107 {
8108 	int irq_type =
8109 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8110 
8111 	/**
8112 	 * This reads the current state for the IRQ and force reapplies
8113 	 * the setting to hardware.
8114 	 */
8115 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8116 }
8117 
8118 static bool
8119 is_scaling_state_different(const struct dm_connector_state *dm_state,
8120 			   const struct dm_connector_state *old_dm_state)
8121 {
8122 	if (dm_state->scaling != old_dm_state->scaling)
8123 		return true;
8124 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8125 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8126 			return true;
8127 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8128 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8129 			return true;
8130 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8131 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8132 		return true;
8133 	return false;
8134 }
8135 
8136 #ifdef CONFIG_DRM_AMD_DC_HDCP
8137 static bool is_content_protection_different(struct drm_connector_state *state,
8138 					    const struct drm_connector_state *old_state,
8139 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8140 {
8141 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8142 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8143 
8144 	/* Handle: Type0/1 change */
8145 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8146 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8147 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8148 		return true;
8149 	}
8150 
8151 	/* CP is being re enabled, ignore this
8152 	 *
8153 	 * Handles:	ENABLED -> DESIRED
8154 	 */
8155 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8156 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8157 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8158 		return false;
8159 	}
8160 
8161 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8162 	 *
8163 	 * Handles:	UNDESIRED -> ENABLED
8164 	 */
8165 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8166 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8167 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8168 
8169 	/* Stream removed and re-enabled
8170 	 *
8171 	 * Can sometimes overlap with the HPD case,
8172 	 * thus set update_hdcp to false to avoid
8173 	 * setting HDCP multiple times.
8174 	 *
8175 	 * Handles:	DESIRED -> DESIRED (Special case)
8176 	 */
8177 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8178 		state->crtc && state->crtc->enabled &&
8179 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8180 		dm_con_state->update_hdcp = false;
8181 		return true;
8182 	}
8183 
8184 	/* Hot-plug, headless s3, dpms
8185 	 *
8186 	 * Only start HDCP if the display is connected/enabled.
8187 	 * update_hdcp flag will be set to false until the next
8188 	 * HPD comes in.
8189 	 *
8190 	 * Handles:	DESIRED -> DESIRED (Special case)
8191 	 */
8192 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8193 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8194 		dm_con_state->update_hdcp = false;
8195 		return true;
8196 	}
8197 
8198 	/*
8199 	 * Handles:	UNDESIRED -> UNDESIRED
8200 	 *		DESIRED -> DESIRED
8201 	 *		ENABLED -> ENABLED
8202 	 */
8203 	if (old_state->content_protection == state->content_protection)
8204 		return false;
8205 
8206 	/*
8207 	 * Handles:	UNDESIRED -> DESIRED
8208 	 *		DESIRED -> UNDESIRED
8209 	 *		ENABLED -> UNDESIRED
8210 	 */
8211 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8212 		return true;
8213 
8214 	/*
8215 	 * Handles:	DESIRED -> ENABLED
8216 	 */
8217 	return false;
8218 }
8219 
8220 #endif
8221 static void remove_stream(struct amdgpu_device *adev,
8222 			  struct amdgpu_crtc *acrtc,
8223 			  struct dc_stream_state *stream)
8224 {
8225 	/* this is the update mode case */
8226 
8227 	acrtc->otg_inst = -1;
8228 	acrtc->enabled = false;
8229 }
8230 
8231 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8232 			       struct dc_cursor_position *position)
8233 {
8234 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8235 	int x, y;
8236 	int xorigin = 0, yorigin = 0;
8237 
8238 	if (!crtc || !plane->state->fb)
8239 		return 0;
8240 
8241 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8242 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8243 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8244 			  __func__,
8245 			  plane->state->crtc_w,
8246 			  plane->state->crtc_h);
8247 		return -EINVAL;
8248 	}
8249 
8250 	x = plane->state->crtc_x;
8251 	y = plane->state->crtc_y;
8252 
8253 	if (x <= -amdgpu_crtc->max_cursor_width ||
8254 	    y <= -amdgpu_crtc->max_cursor_height)
8255 		return 0;
8256 
8257 	if (x < 0) {
8258 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8259 		x = 0;
8260 	}
8261 	if (y < 0) {
8262 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8263 		y = 0;
8264 	}
8265 	position->enable = true;
8266 	position->translate_by_source = true;
8267 	position->x = x;
8268 	position->y = y;
8269 	position->x_hotspot = xorigin;
8270 	position->y_hotspot = yorigin;
8271 
8272 	return 0;
8273 }
8274 
8275 static void handle_cursor_update(struct drm_plane *plane,
8276 				 struct drm_plane_state *old_plane_state)
8277 {
8278 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8279 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8280 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8281 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8282 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8283 	uint64_t address = afb ? afb->address : 0;
8284 	struct dc_cursor_position position = {0};
8285 	struct dc_cursor_attributes attributes;
8286 	int ret;
8287 
8288 	if (!plane->state->fb && !old_plane_state->fb)
8289 		return;
8290 
8291 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8292 		      __func__,
8293 		      amdgpu_crtc->crtc_id,
8294 		      plane->state->crtc_w,
8295 		      plane->state->crtc_h);
8296 
8297 	ret = get_cursor_position(plane, crtc, &position);
8298 	if (ret)
8299 		return;
8300 
8301 	if (!position.enable) {
8302 		/* turn off cursor */
8303 		if (crtc_state && crtc_state->stream) {
8304 			mutex_lock(&adev->dm.dc_lock);
8305 			dc_stream_set_cursor_position(crtc_state->stream,
8306 						      &position);
8307 			mutex_unlock(&adev->dm.dc_lock);
8308 		}
8309 		return;
8310 	}
8311 
8312 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8313 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8314 
8315 	memset(&attributes, 0, sizeof(attributes));
8316 	attributes.address.high_part = upper_32_bits(address);
8317 	attributes.address.low_part  = lower_32_bits(address);
8318 	attributes.width             = plane->state->crtc_w;
8319 	attributes.height            = plane->state->crtc_h;
8320 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8321 	attributes.rotation_angle    = 0;
8322 	attributes.attribute_flags.value = 0;
8323 
8324 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8325 
8326 	if (crtc_state->stream) {
8327 		mutex_lock(&adev->dm.dc_lock);
8328 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8329 							 &attributes))
8330 			DRM_ERROR("DC failed to set cursor attributes\n");
8331 
8332 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8333 						   &position))
8334 			DRM_ERROR("DC failed to set cursor position\n");
8335 		mutex_unlock(&adev->dm.dc_lock);
8336 	}
8337 }
8338 
8339 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8340 {
8341 
8342 	assert_spin_locked(&acrtc->base.dev->event_lock);
8343 	WARN_ON(acrtc->event);
8344 
8345 	acrtc->event = acrtc->base.state->event;
8346 
8347 	/* Set the flip status */
8348 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8349 
8350 	/* Mark this event as consumed */
8351 	acrtc->base.state->event = NULL;
8352 
8353 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8354 		     acrtc->crtc_id);
8355 }
8356 
8357 static void update_freesync_state_on_stream(
8358 	struct amdgpu_display_manager *dm,
8359 	struct dm_crtc_state *new_crtc_state,
8360 	struct dc_stream_state *new_stream,
8361 	struct dc_plane_state *surface,
8362 	u32 flip_timestamp_in_us)
8363 {
8364 	struct mod_vrr_params vrr_params;
8365 	struct dc_info_packet vrr_infopacket = {0};
8366 	struct amdgpu_device *adev = dm->adev;
8367 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8368 	unsigned long flags;
8369 	bool pack_sdp_v1_3 = false;
8370 
8371 	if (!new_stream)
8372 		return;
8373 
8374 	/*
8375 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8376 	 * For now it's sufficient to just guard against these conditions.
8377 	 */
8378 
8379 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8380 		return;
8381 
8382 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8383         vrr_params = acrtc->dm_irq_params.vrr_params;
8384 
8385 	if (surface) {
8386 		mod_freesync_handle_preflip(
8387 			dm->freesync_module,
8388 			surface,
8389 			new_stream,
8390 			flip_timestamp_in_us,
8391 			&vrr_params);
8392 
8393 		if (adev->family < AMDGPU_FAMILY_AI &&
8394 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8395 			mod_freesync_handle_v_update(dm->freesync_module,
8396 						     new_stream, &vrr_params);
8397 
8398 			/* Need to call this before the frame ends. */
8399 			dc_stream_adjust_vmin_vmax(dm->dc,
8400 						   new_crtc_state->stream,
8401 						   &vrr_params.adjust);
8402 		}
8403 	}
8404 
8405 	mod_freesync_build_vrr_infopacket(
8406 		dm->freesync_module,
8407 		new_stream,
8408 		&vrr_params,
8409 		PACKET_TYPE_VRR,
8410 		TRANSFER_FUNC_UNKNOWN,
8411 		&vrr_infopacket,
8412 		pack_sdp_v1_3);
8413 
8414 	new_crtc_state->freesync_timing_changed |=
8415 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8416 			&vrr_params.adjust,
8417 			sizeof(vrr_params.adjust)) != 0);
8418 
8419 	new_crtc_state->freesync_vrr_info_changed |=
8420 		(memcmp(&new_crtc_state->vrr_infopacket,
8421 			&vrr_infopacket,
8422 			sizeof(vrr_infopacket)) != 0);
8423 
8424 	acrtc->dm_irq_params.vrr_params = vrr_params;
8425 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8426 
8427 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8428 	new_stream->vrr_infopacket = vrr_infopacket;
8429 
8430 	if (new_crtc_state->freesync_vrr_info_changed)
8431 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8432 			      new_crtc_state->base.crtc->base.id,
8433 			      (int)new_crtc_state->base.vrr_enabled,
8434 			      (int)vrr_params.state);
8435 
8436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8437 }
8438 
8439 static void update_stream_irq_parameters(
8440 	struct amdgpu_display_manager *dm,
8441 	struct dm_crtc_state *new_crtc_state)
8442 {
8443 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8444 	struct mod_vrr_params vrr_params;
8445 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8446 	struct amdgpu_device *adev = dm->adev;
8447 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8448 	unsigned long flags;
8449 
8450 	if (!new_stream)
8451 		return;
8452 
8453 	/*
8454 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8455 	 * For now it's sufficient to just guard against these conditions.
8456 	 */
8457 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8458 		return;
8459 
8460 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8461 	vrr_params = acrtc->dm_irq_params.vrr_params;
8462 
8463 	if (new_crtc_state->vrr_supported &&
8464 	    config.min_refresh_in_uhz &&
8465 	    config.max_refresh_in_uhz) {
8466 		/*
8467 		 * if freesync compatible mode was set, config.state will be set
8468 		 * in atomic check
8469 		 */
8470 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8471 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8472 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8473 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8474 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8475 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8476 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8477 		} else {
8478 			config.state = new_crtc_state->base.vrr_enabled ?
8479 						     VRR_STATE_ACTIVE_VARIABLE :
8480 						     VRR_STATE_INACTIVE;
8481 		}
8482 	} else {
8483 		config.state = VRR_STATE_UNSUPPORTED;
8484 	}
8485 
8486 	mod_freesync_build_vrr_params(dm->freesync_module,
8487 				      new_stream,
8488 				      &config, &vrr_params);
8489 
8490 	new_crtc_state->freesync_timing_changed |=
8491 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8492 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8493 
8494 	new_crtc_state->freesync_config = config;
8495 	/* Copy state for access from DM IRQ handler */
8496 	acrtc->dm_irq_params.freesync_config = config;
8497 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8498 	acrtc->dm_irq_params.vrr_params = vrr_params;
8499 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8500 }
8501 
8502 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8503 					    struct dm_crtc_state *new_state)
8504 {
8505 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8506 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8507 
8508 	if (!old_vrr_active && new_vrr_active) {
8509 		/* Transition VRR inactive -> active:
8510 		 * While VRR is active, we must not disable vblank irq, as a
8511 		 * reenable after disable would compute bogus vblank/pflip
8512 		 * timestamps if it likely happened inside display front-porch.
8513 		 *
8514 		 * We also need vupdate irq for the actual core vblank handling
8515 		 * at end of vblank.
8516 		 */
8517 		dm_set_vupdate_irq(new_state->base.crtc, true);
8518 		drm_crtc_vblank_get(new_state->base.crtc);
8519 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8520 				 __func__, new_state->base.crtc->base.id);
8521 	} else if (old_vrr_active && !new_vrr_active) {
8522 		/* Transition VRR active -> inactive:
8523 		 * Allow vblank irq disable again for fixed refresh rate.
8524 		 */
8525 		dm_set_vupdate_irq(new_state->base.crtc, false);
8526 		drm_crtc_vblank_put(new_state->base.crtc);
8527 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8528 				 __func__, new_state->base.crtc->base.id);
8529 	}
8530 }
8531 
8532 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8533 {
8534 	struct drm_plane *plane;
8535 	struct drm_plane_state *old_plane_state;
8536 	int i;
8537 
8538 	/*
8539 	 * TODO: Make this per-stream so we don't issue redundant updates for
8540 	 * commits with multiple streams.
8541 	 */
8542 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8543 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8544 			handle_cursor_update(plane, old_plane_state);
8545 }
8546 
8547 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8548 				    struct dc_state *dc_state,
8549 				    struct drm_device *dev,
8550 				    struct amdgpu_display_manager *dm,
8551 				    struct drm_crtc *pcrtc,
8552 				    bool wait_for_vblank)
8553 {
8554 	uint32_t i;
8555 	uint64_t timestamp_ns;
8556 	struct drm_plane *plane;
8557 	struct drm_plane_state *old_plane_state, *new_plane_state;
8558 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8559 	struct drm_crtc_state *new_pcrtc_state =
8560 			drm_atomic_get_new_crtc_state(state, pcrtc);
8561 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8562 	struct dm_crtc_state *dm_old_crtc_state =
8563 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8564 	int planes_count = 0, vpos, hpos;
8565 	long r;
8566 	unsigned long flags;
8567 	struct amdgpu_bo *abo;
8568 	uint32_t target_vblank, last_flip_vblank;
8569 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8570 	bool pflip_present = false;
8571 	struct {
8572 		struct dc_surface_update surface_updates[MAX_SURFACES];
8573 		struct dc_plane_info plane_infos[MAX_SURFACES];
8574 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8575 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8576 		struct dc_stream_update stream_update;
8577 	} *bundle;
8578 
8579 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8580 
8581 	if (!bundle) {
8582 		dm_error("Failed to allocate update bundle\n");
8583 		goto cleanup;
8584 	}
8585 
8586 	/*
8587 	 * Disable the cursor first if we're disabling all the planes.
8588 	 * It'll remain on the screen after the planes are re-enabled
8589 	 * if we don't.
8590 	 */
8591 	if (acrtc_state->active_planes == 0)
8592 		amdgpu_dm_commit_cursors(state);
8593 
8594 	/* update planes when needed */
8595 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8596 		struct drm_crtc *crtc = new_plane_state->crtc;
8597 		struct drm_crtc_state *new_crtc_state;
8598 		struct drm_framebuffer *fb = new_plane_state->fb;
8599 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8600 		bool plane_needs_flip;
8601 		struct dc_plane_state *dc_plane;
8602 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8603 
8604 		/* Cursor plane is handled after stream updates */
8605 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8606 			continue;
8607 
8608 		if (!fb || !crtc || pcrtc != crtc)
8609 			continue;
8610 
8611 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8612 		if (!new_crtc_state->active)
8613 			continue;
8614 
8615 		dc_plane = dm_new_plane_state->dc_state;
8616 
8617 		bundle->surface_updates[planes_count].surface = dc_plane;
8618 		if (new_pcrtc_state->color_mgmt_changed) {
8619 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8620 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8621 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8622 		}
8623 
8624 		fill_dc_scaling_info(new_plane_state,
8625 				     &bundle->scaling_infos[planes_count]);
8626 
8627 		bundle->surface_updates[planes_count].scaling_info =
8628 			&bundle->scaling_infos[planes_count];
8629 
8630 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8631 
8632 		pflip_present = pflip_present || plane_needs_flip;
8633 
8634 		if (!plane_needs_flip) {
8635 			planes_count += 1;
8636 			continue;
8637 		}
8638 
8639 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8640 
8641 		/*
8642 		 * Wait for all fences on this FB. Do limited wait to avoid
8643 		 * deadlock during GPU reset when this fence will not signal
8644 		 * but we hold reservation lock for the BO.
8645 		 */
8646 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8647 					  msecs_to_jiffies(5000));
8648 		if (unlikely(r <= 0))
8649 			DRM_ERROR("Waiting for fences timed out!");
8650 
8651 		fill_dc_plane_info_and_addr(
8652 			dm->adev, new_plane_state,
8653 			afb->tiling_flags,
8654 			&bundle->plane_infos[planes_count],
8655 			&bundle->flip_addrs[planes_count].address,
8656 			afb->tmz_surface, false);
8657 
8658 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8659 				 new_plane_state->plane->index,
8660 				 bundle->plane_infos[planes_count].dcc.enable);
8661 
8662 		bundle->surface_updates[planes_count].plane_info =
8663 			&bundle->plane_infos[planes_count];
8664 
8665 		/*
8666 		 * Only allow immediate flips for fast updates that don't
8667 		 * change FB pitch, DCC state, rotation or mirroing.
8668 		 */
8669 		bundle->flip_addrs[planes_count].flip_immediate =
8670 			crtc->state->async_flip &&
8671 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8672 
8673 		timestamp_ns = ktime_get_ns();
8674 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8675 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8676 		bundle->surface_updates[planes_count].surface = dc_plane;
8677 
8678 		if (!bundle->surface_updates[planes_count].surface) {
8679 			DRM_ERROR("No surface for CRTC: id=%d\n",
8680 					acrtc_attach->crtc_id);
8681 			continue;
8682 		}
8683 
8684 		if (plane == pcrtc->primary)
8685 			update_freesync_state_on_stream(
8686 				dm,
8687 				acrtc_state,
8688 				acrtc_state->stream,
8689 				dc_plane,
8690 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8691 
8692 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8693 				 __func__,
8694 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8695 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8696 
8697 		planes_count += 1;
8698 
8699 	}
8700 
8701 	if (pflip_present) {
8702 		if (!vrr_active) {
8703 			/* Use old throttling in non-vrr fixed refresh rate mode
8704 			 * to keep flip scheduling based on target vblank counts
8705 			 * working in a backwards compatible way, e.g., for
8706 			 * clients using the GLX_OML_sync_control extension or
8707 			 * DRI3/Present extension with defined target_msc.
8708 			 */
8709 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8710 		}
8711 		else {
8712 			/* For variable refresh rate mode only:
8713 			 * Get vblank of last completed flip to avoid > 1 vrr
8714 			 * flips per video frame by use of throttling, but allow
8715 			 * flip programming anywhere in the possibly large
8716 			 * variable vrr vblank interval for fine-grained flip
8717 			 * timing control and more opportunity to avoid stutter
8718 			 * on late submission of flips.
8719 			 */
8720 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8721 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8722 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8723 		}
8724 
8725 		target_vblank = last_flip_vblank + wait_for_vblank;
8726 
8727 		/*
8728 		 * Wait until we're out of the vertical blank period before the one
8729 		 * targeted by the flip
8730 		 */
8731 		while ((acrtc_attach->enabled &&
8732 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8733 							    0, &vpos, &hpos, NULL,
8734 							    NULL, &pcrtc->hwmode)
8735 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8736 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8737 			(int)(target_vblank -
8738 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8739 			usleep_range(1000, 1100);
8740 		}
8741 
8742 		/**
8743 		 * Prepare the flip event for the pageflip interrupt to handle.
8744 		 *
8745 		 * This only works in the case where we've already turned on the
8746 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8747 		 * from 0 -> n planes we have to skip a hardware generated event
8748 		 * and rely on sending it from software.
8749 		 */
8750 		if (acrtc_attach->base.state->event &&
8751 		    acrtc_state->active_planes > 0) {
8752 			drm_crtc_vblank_get(pcrtc);
8753 
8754 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8755 
8756 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8757 			prepare_flip_isr(acrtc_attach);
8758 
8759 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8760 		}
8761 
8762 		if (acrtc_state->stream) {
8763 			if (acrtc_state->freesync_vrr_info_changed)
8764 				bundle->stream_update.vrr_infopacket =
8765 					&acrtc_state->stream->vrr_infopacket;
8766 		}
8767 	}
8768 
8769 	/* Update the planes if changed or disable if we don't have any. */
8770 	if ((planes_count || acrtc_state->active_planes == 0) &&
8771 		acrtc_state->stream) {
8772 #if defined(CONFIG_DRM_AMD_DC_DCN)
8773 		/*
8774 		 * If PSR or idle optimizations are enabled then flush out
8775 		 * any pending work before hardware programming.
8776 		 */
8777 		if (dm->vblank_control_workqueue)
8778 			flush_workqueue(dm->vblank_control_workqueue);
8779 #endif
8780 
8781 		bundle->stream_update.stream = acrtc_state->stream;
8782 		if (new_pcrtc_state->mode_changed) {
8783 			bundle->stream_update.src = acrtc_state->stream->src;
8784 			bundle->stream_update.dst = acrtc_state->stream->dst;
8785 		}
8786 
8787 		if (new_pcrtc_state->color_mgmt_changed) {
8788 			/*
8789 			 * TODO: This isn't fully correct since we've actually
8790 			 * already modified the stream in place.
8791 			 */
8792 			bundle->stream_update.gamut_remap =
8793 				&acrtc_state->stream->gamut_remap_matrix;
8794 			bundle->stream_update.output_csc_transform =
8795 				&acrtc_state->stream->csc_color_matrix;
8796 			bundle->stream_update.out_transfer_func =
8797 				acrtc_state->stream->out_transfer_func;
8798 		}
8799 
8800 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8801 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8802 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8803 
8804 		/*
8805 		 * If FreeSync state on the stream has changed then we need to
8806 		 * re-adjust the min/max bounds now that DC doesn't handle this
8807 		 * as part of commit.
8808 		 */
8809 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8810 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8811 			dc_stream_adjust_vmin_vmax(
8812 				dm->dc, acrtc_state->stream,
8813 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8814 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8815 		}
8816 		mutex_lock(&dm->dc_lock);
8817 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8818 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8819 			amdgpu_dm_psr_disable(acrtc_state->stream);
8820 
8821 		dc_commit_updates_for_stream(dm->dc,
8822 						     bundle->surface_updates,
8823 						     planes_count,
8824 						     acrtc_state->stream,
8825 						     &bundle->stream_update,
8826 						     dc_state);
8827 
8828 		/**
8829 		 * Enable or disable the interrupts on the backend.
8830 		 *
8831 		 * Most pipes are put into power gating when unused.
8832 		 *
8833 		 * When power gating is enabled on a pipe we lose the
8834 		 * interrupt enablement state when power gating is disabled.
8835 		 *
8836 		 * So we need to update the IRQ control state in hardware
8837 		 * whenever the pipe turns on (since it could be previously
8838 		 * power gated) or off (since some pipes can't be power gated
8839 		 * on some ASICs).
8840 		 */
8841 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8842 			dm_update_pflip_irq_state(drm_to_adev(dev),
8843 						  acrtc_attach);
8844 
8845 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8846 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8847 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8848 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8849 
8850 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
8851 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8852 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8853 			struct amdgpu_dm_connector *aconn =
8854 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8855 
8856 			if (aconn->psr_skip_count > 0)
8857 				aconn->psr_skip_count--;
8858 
8859 			/* Allow PSR when skip count is 0. */
8860 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8861 		} else {
8862 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
8863 		}
8864 
8865 		mutex_unlock(&dm->dc_lock);
8866 	}
8867 
8868 	/*
8869 	 * Update cursor state *after* programming all the planes.
8870 	 * This avoids redundant programming in the case where we're going
8871 	 * to be disabling a single plane - those pipes are being disabled.
8872 	 */
8873 	if (acrtc_state->active_planes)
8874 		amdgpu_dm_commit_cursors(state);
8875 
8876 cleanup:
8877 	kfree(bundle);
8878 }
8879 
8880 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8881 				   struct drm_atomic_state *state)
8882 {
8883 	struct amdgpu_device *adev = drm_to_adev(dev);
8884 	struct amdgpu_dm_connector *aconnector;
8885 	struct drm_connector *connector;
8886 	struct drm_connector_state *old_con_state, *new_con_state;
8887 	struct drm_crtc_state *new_crtc_state;
8888 	struct dm_crtc_state *new_dm_crtc_state;
8889 	const struct dc_stream_status *status;
8890 	int i, inst;
8891 
8892 	/* Notify device removals. */
8893 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8894 		if (old_con_state->crtc != new_con_state->crtc) {
8895 			/* CRTC changes require notification. */
8896 			goto notify;
8897 		}
8898 
8899 		if (!new_con_state->crtc)
8900 			continue;
8901 
8902 		new_crtc_state = drm_atomic_get_new_crtc_state(
8903 			state, new_con_state->crtc);
8904 
8905 		if (!new_crtc_state)
8906 			continue;
8907 
8908 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8909 			continue;
8910 
8911 	notify:
8912 		aconnector = to_amdgpu_dm_connector(connector);
8913 
8914 		mutex_lock(&adev->dm.audio_lock);
8915 		inst = aconnector->audio_inst;
8916 		aconnector->audio_inst = -1;
8917 		mutex_unlock(&adev->dm.audio_lock);
8918 
8919 		amdgpu_dm_audio_eld_notify(adev, inst);
8920 	}
8921 
8922 	/* Notify audio device additions. */
8923 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8924 		if (!new_con_state->crtc)
8925 			continue;
8926 
8927 		new_crtc_state = drm_atomic_get_new_crtc_state(
8928 			state, new_con_state->crtc);
8929 
8930 		if (!new_crtc_state)
8931 			continue;
8932 
8933 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8934 			continue;
8935 
8936 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8937 		if (!new_dm_crtc_state->stream)
8938 			continue;
8939 
8940 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8941 		if (!status)
8942 			continue;
8943 
8944 		aconnector = to_amdgpu_dm_connector(connector);
8945 
8946 		mutex_lock(&adev->dm.audio_lock);
8947 		inst = status->audio_inst;
8948 		aconnector->audio_inst = inst;
8949 		mutex_unlock(&adev->dm.audio_lock);
8950 
8951 		amdgpu_dm_audio_eld_notify(adev, inst);
8952 	}
8953 }
8954 
8955 /*
8956  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8957  * @crtc_state: the DRM CRTC state
8958  * @stream_state: the DC stream state.
8959  *
8960  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8961  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8962  */
8963 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8964 						struct dc_stream_state *stream_state)
8965 {
8966 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8967 }
8968 
8969 /**
8970  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8971  * @state: The atomic state to commit
8972  *
8973  * This will tell DC to commit the constructed DC state from atomic_check,
8974  * programming the hardware. Any failures here implies a hardware failure, since
8975  * atomic check should have filtered anything non-kosher.
8976  */
8977 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8978 {
8979 	struct drm_device *dev = state->dev;
8980 	struct amdgpu_device *adev = drm_to_adev(dev);
8981 	struct amdgpu_display_manager *dm = &adev->dm;
8982 	struct dm_atomic_state *dm_state;
8983 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8984 	uint32_t i, j;
8985 	struct drm_crtc *crtc;
8986 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8987 	unsigned long flags;
8988 	bool wait_for_vblank = true;
8989 	struct drm_connector *connector;
8990 	struct drm_connector_state *old_con_state, *new_con_state;
8991 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8992 	int crtc_disable_count = 0;
8993 	bool mode_set_reset_required = false;
8994 
8995 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8996 
8997 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8998 
8999 	dm_state = dm_atomic_get_new_state(state);
9000 	if (dm_state && dm_state->context) {
9001 		dc_state = dm_state->context;
9002 	} else {
9003 		/* No state changes, retain current state. */
9004 		dc_state_temp = dc_create_state(dm->dc);
9005 		ASSERT(dc_state_temp);
9006 		dc_state = dc_state_temp;
9007 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9008 	}
9009 
9010 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9011 				       new_crtc_state, i) {
9012 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9013 
9014 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9015 
9016 		if (old_crtc_state->active &&
9017 		    (!new_crtc_state->active ||
9018 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9019 			manage_dm_interrupts(adev, acrtc, false);
9020 			dc_stream_release(dm_old_crtc_state->stream);
9021 		}
9022 	}
9023 
9024 	drm_atomic_helper_calc_timestamping_constants(state);
9025 
9026 	/* update changed items */
9027 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9028 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9029 
9030 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9031 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9032 
9033 		DRM_DEBUG_ATOMIC(
9034 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9035 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9036 			"connectors_changed:%d\n",
9037 			acrtc->crtc_id,
9038 			new_crtc_state->enable,
9039 			new_crtc_state->active,
9040 			new_crtc_state->planes_changed,
9041 			new_crtc_state->mode_changed,
9042 			new_crtc_state->active_changed,
9043 			new_crtc_state->connectors_changed);
9044 
9045 		/* Disable cursor if disabling crtc */
9046 		if (old_crtc_state->active && !new_crtc_state->active) {
9047 			struct dc_cursor_position position;
9048 
9049 			memset(&position, 0, sizeof(position));
9050 			mutex_lock(&dm->dc_lock);
9051 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9052 			mutex_unlock(&dm->dc_lock);
9053 		}
9054 
9055 		/* Copy all transient state flags into dc state */
9056 		if (dm_new_crtc_state->stream) {
9057 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9058 							    dm_new_crtc_state->stream);
9059 		}
9060 
9061 		/* handles headless hotplug case, updating new_state and
9062 		 * aconnector as needed
9063 		 */
9064 
9065 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9066 
9067 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9068 
9069 			if (!dm_new_crtc_state->stream) {
9070 				/*
9071 				 * this could happen because of issues with
9072 				 * userspace notifications delivery.
9073 				 * In this case userspace tries to set mode on
9074 				 * display which is disconnected in fact.
9075 				 * dc_sink is NULL in this case on aconnector.
9076 				 * We expect reset mode will come soon.
9077 				 *
9078 				 * This can also happen when unplug is done
9079 				 * during resume sequence ended
9080 				 *
9081 				 * In this case, we want to pretend we still
9082 				 * have a sink to keep the pipe running so that
9083 				 * hw state is consistent with the sw state
9084 				 */
9085 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9086 						__func__, acrtc->base.base.id);
9087 				continue;
9088 			}
9089 
9090 			if (dm_old_crtc_state->stream)
9091 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9092 
9093 			pm_runtime_get_noresume(dev->dev);
9094 
9095 			acrtc->enabled = true;
9096 			acrtc->hw_mode = new_crtc_state->mode;
9097 			crtc->hwmode = new_crtc_state->mode;
9098 			mode_set_reset_required = true;
9099 		} else if (modereset_required(new_crtc_state)) {
9100 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9101 			/* i.e. reset mode */
9102 			if (dm_old_crtc_state->stream)
9103 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9104 
9105 			mode_set_reset_required = true;
9106 		}
9107 	} /* for_each_crtc_in_state() */
9108 
9109 	if (dc_state) {
9110 		/* if there mode set or reset, disable eDP PSR */
9111 		if (mode_set_reset_required) {
9112 #if defined(CONFIG_DRM_AMD_DC_DCN)
9113 			if (dm->vblank_control_workqueue)
9114 				flush_workqueue(dm->vblank_control_workqueue);
9115 #endif
9116 			amdgpu_dm_psr_disable_all(dm);
9117 		}
9118 
9119 		dm_enable_per_frame_crtc_master_sync(dc_state);
9120 		mutex_lock(&dm->dc_lock);
9121 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9122 #if defined(CONFIG_DRM_AMD_DC_DCN)
9123                /* Allow idle optimization when vblank count is 0 for display off */
9124                if (dm->active_vblank_irq_count == 0)
9125                    dc_allow_idle_optimizations(dm->dc,true);
9126 #endif
9127 		mutex_unlock(&dm->dc_lock);
9128 	}
9129 
9130 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9131 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9132 
9133 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9134 
9135 		if (dm_new_crtc_state->stream != NULL) {
9136 			const struct dc_stream_status *status =
9137 					dc_stream_get_status(dm_new_crtc_state->stream);
9138 
9139 			if (!status)
9140 				status = dc_stream_get_status_from_state(dc_state,
9141 									 dm_new_crtc_state->stream);
9142 			if (!status)
9143 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9144 			else
9145 				acrtc->otg_inst = status->primary_otg_inst;
9146 		}
9147 	}
9148 #ifdef CONFIG_DRM_AMD_DC_HDCP
9149 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9150 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9151 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9152 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9153 
9154 		new_crtc_state = NULL;
9155 
9156 		if (acrtc)
9157 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9158 
9159 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9160 
9161 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9162 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9163 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9164 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9165 			dm_new_con_state->update_hdcp = true;
9166 			continue;
9167 		}
9168 
9169 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9170 			hdcp_update_display(
9171 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9172 				new_con_state->hdcp_content_type,
9173 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9174 	}
9175 #endif
9176 
9177 	/* Handle connector state changes */
9178 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9179 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9180 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9181 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9182 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9183 		struct dc_stream_update stream_update;
9184 		struct dc_info_packet hdr_packet;
9185 		struct dc_stream_status *status = NULL;
9186 		bool abm_changed, hdr_changed, scaling_changed;
9187 
9188 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9189 		memset(&stream_update, 0, sizeof(stream_update));
9190 
9191 		if (acrtc) {
9192 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9193 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9194 		}
9195 
9196 		/* Skip any modesets/resets */
9197 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9198 			continue;
9199 
9200 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9201 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9202 
9203 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9204 							     dm_old_con_state);
9205 
9206 		abm_changed = dm_new_crtc_state->abm_level !=
9207 			      dm_old_crtc_state->abm_level;
9208 
9209 		hdr_changed =
9210 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9211 
9212 		if (!scaling_changed && !abm_changed && !hdr_changed)
9213 			continue;
9214 
9215 		stream_update.stream = dm_new_crtc_state->stream;
9216 		if (scaling_changed) {
9217 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9218 					dm_new_con_state, dm_new_crtc_state->stream);
9219 
9220 			stream_update.src = dm_new_crtc_state->stream->src;
9221 			stream_update.dst = dm_new_crtc_state->stream->dst;
9222 		}
9223 
9224 		if (abm_changed) {
9225 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9226 
9227 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9228 		}
9229 
9230 		if (hdr_changed) {
9231 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9232 			stream_update.hdr_static_metadata = &hdr_packet;
9233 		}
9234 
9235 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9236 
9237 		if (WARN_ON(!status))
9238 			continue;
9239 
9240 		WARN_ON(!status->plane_count);
9241 
9242 		/*
9243 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9244 		 * Here we create an empty update on each plane.
9245 		 * To fix this, DC should permit updating only stream properties.
9246 		 */
9247 		for (j = 0; j < status->plane_count; j++)
9248 			dummy_updates[j].surface = status->plane_states[0];
9249 
9250 
9251 		mutex_lock(&dm->dc_lock);
9252 		dc_commit_updates_for_stream(dm->dc,
9253 						     dummy_updates,
9254 						     status->plane_count,
9255 						     dm_new_crtc_state->stream,
9256 						     &stream_update,
9257 						     dc_state);
9258 		mutex_unlock(&dm->dc_lock);
9259 	}
9260 
9261 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9262 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9263 				      new_crtc_state, i) {
9264 		if (old_crtc_state->active && !new_crtc_state->active)
9265 			crtc_disable_count++;
9266 
9267 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9268 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9269 
9270 		/* For freesync config update on crtc state and params for irq */
9271 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9272 
9273 		/* Handle vrr on->off / off->on transitions */
9274 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9275 						dm_new_crtc_state);
9276 	}
9277 
9278 	/**
9279 	 * Enable interrupts for CRTCs that are newly enabled or went through
9280 	 * a modeset. It was intentionally deferred until after the front end
9281 	 * state was modified to wait until the OTG was on and so the IRQ
9282 	 * handlers didn't access stale or invalid state.
9283 	 */
9284 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9285 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9286 #ifdef CONFIG_DEBUG_FS
9287 		bool configure_crc = false;
9288 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9289 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9290 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9291 #endif
9292 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9293 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9294 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9295 #endif
9296 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9297 
9298 		if (new_crtc_state->active &&
9299 		    (!old_crtc_state->active ||
9300 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9301 			dc_stream_retain(dm_new_crtc_state->stream);
9302 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9303 			manage_dm_interrupts(adev, acrtc, true);
9304 
9305 #ifdef CONFIG_DEBUG_FS
9306 			/**
9307 			 * Frontend may have changed so reapply the CRC capture
9308 			 * settings for the stream.
9309 			 */
9310 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9311 
9312 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9313 				configure_crc = true;
9314 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9315 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9316 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9317 					acrtc->dm_irq_params.crc_window.update_win = true;
9318 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9319 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9320 					crc_rd_wrk->crtc = crtc;
9321 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9322 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9323 				}
9324 #endif
9325 			}
9326 
9327 			if (configure_crc)
9328 				if (amdgpu_dm_crtc_configure_crc_source(
9329 					crtc, dm_new_crtc_state, cur_crc_src))
9330 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9331 #endif
9332 		}
9333 	}
9334 
9335 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9336 		if (new_crtc_state->async_flip)
9337 			wait_for_vblank = false;
9338 
9339 	/* update planes when needed per crtc*/
9340 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9341 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9342 
9343 		if (dm_new_crtc_state->stream)
9344 			amdgpu_dm_commit_planes(state, dc_state, dev,
9345 						dm, crtc, wait_for_vblank);
9346 	}
9347 
9348 	/* Update audio instances for each connector. */
9349 	amdgpu_dm_commit_audio(dev, state);
9350 
9351 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9352 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9353 	/* restore the backlight level */
9354 	for (i = 0; i < dm->num_of_edps; i++) {
9355 		if (dm->backlight_dev[i] &&
9356 		    (dm->actual_brightness[i] != dm->brightness[i]))
9357 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9358 	}
9359 #endif
9360 	/*
9361 	 * send vblank event on all events not handled in flip and
9362 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9363 	 */
9364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9365 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9366 
9367 		if (new_crtc_state->event)
9368 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9369 
9370 		new_crtc_state->event = NULL;
9371 	}
9372 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9373 
9374 	/* Signal HW programming completion */
9375 	drm_atomic_helper_commit_hw_done(state);
9376 
9377 	if (wait_for_vblank)
9378 		drm_atomic_helper_wait_for_flip_done(dev, state);
9379 
9380 	drm_atomic_helper_cleanup_planes(dev, state);
9381 
9382 	/* return the stolen vga memory back to VRAM */
9383 	if (!adev->mman.keep_stolen_vga_memory)
9384 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9385 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9386 
9387 	/*
9388 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9389 	 * so we can put the GPU into runtime suspend if we're not driving any
9390 	 * displays anymore
9391 	 */
9392 	for (i = 0; i < crtc_disable_count; i++)
9393 		pm_runtime_put_autosuspend(dev->dev);
9394 	pm_runtime_mark_last_busy(dev->dev);
9395 
9396 	if (dc_state_temp)
9397 		dc_release_state(dc_state_temp);
9398 }
9399 
9400 
9401 static int dm_force_atomic_commit(struct drm_connector *connector)
9402 {
9403 	int ret = 0;
9404 	struct drm_device *ddev = connector->dev;
9405 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9406 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9407 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9408 	struct drm_connector_state *conn_state;
9409 	struct drm_crtc_state *crtc_state;
9410 	struct drm_plane_state *plane_state;
9411 
9412 	if (!state)
9413 		return -ENOMEM;
9414 
9415 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9416 
9417 	/* Construct an atomic state to restore previous display setting */
9418 
9419 	/*
9420 	 * Attach connectors to drm_atomic_state
9421 	 */
9422 	conn_state = drm_atomic_get_connector_state(state, connector);
9423 
9424 	ret = PTR_ERR_OR_ZERO(conn_state);
9425 	if (ret)
9426 		goto out;
9427 
9428 	/* Attach crtc to drm_atomic_state*/
9429 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9430 
9431 	ret = PTR_ERR_OR_ZERO(crtc_state);
9432 	if (ret)
9433 		goto out;
9434 
9435 	/* force a restore */
9436 	crtc_state->mode_changed = true;
9437 
9438 	/* Attach plane to drm_atomic_state */
9439 	plane_state = drm_atomic_get_plane_state(state, plane);
9440 
9441 	ret = PTR_ERR_OR_ZERO(plane_state);
9442 	if (ret)
9443 		goto out;
9444 
9445 	/* Call commit internally with the state we just constructed */
9446 	ret = drm_atomic_commit(state);
9447 
9448 out:
9449 	drm_atomic_state_put(state);
9450 	if (ret)
9451 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9452 
9453 	return ret;
9454 }
9455 
9456 /*
9457  * This function handles all cases when set mode does not come upon hotplug.
9458  * This includes when a display is unplugged then plugged back into the
9459  * same port and when running without usermode desktop manager supprot
9460  */
9461 void dm_restore_drm_connector_state(struct drm_device *dev,
9462 				    struct drm_connector *connector)
9463 {
9464 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9465 	struct amdgpu_crtc *disconnected_acrtc;
9466 	struct dm_crtc_state *acrtc_state;
9467 
9468 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9469 		return;
9470 
9471 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9472 	if (!disconnected_acrtc)
9473 		return;
9474 
9475 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9476 	if (!acrtc_state->stream)
9477 		return;
9478 
9479 	/*
9480 	 * If the previous sink is not released and different from the current,
9481 	 * we deduce we are in a state where we can not rely on usermode call
9482 	 * to turn on the display, so we do it here
9483 	 */
9484 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9485 		dm_force_atomic_commit(&aconnector->base);
9486 }
9487 
9488 /*
9489  * Grabs all modesetting locks to serialize against any blocking commits,
9490  * Waits for completion of all non blocking commits.
9491  */
9492 static int do_aquire_global_lock(struct drm_device *dev,
9493 				 struct drm_atomic_state *state)
9494 {
9495 	struct drm_crtc *crtc;
9496 	struct drm_crtc_commit *commit;
9497 	long ret;
9498 
9499 	/*
9500 	 * Adding all modeset locks to aquire_ctx will
9501 	 * ensure that when the framework release it the
9502 	 * extra locks we are locking here will get released to
9503 	 */
9504 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9505 	if (ret)
9506 		return ret;
9507 
9508 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9509 		spin_lock(&crtc->commit_lock);
9510 		commit = list_first_entry_or_null(&crtc->commit_list,
9511 				struct drm_crtc_commit, commit_entry);
9512 		if (commit)
9513 			drm_crtc_commit_get(commit);
9514 		spin_unlock(&crtc->commit_lock);
9515 
9516 		if (!commit)
9517 			continue;
9518 
9519 		/*
9520 		 * Make sure all pending HW programming completed and
9521 		 * page flips done
9522 		 */
9523 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9524 
9525 		if (ret > 0)
9526 			ret = wait_for_completion_interruptible_timeout(
9527 					&commit->flip_done, 10*HZ);
9528 
9529 		if (ret == 0)
9530 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9531 				  "timed out\n", crtc->base.id, crtc->name);
9532 
9533 		drm_crtc_commit_put(commit);
9534 	}
9535 
9536 	return ret < 0 ? ret : 0;
9537 }
9538 
9539 static void get_freesync_config_for_crtc(
9540 	struct dm_crtc_state *new_crtc_state,
9541 	struct dm_connector_state *new_con_state)
9542 {
9543 	struct mod_freesync_config config = {0};
9544 	struct amdgpu_dm_connector *aconnector =
9545 			to_amdgpu_dm_connector(new_con_state->base.connector);
9546 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9547 	int vrefresh = drm_mode_vrefresh(mode);
9548 	bool fs_vid_mode = false;
9549 
9550 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9551 					vrefresh >= aconnector->min_vfreq &&
9552 					vrefresh <= aconnector->max_vfreq;
9553 
9554 	if (new_crtc_state->vrr_supported) {
9555 		new_crtc_state->stream->ignore_msa_timing_param = true;
9556 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9557 
9558 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9559 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9560 		config.vsif_supported = true;
9561 		config.btr = true;
9562 
9563 		if (fs_vid_mode) {
9564 			config.state = VRR_STATE_ACTIVE_FIXED;
9565 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9566 			goto out;
9567 		} else if (new_crtc_state->base.vrr_enabled) {
9568 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9569 		} else {
9570 			config.state = VRR_STATE_INACTIVE;
9571 		}
9572 	}
9573 out:
9574 	new_crtc_state->freesync_config = config;
9575 }
9576 
9577 static void reset_freesync_config_for_crtc(
9578 	struct dm_crtc_state *new_crtc_state)
9579 {
9580 	new_crtc_state->vrr_supported = false;
9581 
9582 	memset(&new_crtc_state->vrr_infopacket, 0,
9583 	       sizeof(new_crtc_state->vrr_infopacket));
9584 }
9585 
9586 static bool
9587 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9588 				 struct drm_crtc_state *new_crtc_state)
9589 {
9590 	struct drm_display_mode old_mode, new_mode;
9591 
9592 	if (!old_crtc_state || !new_crtc_state)
9593 		return false;
9594 
9595 	old_mode = old_crtc_state->mode;
9596 	new_mode = new_crtc_state->mode;
9597 
9598 	if (old_mode.clock       == new_mode.clock &&
9599 	    old_mode.hdisplay    == new_mode.hdisplay &&
9600 	    old_mode.vdisplay    == new_mode.vdisplay &&
9601 	    old_mode.htotal      == new_mode.htotal &&
9602 	    old_mode.vtotal      != new_mode.vtotal &&
9603 	    old_mode.hsync_start == new_mode.hsync_start &&
9604 	    old_mode.vsync_start != new_mode.vsync_start &&
9605 	    old_mode.hsync_end   == new_mode.hsync_end &&
9606 	    old_mode.vsync_end   != new_mode.vsync_end &&
9607 	    old_mode.hskew       == new_mode.hskew &&
9608 	    old_mode.vscan       == new_mode.vscan &&
9609 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9610 	    (new_mode.vsync_end - new_mode.vsync_start))
9611 		return true;
9612 
9613 	return false;
9614 }
9615 
9616 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9617 	uint64_t num, den, res;
9618 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9619 
9620 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9621 
9622 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9623 	den = (unsigned long long)new_crtc_state->mode.htotal *
9624 	      (unsigned long long)new_crtc_state->mode.vtotal;
9625 
9626 	res = div_u64(num, den);
9627 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9628 }
9629 
9630 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9631 				struct drm_atomic_state *state,
9632 				struct drm_crtc *crtc,
9633 				struct drm_crtc_state *old_crtc_state,
9634 				struct drm_crtc_state *new_crtc_state,
9635 				bool enable,
9636 				bool *lock_and_validation_needed)
9637 {
9638 	struct dm_atomic_state *dm_state = NULL;
9639 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9640 	struct dc_stream_state *new_stream;
9641 	int ret = 0;
9642 
9643 	/*
9644 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9645 	 * update changed items
9646 	 */
9647 	struct amdgpu_crtc *acrtc = NULL;
9648 	struct amdgpu_dm_connector *aconnector = NULL;
9649 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9650 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9651 
9652 	new_stream = NULL;
9653 
9654 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9655 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9656 	acrtc = to_amdgpu_crtc(crtc);
9657 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9658 
9659 	/* TODO This hack should go away */
9660 	if (aconnector && enable) {
9661 		/* Make sure fake sink is created in plug-in scenario */
9662 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9663 							    &aconnector->base);
9664 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9665 							    &aconnector->base);
9666 
9667 		if (IS_ERR(drm_new_conn_state)) {
9668 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9669 			goto fail;
9670 		}
9671 
9672 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9673 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9674 
9675 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9676 			goto skip_modeset;
9677 
9678 		new_stream = create_validate_stream_for_sink(aconnector,
9679 							     &new_crtc_state->mode,
9680 							     dm_new_conn_state,
9681 							     dm_old_crtc_state->stream);
9682 
9683 		/*
9684 		 * we can have no stream on ACTION_SET if a display
9685 		 * was disconnected during S3, in this case it is not an
9686 		 * error, the OS will be updated after detection, and
9687 		 * will do the right thing on next atomic commit
9688 		 */
9689 
9690 		if (!new_stream) {
9691 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9692 					__func__, acrtc->base.base.id);
9693 			ret = -ENOMEM;
9694 			goto fail;
9695 		}
9696 
9697 		/*
9698 		 * TODO: Check VSDB bits to decide whether this should
9699 		 * be enabled or not.
9700 		 */
9701 		new_stream->triggered_crtc_reset.enabled =
9702 			dm->force_timing_sync;
9703 
9704 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9705 
9706 		ret = fill_hdr_info_packet(drm_new_conn_state,
9707 					   &new_stream->hdr_static_metadata);
9708 		if (ret)
9709 			goto fail;
9710 
9711 		/*
9712 		 * If we already removed the old stream from the context
9713 		 * (and set the new stream to NULL) then we can't reuse
9714 		 * the old stream even if the stream and scaling are unchanged.
9715 		 * We'll hit the BUG_ON and black screen.
9716 		 *
9717 		 * TODO: Refactor this function to allow this check to work
9718 		 * in all conditions.
9719 		 */
9720 		if (amdgpu_freesync_vid_mode &&
9721 		    dm_new_crtc_state->stream &&
9722 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9723 			goto skip_modeset;
9724 
9725 		if (dm_new_crtc_state->stream &&
9726 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9727 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9728 			new_crtc_state->mode_changed = false;
9729 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9730 					 new_crtc_state->mode_changed);
9731 		}
9732 	}
9733 
9734 	/* mode_changed flag may get updated above, need to check again */
9735 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9736 		goto skip_modeset;
9737 
9738 	DRM_DEBUG_ATOMIC(
9739 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9740 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9741 		"connectors_changed:%d\n",
9742 		acrtc->crtc_id,
9743 		new_crtc_state->enable,
9744 		new_crtc_state->active,
9745 		new_crtc_state->planes_changed,
9746 		new_crtc_state->mode_changed,
9747 		new_crtc_state->active_changed,
9748 		new_crtc_state->connectors_changed);
9749 
9750 	/* Remove stream for any changed/disabled CRTC */
9751 	if (!enable) {
9752 
9753 		if (!dm_old_crtc_state->stream)
9754 			goto skip_modeset;
9755 
9756 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9757 		    is_timing_unchanged_for_freesync(new_crtc_state,
9758 						     old_crtc_state)) {
9759 			new_crtc_state->mode_changed = false;
9760 			DRM_DEBUG_DRIVER(
9761 				"Mode change not required for front porch change, "
9762 				"setting mode_changed to %d",
9763 				new_crtc_state->mode_changed);
9764 
9765 			set_freesync_fixed_config(dm_new_crtc_state);
9766 
9767 			goto skip_modeset;
9768 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9769 			   is_freesync_video_mode(&new_crtc_state->mode,
9770 						  aconnector)) {
9771 			struct drm_display_mode *high_mode;
9772 
9773 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
9774 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9775 				set_freesync_fixed_config(dm_new_crtc_state);
9776 			}
9777 		}
9778 
9779 		ret = dm_atomic_get_state(state, &dm_state);
9780 		if (ret)
9781 			goto fail;
9782 
9783 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9784 				crtc->base.id);
9785 
9786 		/* i.e. reset mode */
9787 		if (dc_remove_stream_from_ctx(
9788 				dm->dc,
9789 				dm_state->context,
9790 				dm_old_crtc_state->stream) != DC_OK) {
9791 			ret = -EINVAL;
9792 			goto fail;
9793 		}
9794 
9795 		dc_stream_release(dm_old_crtc_state->stream);
9796 		dm_new_crtc_state->stream = NULL;
9797 
9798 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9799 
9800 		*lock_and_validation_needed = true;
9801 
9802 	} else {/* Add stream for any updated/enabled CRTC */
9803 		/*
9804 		 * Quick fix to prevent NULL pointer on new_stream when
9805 		 * added MST connectors not found in existing crtc_state in the chained mode
9806 		 * TODO: need to dig out the root cause of that
9807 		 */
9808 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9809 			goto skip_modeset;
9810 
9811 		if (modereset_required(new_crtc_state))
9812 			goto skip_modeset;
9813 
9814 		if (modeset_required(new_crtc_state, new_stream,
9815 				     dm_old_crtc_state->stream)) {
9816 
9817 			WARN_ON(dm_new_crtc_state->stream);
9818 
9819 			ret = dm_atomic_get_state(state, &dm_state);
9820 			if (ret)
9821 				goto fail;
9822 
9823 			dm_new_crtc_state->stream = new_stream;
9824 
9825 			dc_stream_retain(new_stream);
9826 
9827 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9828 					 crtc->base.id);
9829 
9830 			if (dc_add_stream_to_ctx(
9831 					dm->dc,
9832 					dm_state->context,
9833 					dm_new_crtc_state->stream) != DC_OK) {
9834 				ret = -EINVAL;
9835 				goto fail;
9836 			}
9837 
9838 			*lock_and_validation_needed = true;
9839 		}
9840 	}
9841 
9842 skip_modeset:
9843 	/* Release extra reference */
9844 	if (new_stream)
9845 		 dc_stream_release(new_stream);
9846 
9847 	/*
9848 	 * We want to do dc stream updates that do not require a
9849 	 * full modeset below.
9850 	 */
9851 	if (!(enable && aconnector && new_crtc_state->active))
9852 		return 0;
9853 	/*
9854 	 * Given above conditions, the dc state cannot be NULL because:
9855 	 * 1. We're in the process of enabling CRTCs (just been added
9856 	 *    to the dc context, or already is on the context)
9857 	 * 2. Has a valid connector attached, and
9858 	 * 3. Is currently active and enabled.
9859 	 * => The dc stream state currently exists.
9860 	 */
9861 	BUG_ON(dm_new_crtc_state->stream == NULL);
9862 
9863 	/* Scaling or underscan settings */
9864 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9865 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9866 		update_stream_scaling_settings(
9867 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9868 
9869 	/* ABM settings */
9870 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9871 
9872 	/*
9873 	 * Color management settings. We also update color properties
9874 	 * when a modeset is needed, to ensure it gets reprogrammed.
9875 	 */
9876 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9877 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9878 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9879 		if (ret)
9880 			goto fail;
9881 	}
9882 
9883 	/* Update Freesync settings. */
9884 	get_freesync_config_for_crtc(dm_new_crtc_state,
9885 				     dm_new_conn_state);
9886 
9887 	return ret;
9888 
9889 fail:
9890 	if (new_stream)
9891 		dc_stream_release(new_stream);
9892 	return ret;
9893 }
9894 
9895 static bool should_reset_plane(struct drm_atomic_state *state,
9896 			       struct drm_plane *plane,
9897 			       struct drm_plane_state *old_plane_state,
9898 			       struct drm_plane_state *new_plane_state)
9899 {
9900 	struct drm_plane *other;
9901 	struct drm_plane_state *old_other_state, *new_other_state;
9902 	struct drm_crtc_state *new_crtc_state;
9903 	int i;
9904 
9905 	/*
9906 	 * TODO: Remove this hack once the checks below are sufficient
9907 	 * enough to determine when we need to reset all the planes on
9908 	 * the stream.
9909 	 */
9910 	if (state->allow_modeset)
9911 		return true;
9912 
9913 	/* Exit early if we know that we're adding or removing the plane. */
9914 	if (old_plane_state->crtc != new_plane_state->crtc)
9915 		return true;
9916 
9917 	/* old crtc == new_crtc == NULL, plane not in context. */
9918 	if (!new_plane_state->crtc)
9919 		return false;
9920 
9921 	new_crtc_state =
9922 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9923 
9924 	if (!new_crtc_state)
9925 		return true;
9926 
9927 	/* CRTC Degamma changes currently require us to recreate planes. */
9928 	if (new_crtc_state->color_mgmt_changed)
9929 		return true;
9930 
9931 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9932 		return true;
9933 
9934 	/*
9935 	 * If there are any new primary or overlay planes being added or
9936 	 * removed then the z-order can potentially change. To ensure
9937 	 * correct z-order and pipe acquisition the current DC architecture
9938 	 * requires us to remove and recreate all existing planes.
9939 	 *
9940 	 * TODO: Come up with a more elegant solution for this.
9941 	 */
9942 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9943 		struct amdgpu_framebuffer *old_afb, *new_afb;
9944 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9945 			continue;
9946 
9947 		if (old_other_state->crtc != new_plane_state->crtc &&
9948 		    new_other_state->crtc != new_plane_state->crtc)
9949 			continue;
9950 
9951 		if (old_other_state->crtc != new_other_state->crtc)
9952 			return true;
9953 
9954 		/* Src/dst size and scaling updates. */
9955 		if (old_other_state->src_w != new_other_state->src_w ||
9956 		    old_other_state->src_h != new_other_state->src_h ||
9957 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9958 		    old_other_state->crtc_h != new_other_state->crtc_h)
9959 			return true;
9960 
9961 		/* Rotation / mirroring updates. */
9962 		if (old_other_state->rotation != new_other_state->rotation)
9963 			return true;
9964 
9965 		/* Blending updates. */
9966 		if (old_other_state->pixel_blend_mode !=
9967 		    new_other_state->pixel_blend_mode)
9968 			return true;
9969 
9970 		/* Alpha updates. */
9971 		if (old_other_state->alpha != new_other_state->alpha)
9972 			return true;
9973 
9974 		/* Colorspace changes. */
9975 		if (old_other_state->color_range != new_other_state->color_range ||
9976 		    old_other_state->color_encoding != new_other_state->color_encoding)
9977 			return true;
9978 
9979 		/* Framebuffer checks fall at the end. */
9980 		if (!old_other_state->fb || !new_other_state->fb)
9981 			continue;
9982 
9983 		/* Pixel format changes can require bandwidth updates. */
9984 		if (old_other_state->fb->format != new_other_state->fb->format)
9985 			return true;
9986 
9987 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9988 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9989 
9990 		/* Tiling and DCC changes also require bandwidth updates. */
9991 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9992 		    old_afb->base.modifier != new_afb->base.modifier)
9993 			return true;
9994 	}
9995 
9996 	return false;
9997 }
9998 
9999 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10000 			      struct drm_plane_state *new_plane_state,
10001 			      struct drm_framebuffer *fb)
10002 {
10003 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10004 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10005 	unsigned int pitch;
10006 	bool linear;
10007 
10008 	if (fb->width > new_acrtc->max_cursor_width ||
10009 	    fb->height > new_acrtc->max_cursor_height) {
10010 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10011 				 new_plane_state->fb->width,
10012 				 new_plane_state->fb->height);
10013 		return -EINVAL;
10014 	}
10015 	if (new_plane_state->src_w != fb->width << 16 ||
10016 	    new_plane_state->src_h != fb->height << 16) {
10017 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10018 		return -EINVAL;
10019 	}
10020 
10021 	/* Pitch in pixels */
10022 	pitch = fb->pitches[0] / fb->format->cpp[0];
10023 
10024 	if (fb->width != pitch) {
10025 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10026 				 fb->width, pitch);
10027 		return -EINVAL;
10028 	}
10029 
10030 	switch (pitch) {
10031 	case 64:
10032 	case 128:
10033 	case 256:
10034 		/* FB pitch is supported by cursor plane */
10035 		break;
10036 	default:
10037 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10038 		return -EINVAL;
10039 	}
10040 
10041 	/* Core DRM takes care of checking FB modifiers, so we only need to
10042 	 * check tiling flags when the FB doesn't have a modifier. */
10043 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10044 		if (adev->family < AMDGPU_FAMILY_AI) {
10045 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10046 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10047 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10048 		} else {
10049 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10050 		}
10051 		if (!linear) {
10052 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10053 			return -EINVAL;
10054 		}
10055 	}
10056 
10057 	return 0;
10058 }
10059 
10060 static int dm_update_plane_state(struct dc *dc,
10061 				 struct drm_atomic_state *state,
10062 				 struct drm_plane *plane,
10063 				 struct drm_plane_state *old_plane_state,
10064 				 struct drm_plane_state *new_plane_state,
10065 				 bool enable,
10066 				 bool *lock_and_validation_needed)
10067 {
10068 
10069 	struct dm_atomic_state *dm_state = NULL;
10070 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10071 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10072 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10073 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10074 	struct amdgpu_crtc *new_acrtc;
10075 	bool needs_reset;
10076 	int ret = 0;
10077 
10078 
10079 	new_plane_crtc = new_plane_state->crtc;
10080 	old_plane_crtc = old_plane_state->crtc;
10081 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10082 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10083 
10084 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10085 		if (!enable || !new_plane_crtc ||
10086 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10087 			return 0;
10088 
10089 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10090 
10091 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10092 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10093 			return -EINVAL;
10094 		}
10095 
10096 		if (new_plane_state->fb) {
10097 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10098 						 new_plane_state->fb);
10099 			if (ret)
10100 				return ret;
10101 		}
10102 
10103 		return 0;
10104 	}
10105 
10106 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10107 					 new_plane_state);
10108 
10109 	/* Remove any changed/removed planes */
10110 	if (!enable) {
10111 		if (!needs_reset)
10112 			return 0;
10113 
10114 		if (!old_plane_crtc)
10115 			return 0;
10116 
10117 		old_crtc_state = drm_atomic_get_old_crtc_state(
10118 				state, old_plane_crtc);
10119 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10120 
10121 		if (!dm_old_crtc_state->stream)
10122 			return 0;
10123 
10124 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10125 				plane->base.id, old_plane_crtc->base.id);
10126 
10127 		ret = dm_atomic_get_state(state, &dm_state);
10128 		if (ret)
10129 			return ret;
10130 
10131 		if (!dc_remove_plane_from_context(
10132 				dc,
10133 				dm_old_crtc_state->stream,
10134 				dm_old_plane_state->dc_state,
10135 				dm_state->context)) {
10136 
10137 			return -EINVAL;
10138 		}
10139 
10140 
10141 		dc_plane_state_release(dm_old_plane_state->dc_state);
10142 		dm_new_plane_state->dc_state = NULL;
10143 
10144 		*lock_and_validation_needed = true;
10145 
10146 	} else { /* Add new planes */
10147 		struct dc_plane_state *dc_new_plane_state;
10148 
10149 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10150 			return 0;
10151 
10152 		if (!new_plane_crtc)
10153 			return 0;
10154 
10155 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10156 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10157 
10158 		if (!dm_new_crtc_state->stream)
10159 			return 0;
10160 
10161 		if (!needs_reset)
10162 			return 0;
10163 
10164 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10165 		if (ret)
10166 			return ret;
10167 
10168 		WARN_ON(dm_new_plane_state->dc_state);
10169 
10170 		dc_new_plane_state = dc_create_plane_state(dc);
10171 		if (!dc_new_plane_state)
10172 			return -ENOMEM;
10173 
10174 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10175 				 plane->base.id, new_plane_crtc->base.id);
10176 
10177 		ret = fill_dc_plane_attributes(
10178 			drm_to_adev(new_plane_crtc->dev),
10179 			dc_new_plane_state,
10180 			new_plane_state,
10181 			new_crtc_state);
10182 		if (ret) {
10183 			dc_plane_state_release(dc_new_plane_state);
10184 			return ret;
10185 		}
10186 
10187 		ret = dm_atomic_get_state(state, &dm_state);
10188 		if (ret) {
10189 			dc_plane_state_release(dc_new_plane_state);
10190 			return ret;
10191 		}
10192 
10193 		/*
10194 		 * Any atomic check errors that occur after this will
10195 		 * not need a release. The plane state will be attached
10196 		 * to the stream, and therefore part of the atomic
10197 		 * state. It'll be released when the atomic state is
10198 		 * cleaned.
10199 		 */
10200 		if (!dc_add_plane_to_context(
10201 				dc,
10202 				dm_new_crtc_state->stream,
10203 				dc_new_plane_state,
10204 				dm_state->context)) {
10205 
10206 			dc_plane_state_release(dc_new_plane_state);
10207 			return -EINVAL;
10208 		}
10209 
10210 		dm_new_plane_state->dc_state = dc_new_plane_state;
10211 
10212 		/* Tell DC to do a full surface update every time there
10213 		 * is a plane change. Inefficient, but works for now.
10214 		 */
10215 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10216 
10217 		*lock_and_validation_needed = true;
10218 	}
10219 
10220 
10221 	return ret;
10222 }
10223 
10224 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10225 				struct drm_crtc *crtc,
10226 				struct drm_crtc_state *new_crtc_state)
10227 {
10228 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10229 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10230 
10231 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10232 	 * cursor per pipe but it's going to inherit the scaling and
10233 	 * positioning from the underlying pipe. Check the cursor plane's
10234 	 * blending properties match the primary plane's. */
10235 
10236 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10237 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10238 	if (!new_cursor_state || !new_primary_state ||
10239 	    !new_cursor_state->fb || !new_primary_state->fb) {
10240 		return 0;
10241 	}
10242 
10243 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10244 			 (new_cursor_state->src_w >> 16);
10245 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10246 			 (new_cursor_state->src_h >> 16);
10247 
10248 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10249 			 (new_primary_state->src_w >> 16);
10250 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10251 			 (new_primary_state->src_h >> 16);
10252 
10253 	if (cursor_scale_w != primary_scale_w ||
10254 	    cursor_scale_h != primary_scale_h) {
10255 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10256 		return -EINVAL;
10257 	}
10258 
10259 	return 0;
10260 }
10261 
10262 #if defined(CONFIG_DRM_AMD_DC_DCN)
10263 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10264 {
10265 	struct drm_connector *connector;
10266 	struct drm_connector_state *conn_state, *old_conn_state;
10267 	struct amdgpu_dm_connector *aconnector = NULL;
10268 	int i;
10269 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10270 		if (!conn_state->crtc)
10271 			conn_state = old_conn_state;
10272 
10273 		if (conn_state->crtc != crtc)
10274 			continue;
10275 
10276 		aconnector = to_amdgpu_dm_connector(connector);
10277 		if (!aconnector->port || !aconnector->mst_port)
10278 			aconnector = NULL;
10279 		else
10280 			break;
10281 	}
10282 
10283 	if (!aconnector)
10284 		return 0;
10285 
10286 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10287 }
10288 #endif
10289 
10290 static int validate_overlay(struct drm_atomic_state *state)
10291 {
10292 	int i;
10293 	struct drm_plane *plane;
10294 	struct drm_plane_state *new_plane_state;
10295 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10296 
10297 	/* Check if primary plane is contained inside overlay */
10298 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10299 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10300 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10301 				return 0;
10302 
10303 			overlay_state = new_plane_state;
10304 			continue;
10305 		}
10306 	}
10307 
10308 	/* check if we're making changes to the overlay plane */
10309 	if (!overlay_state)
10310 		return 0;
10311 
10312 	/* check if overlay plane is enabled */
10313 	if (!overlay_state->crtc)
10314 		return 0;
10315 
10316 	/* find the primary plane for the CRTC that the overlay is enabled on */
10317 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10318 	if (IS_ERR(primary_state))
10319 		return PTR_ERR(primary_state);
10320 
10321 	/* check if primary plane is enabled */
10322 	if (!primary_state->crtc)
10323 		return 0;
10324 
10325 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10326 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10327 	    primary_state->crtc_y < overlay_state->crtc_y ||
10328 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10329 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10330 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10331 		return -EINVAL;
10332 	}
10333 
10334 	return 0;
10335 }
10336 
10337 /**
10338  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10339  * @dev: The DRM device
10340  * @state: The atomic state to commit
10341  *
10342  * Validate that the given atomic state is programmable by DC into hardware.
10343  * This involves constructing a &struct dc_state reflecting the new hardware
10344  * state we wish to commit, then querying DC to see if it is programmable. It's
10345  * important not to modify the existing DC state. Otherwise, atomic_check
10346  * may unexpectedly commit hardware changes.
10347  *
10348  * When validating the DC state, it's important that the right locks are
10349  * acquired. For full updates case which removes/adds/updates streams on one
10350  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10351  * that any such full update commit will wait for completion of any outstanding
10352  * flip using DRMs synchronization events.
10353  *
10354  * Note that DM adds the affected connectors for all CRTCs in state, when that
10355  * might not seem necessary. This is because DC stream creation requires the
10356  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10357  * be possible but non-trivial - a possible TODO item.
10358  *
10359  * Return: -Error code if validation failed.
10360  */
10361 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10362 				  struct drm_atomic_state *state)
10363 {
10364 	struct amdgpu_device *adev = drm_to_adev(dev);
10365 	struct dm_atomic_state *dm_state = NULL;
10366 	struct dc *dc = adev->dm.dc;
10367 	struct drm_connector *connector;
10368 	struct drm_connector_state *old_con_state, *new_con_state;
10369 	struct drm_crtc *crtc;
10370 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10371 	struct drm_plane *plane;
10372 	struct drm_plane_state *old_plane_state, *new_plane_state;
10373 	enum dc_status status;
10374 	int ret, i;
10375 	bool lock_and_validation_needed = false;
10376 	struct dm_crtc_state *dm_old_crtc_state;
10377 #if defined(CONFIG_DRM_AMD_DC_DCN)
10378 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10379 #endif
10380 
10381 	trace_amdgpu_dm_atomic_check_begin(state);
10382 
10383 	ret = drm_atomic_helper_check_modeset(dev, state);
10384 	if (ret)
10385 		goto fail;
10386 
10387 	/* Check connector changes */
10388 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10389 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10390 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10391 
10392 		/* Skip connectors that are disabled or part of modeset already. */
10393 		if (!old_con_state->crtc && !new_con_state->crtc)
10394 			continue;
10395 
10396 		if (!new_con_state->crtc)
10397 			continue;
10398 
10399 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10400 		if (IS_ERR(new_crtc_state)) {
10401 			ret = PTR_ERR(new_crtc_state);
10402 			goto fail;
10403 		}
10404 
10405 		if (dm_old_con_state->abm_level !=
10406 		    dm_new_con_state->abm_level)
10407 			new_crtc_state->connectors_changed = true;
10408 	}
10409 
10410 #if defined(CONFIG_DRM_AMD_DC_DCN)
10411 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10412 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10413 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10414 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10415 				if (ret)
10416 					goto fail;
10417 			}
10418 		}
10419 	}
10420 #endif
10421 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10422 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10423 
10424 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10425 		    !new_crtc_state->color_mgmt_changed &&
10426 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10427 			dm_old_crtc_state->dsc_force_changed == false)
10428 			continue;
10429 
10430 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10431 		if (ret)
10432 			goto fail;
10433 
10434 		if (!new_crtc_state->enable)
10435 			continue;
10436 
10437 		ret = drm_atomic_add_affected_connectors(state, crtc);
10438 		if (ret)
10439 			return ret;
10440 
10441 		ret = drm_atomic_add_affected_planes(state, crtc);
10442 		if (ret)
10443 			goto fail;
10444 
10445 		if (dm_old_crtc_state->dsc_force_changed)
10446 			new_crtc_state->mode_changed = true;
10447 	}
10448 
10449 	/*
10450 	 * Add all primary and overlay planes on the CRTC to the state
10451 	 * whenever a plane is enabled to maintain correct z-ordering
10452 	 * and to enable fast surface updates.
10453 	 */
10454 	drm_for_each_crtc(crtc, dev) {
10455 		bool modified = false;
10456 
10457 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10458 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10459 				continue;
10460 
10461 			if (new_plane_state->crtc == crtc ||
10462 			    old_plane_state->crtc == crtc) {
10463 				modified = true;
10464 				break;
10465 			}
10466 		}
10467 
10468 		if (!modified)
10469 			continue;
10470 
10471 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10472 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10473 				continue;
10474 
10475 			new_plane_state =
10476 				drm_atomic_get_plane_state(state, plane);
10477 
10478 			if (IS_ERR(new_plane_state)) {
10479 				ret = PTR_ERR(new_plane_state);
10480 				goto fail;
10481 			}
10482 		}
10483 	}
10484 
10485 	/* Remove exiting planes if they are modified */
10486 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10487 		ret = dm_update_plane_state(dc, state, plane,
10488 					    old_plane_state,
10489 					    new_plane_state,
10490 					    false,
10491 					    &lock_and_validation_needed);
10492 		if (ret)
10493 			goto fail;
10494 	}
10495 
10496 	/* Disable all crtcs which require disable */
10497 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10498 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10499 					   old_crtc_state,
10500 					   new_crtc_state,
10501 					   false,
10502 					   &lock_and_validation_needed);
10503 		if (ret)
10504 			goto fail;
10505 	}
10506 
10507 	/* Enable all crtcs which require enable */
10508 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10509 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10510 					   old_crtc_state,
10511 					   new_crtc_state,
10512 					   true,
10513 					   &lock_and_validation_needed);
10514 		if (ret)
10515 			goto fail;
10516 	}
10517 
10518 	ret = validate_overlay(state);
10519 	if (ret)
10520 		goto fail;
10521 
10522 	/* Add new/modified planes */
10523 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10524 		ret = dm_update_plane_state(dc, state, plane,
10525 					    old_plane_state,
10526 					    new_plane_state,
10527 					    true,
10528 					    &lock_and_validation_needed);
10529 		if (ret)
10530 			goto fail;
10531 	}
10532 
10533 	/* Run this here since we want to validate the streams we created */
10534 	ret = drm_atomic_helper_check_planes(dev, state);
10535 	if (ret)
10536 		goto fail;
10537 
10538 	/* Check cursor planes scaling */
10539 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10540 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10541 		if (ret)
10542 			goto fail;
10543 	}
10544 
10545 	if (state->legacy_cursor_update) {
10546 		/*
10547 		 * This is a fast cursor update coming from the plane update
10548 		 * helper, check if it can be done asynchronously for better
10549 		 * performance.
10550 		 */
10551 		state->async_update =
10552 			!drm_atomic_helper_async_check(dev, state);
10553 
10554 		/*
10555 		 * Skip the remaining global validation if this is an async
10556 		 * update. Cursor updates can be done without affecting
10557 		 * state or bandwidth calcs and this avoids the performance
10558 		 * penalty of locking the private state object and
10559 		 * allocating a new dc_state.
10560 		 */
10561 		if (state->async_update)
10562 			return 0;
10563 	}
10564 
10565 	/* Check scaling and underscan changes*/
10566 	/* TODO Removed scaling changes validation due to inability to commit
10567 	 * new stream into context w\o causing full reset. Need to
10568 	 * decide how to handle.
10569 	 */
10570 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10571 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10572 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10573 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10574 
10575 		/* Skip any modesets/resets */
10576 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10577 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10578 			continue;
10579 
10580 		/* Skip any thing not scale or underscan changes */
10581 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10582 			continue;
10583 
10584 		lock_and_validation_needed = true;
10585 	}
10586 
10587 	/**
10588 	 * Streams and planes are reset when there are changes that affect
10589 	 * bandwidth. Anything that affects bandwidth needs to go through
10590 	 * DC global validation to ensure that the configuration can be applied
10591 	 * to hardware.
10592 	 *
10593 	 * We have to currently stall out here in atomic_check for outstanding
10594 	 * commits to finish in this case because our IRQ handlers reference
10595 	 * DRM state directly - we can end up disabling interrupts too early
10596 	 * if we don't.
10597 	 *
10598 	 * TODO: Remove this stall and drop DM state private objects.
10599 	 */
10600 	if (lock_and_validation_needed) {
10601 		ret = dm_atomic_get_state(state, &dm_state);
10602 		if (ret)
10603 			goto fail;
10604 
10605 		ret = do_aquire_global_lock(dev, state);
10606 		if (ret)
10607 			goto fail;
10608 
10609 #if defined(CONFIG_DRM_AMD_DC_DCN)
10610 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10611 			goto fail;
10612 
10613 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10614 		if (ret)
10615 			goto fail;
10616 #endif
10617 
10618 		/*
10619 		 * Perform validation of MST topology in the state:
10620 		 * We need to perform MST atomic check before calling
10621 		 * dc_validate_global_state(), or there is a chance
10622 		 * to get stuck in an infinite loop and hang eventually.
10623 		 */
10624 		ret = drm_dp_mst_atomic_check(state);
10625 		if (ret)
10626 			goto fail;
10627 		status = dc_validate_global_state(dc, dm_state->context, false);
10628 		if (status != DC_OK) {
10629 			drm_dbg_atomic(dev,
10630 				       "DC global validation failure: %s (%d)",
10631 				       dc_status_to_str(status), status);
10632 			ret = -EINVAL;
10633 			goto fail;
10634 		}
10635 	} else {
10636 		/*
10637 		 * The commit is a fast update. Fast updates shouldn't change
10638 		 * the DC context, affect global validation, and can have their
10639 		 * commit work done in parallel with other commits not touching
10640 		 * the same resource. If we have a new DC context as part of
10641 		 * the DM atomic state from validation we need to free it and
10642 		 * retain the existing one instead.
10643 		 *
10644 		 * Furthermore, since the DM atomic state only contains the DC
10645 		 * context and can safely be annulled, we can free the state
10646 		 * and clear the associated private object now to free
10647 		 * some memory and avoid a possible use-after-free later.
10648 		 */
10649 
10650 		for (i = 0; i < state->num_private_objs; i++) {
10651 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10652 
10653 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10654 				int j = state->num_private_objs-1;
10655 
10656 				dm_atomic_destroy_state(obj,
10657 						state->private_objs[i].state);
10658 
10659 				/* If i is not at the end of the array then the
10660 				 * last element needs to be moved to where i was
10661 				 * before the array can safely be truncated.
10662 				 */
10663 				if (i != j)
10664 					state->private_objs[i] =
10665 						state->private_objs[j];
10666 
10667 				state->private_objs[j].ptr = NULL;
10668 				state->private_objs[j].state = NULL;
10669 				state->private_objs[j].old_state = NULL;
10670 				state->private_objs[j].new_state = NULL;
10671 
10672 				state->num_private_objs = j;
10673 				break;
10674 			}
10675 		}
10676 	}
10677 
10678 	/* Store the overall update type for use later in atomic check. */
10679 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10680 		struct dm_crtc_state *dm_new_crtc_state =
10681 			to_dm_crtc_state(new_crtc_state);
10682 
10683 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10684 							 UPDATE_TYPE_FULL :
10685 							 UPDATE_TYPE_FAST;
10686 	}
10687 
10688 	/* Must be success */
10689 	WARN_ON(ret);
10690 
10691 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10692 
10693 	return ret;
10694 
10695 fail:
10696 	if (ret == -EDEADLK)
10697 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10698 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10699 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10700 	else
10701 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10702 
10703 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10704 
10705 	return ret;
10706 }
10707 
10708 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10709 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10710 {
10711 	uint8_t dpcd_data;
10712 	bool capable = false;
10713 
10714 	if (amdgpu_dm_connector->dc_link &&
10715 		dm_helpers_dp_read_dpcd(
10716 				NULL,
10717 				amdgpu_dm_connector->dc_link,
10718 				DP_DOWN_STREAM_PORT_COUNT,
10719 				&dpcd_data,
10720 				sizeof(dpcd_data))) {
10721 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10722 	}
10723 
10724 	return capable;
10725 }
10726 
10727 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10728 		unsigned int offset,
10729 		unsigned int total_length,
10730 		uint8_t *data,
10731 		unsigned int length,
10732 		struct amdgpu_hdmi_vsdb_info *vsdb)
10733 {
10734 	bool res;
10735 	union dmub_rb_cmd cmd;
10736 	struct dmub_cmd_send_edid_cea *input;
10737 	struct dmub_cmd_edid_cea_output *output;
10738 
10739 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10740 		return false;
10741 
10742 	memset(&cmd, 0, sizeof(cmd));
10743 
10744 	input = &cmd.edid_cea.data.input;
10745 
10746 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10747 	cmd.edid_cea.header.sub_type = 0;
10748 	cmd.edid_cea.header.payload_bytes =
10749 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10750 	input->offset = offset;
10751 	input->length = length;
10752 	input->total_length = total_length;
10753 	memcpy(input->payload, data, length);
10754 
10755 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10756 	if (!res) {
10757 		DRM_ERROR("EDID CEA parser failed\n");
10758 		return false;
10759 	}
10760 
10761 	output = &cmd.edid_cea.data.output;
10762 
10763 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10764 		if (!output->ack.success) {
10765 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10766 					output->ack.offset);
10767 		}
10768 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10769 		if (!output->amd_vsdb.vsdb_found)
10770 			return false;
10771 
10772 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10773 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10774 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10775 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10776 	} else {
10777 		if (output->type != 0)
10778 			DRM_WARN("Unknown EDID CEA parser results\n");
10779 		return false;
10780 	}
10781 
10782 	return true;
10783 }
10784 
10785 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10786 		uint8_t *edid_ext, int len,
10787 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10788 {
10789 	int i;
10790 
10791 	/* send extension block to DMCU for parsing */
10792 	for (i = 0; i < len; i += 8) {
10793 		bool res;
10794 		int offset;
10795 
10796 		/* send 8 bytes a time */
10797 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10798 			return false;
10799 
10800 		if (i+8 == len) {
10801 			/* EDID block sent completed, expect result */
10802 			int version, min_rate, max_rate;
10803 
10804 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10805 			if (res) {
10806 				/* amd vsdb found */
10807 				vsdb_info->freesync_supported = 1;
10808 				vsdb_info->amd_vsdb_version = version;
10809 				vsdb_info->min_refresh_rate_hz = min_rate;
10810 				vsdb_info->max_refresh_rate_hz = max_rate;
10811 				return true;
10812 			}
10813 			/* not amd vsdb */
10814 			return false;
10815 		}
10816 
10817 		/* check for ack*/
10818 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10819 		if (!res)
10820 			return false;
10821 	}
10822 
10823 	return false;
10824 }
10825 
10826 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10827 		uint8_t *edid_ext, int len,
10828 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10829 {
10830 	int i;
10831 
10832 	/* send extension block to DMCU for parsing */
10833 	for (i = 0; i < len; i += 8) {
10834 		/* send 8 bytes a time */
10835 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10836 			return false;
10837 	}
10838 
10839 	return vsdb_info->freesync_supported;
10840 }
10841 
10842 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10843 		uint8_t *edid_ext, int len,
10844 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10845 {
10846 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10847 
10848 	if (adev->dm.dmub_srv)
10849 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10850 	else
10851 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10852 }
10853 
10854 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10855 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10856 {
10857 	uint8_t *edid_ext = NULL;
10858 	int i;
10859 	bool valid_vsdb_found = false;
10860 
10861 	/*----- drm_find_cea_extension() -----*/
10862 	/* No EDID or EDID extensions */
10863 	if (edid == NULL || edid->extensions == 0)
10864 		return -ENODEV;
10865 
10866 	/* Find CEA extension */
10867 	for (i = 0; i < edid->extensions; i++) {
10868 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10869 		if (edid_ext[0] == CEA_EXT)
10870 			break;
10871 	}
10872 
10873 	if (i == edid->extensions)
10874 		return -ENODEV;
10875 
10876 	/*----- cea_db_offsets() -----*/
10877 	if (edid_ext[0] != CEA_EXT)
10878 		return -ENODEV;
10879 
10880 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10881 
10882 	return valid_vsdb_found ? i : -ENODEV;
10883 }
10884 
10885 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10886 					struct edid *edid)
10887 {
10888 	int i = 0;
10889 	struct detailed_timing *timing;
10890 	struct detailed_non_pixel *data;
10891 	struct detailed_data_monitor_range *range;
10892 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10893 			to_amdgpu_dm_connector(connector);
10894 	struct dm_connector_state *dm_con_state = NULL;
10895 
10896 	struct drm_device *dev = connector->dev;
10897 	struct amdgpu_device *adev = drm_to_adev(dev);
10898 	bool freesync_capable = false;
10899 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10900 
10901 	if (!connector->state) {
10902 		DRM_ERROR("%s - Connector has no state", __func__);
10903 		goto update;
10904 	}
10905 
10906 	if (!edid) {
10907 		dm_con_state = to_dm_connector_state(connector->state);
10908 
10909 		amdgpu_dm_connector->min_vfreq = 0;
10910 		amdgpu_dm_connector->max_vfreq = 0;
10911 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10912 
10913 		goto update;
10914 	}
10915 
10916 	dm_con_state = to_dm_connector_state(connector->state);
10917 
10918 	if (!amdgpu_dm_connector->dc_sink) {
10919 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10920 		goto update;
10921 	}
10922 	if (!adev->dm.freesync_module)
10923 		goto update;
10924 
10925 
10926 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10927 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10928 		bool edid_check_required = false;
10929 
10930 		if (edid) {
10931 			edid_check_required = is_dp_capable_without_timing_msa(
10932 						adev->dm.dc,
10933 						amdgpu_dm_connector);
10934 		}
10935 
10936 		if (edid_check_required == true && (edid->version > 1 ||
10937 		   (edid->version == 1 && edid->revision > 1))) {
10938 			for (i = 0; i < 4; i++) {
10939 
10940 				timing	= &edid->detailed_timings[i];
10941 				data	= &timing->data.other_data;
10942 				range	= &data->data.range;
10943 				/*
10944 				 * Check if monitor has continuous frequency mode
10945 				 */
10946 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10947 					continue;
10948 				/*
10949 				 * Check for flag range limits only. If flag == 1 then
10950 				 * no additional timing information provided.
10951 				 * Default GTF, GTF Secondary curve and CVT are not
10952 				 * supported
10953 				 */
10954 				if (range->flags != 1)
10955 					continue;
10956 
10957 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10958 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10959 				amdgpu_dm_connector->pixel_clock_mhz =
10960 					range->pixel_clock_mhz * 10;
10961 
10962 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10963 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10964 
10965 				break;
10966 			}
10967 
10968 			if (amdgpu_dm_connector->max_vfreq -
10969 			    amdgpu_dm_connector->min_vfreq > 10) {
10970 
10971 				freesync_capable = true;
10972 			}
10973 		}
10974 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10975 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10976 		if (i >= 0 && vsdb_info.freesync_supported) {
10977 			timing  = &edid->detailed_timings[i];
10978 			data    = &timing->data.other_data;
10979 
10980 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10981 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10982 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10983 				freesync_capable = true;
10984 
10985 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10986 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10987 		}
10988 	}
10989 
10990 update:
10991 	if (dm_con_state)
10992 		dm_con_state->freesync_capable = freesync_capable;
10993 
10994 	if (connector->vrr_capable_property)
10995 		drm_connector_set_vrr_capable_property(connector,
10996 						       freesync_capable);
10997 }
10998 
10999 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11000 {
11001 	struct amdgpu_device *adev = drm_to_adev(dev);
11002 	struct dc *dc = adev->dm.dc;
11003 	int i;
11004 
11005 	mutex_lock(&adev->dm.dc_lock);
11006 	if (dc->current_state) {
11007 		for (i = 0; i < dc->current_state->stream_count; ++i)
11008 			dc->current_state->streams[i]
11009 				->triggered_crtc_reset.enabled =
11010 				adev->dm.force_timing_sync;
11011 
11012 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11013 		dc_trigger_sync(dc, dc->current_state);
11014 	}
11015 	mutex_unlock(&adev->dm.dc_lock);
11016 }
11017 
11018 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11019 		       uint32_t value, const char *func_name)
11020 {
11021 #ifdef DM_CHECK_ADDR_0
11022 	if (address == 0) {
11023 		DC_ERR("invalid register write. address = 0");
11024 		return;
11025 	}
11026 #endif
11027 	cgs_write_register(ctx->cgs_device, address, value);
11028 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11029 }
11030 
11031 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11032 			  const char *func_name)
11033 {
11034 	uint32_t value;
11035 #ifdef DM_CHECK_ADDR_0
11036 	if (address == 0) {
11037 		DC_ERR("invalid register read; address = 0\n");
11038 		return 0;
11039 	}
11040 #endif
11041 
11042 	if (ctx->dmub_srv &&
11043 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11044 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11045 		ASSERT(false);
11046 		return 0;
11047 	}
11048 
11049 	value = cgs_read_register(ctx->cgs_device, address);
11050 
11051 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11052 
11053 	return value;
11054 }
11055 
11056 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11057 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11058 {
11059 	struct amdgpu_device *adev = ctx->driver_context;
11060 	int ret = 0;
11061 
11062 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11063 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11064 	if (ret == 0) {
11065 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11066 		return -1;
11067 	}
11068 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11069 
11070 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11071 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11072 
11073 		// For read case, Copy data to payload
11074 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11075 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11076 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11077 			adev->dm.dmub_notify->aux_reply.length);
11078 	}
11079 
11080 	return adev->dm.dmub_notify->aux_reply.length;
11081 }
11082