xref: /openbsd-src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 198f0b5dccae76a18ee7603263e9fb6884a167ec)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
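/*
 * Map the DC dongle type reported in the DPCD caps to the corresponding
 * DRM subconnector property value.
 */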
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
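/*
 * Reflect the currently attached (or absent) downstream dongle in the
 * connector's DP subconnector property. Only applies to DisplayPort
 * connectors.
 */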
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * Initializes drm_device display related structures, based on the
181  * information provided by DAL. The DRM structures are: drm_crtc,
182  * drm_connector, drm_encoder and drm_mode_config.
183  *
184  * Returns 0 on success.
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* Removes and deallocates the DRM structures created by the above function. */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
221 /*
222  * dm_vblank_get_counter
223  *
224  * @brief
225  * Get counter for number of vertical blanks
226  *
227  * @param
228  * struct amdgpu_device *adev - [in] desired amdgpu device
229  * int crtc - [in] which CRTC to get the counter from
230  *
231  * @return
232  * Counter for vertical blanks
233  */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
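/*
 * Return the current scanout position and vblank start/end for a CRTC,
 * packed back into register format for the base driver: vertical
 * position in the low 16 bits, horizontal position in the high 16 bits.
 */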
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
261 		if (acrtc->dm_irq_params.stream == NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
267 		/*
268 		 * TODO rework base driver to use values directly.
269 		 * for now parse it back into reg-format
270 		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
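/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing
 * generator) instance. Falls back to the first CRTC for an invalid
 * instance (-1) and returns NULL when no CRTC matches.
 */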
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
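/*
 * VRR state helpers: a CRTC counts as VRR-active in both the
 * active-variable and the active-fixed freesync states. The _irq
 * variant reads the interrupt-side copy of the freesync config and is
 * intended for use from IRQ handlers.
 */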
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: used for determining the CRTC instance
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
441 	/* Keep track of the vblank of this flip for flip throttling. We use the
442 	 * cooked hw counter, as it is incremented at the start of the vblank in
443 	 * which the pageflip completed, so last_flip_vblank is the forbidden
444 	 * count for queueing new pageflips if vsync + VRR is enabled.
445 	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
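/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate, and in VRR mode performs core
 * vblank handling after the end of front-porch, plus BTR processing
 * on pre-DCE12 ASICs.
 */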
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
488 		/* Core vblank handling is done here after the end of front-porch
489 		 * in VRR mode, as vblank timestamping only gives valid results
490 		 * once scanout is past the front-porch. This will also deliver
491 		 * any page-flip completion events that were queued to us because
492 		 * a pageflip happened inside the front-porch.
493 		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
520  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
540 	/*
541 	 * Core vblank handling at start of front-porch is only possible
542 	 * in non-VRR mode, as only then does vblank timestamping give
543 	 * valid results while inside the front-porch. Otherwise defer it
544 	 * to dm_vupdate_high_irq after the end of front-porch.
545 	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
549 	/*
550 	 * The following must happen at the start of vblank, for CRC
551 	 * computation and below-the-range (BTR) support in VRR mode.
552 	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
574 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 #define DMUB_TRACE_MAX_READ 64
622 /**
623  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
624  * @interrupt_params: used for determining the Outbox instance
625  *
626  * Handles the Outbox interrupt by draining pending DMUB notifications
627  * and reading out DMCUB trace buffer entries.
628  */
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
648 			// TODO : HPD Implementation
649 
650 		} else {
651 			DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
652 		}
653 	}
654 
655 
656 	do {
657 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 							entry.param0, entry.param1);
660 
661 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
663 		} else
664 			break;
665 
666 		count++;
667 
668 	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
687 static int dm_early_init(void *handle);
688 
689 /* Allocate memory for FBC compressed data */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
705 	if (compressor->bo_ptr)
706 		return;
707 
708 
709 	list_for_each_entry(mode, &connector->modes, head) {
710 		if (max_size < mode->htotal * mode->vtotal)
711 			max_size = mode->htotal * mode->vtotal;
712 	}
713 
714 	if (max_size) {
715 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
716 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
717 			    &compressor->gpu_addr, &compressor->cpu_addr);
718 
719 		if (r)
720 			DRM_ERROR("DM: Failed to initialize FBC\n");
721 		else {
722 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
723 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
724 		}
725 
726 	}
727 
728 }
729 
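/*
 * Audio component glue: lets the HDA audio driver query the ELD of the
 * connector driving a given audio pin ("port").
 */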
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 					  int pipe, bool *enabled,
732 					  unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 				       struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 					  struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 #ifdef notyet
797 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
798 	.bind	= amdgpu_dm_audio_component_bind,
799 	.unbind	= amdgpu_dm_audio_component_unbind,
800 };
801 #endif
802 
803 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
804 {
805 	int i, ret;
806 
807 	if (!amdgpu_audio)
808 		return 0;
809 
810 	adev->mode_info.audio.enabled = true;
811 
812 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
813 
814 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
815 		adev->mode_info.audio.pin[i].channels = -1;
816 		adev->mode_info.audio.pin[i].rate = -1;
817 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
818 		adev->mode_info.audio.pin[i].status_bits = 0;
819 		adev->mode_info.audio.pin[i].category_code = 0;
820 		adev->mode_info.audio.pin[i].connected = false;
821 		adev->mode_info.audio.pin[i].id =
822 			adev->dm.dc->res_pool->audios[i]->inst;
823 		adev->mode_info.audio.pin[i].offset = 0;
824 	}
825 
826 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
827 	if (ret < 0)
828 		return ret;
829 
830 	adev->dm.audio_registered = true;
831 
832 	return 0;
833 }
834 
835 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
836 {
837 	if (!amdgpu_audio)
838 		return;
839 
840 	if (!adev->mode_info.audio.enabled)
841 		return;
842 
843 	if (adev->dm.audio_registered) {
844 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
845 		adev->dm.audio_registered = false;
846 	}
847 
848 	/* TODO: Disable audio? */
849 
850 	adev->mode_info.audio.enabled = false;
851 }
852 
853 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
854 {
855 	struct drm_audio_component *acomp = adev->dm.audio_component;
856 
857 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
858 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
859 
860 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
861 						 pin, -1);
862 	}
863 }
864 
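/*
 * Bring up the DMUB hardware: copy the firmware and VBIOS into their
 * framebuffer windows, clear the mailbox, tracebuffer and firmware
 * state regions, program the hardware parameters and wait for the
 * firmware to auto-load. Returns 0 (not an error) on ASICs without
 * DMUB support.
 */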
865 static int dm_dmub_hw_init(struct amdgpu_device *adev)
866 {
867 	const struct dmcub_firmware_header_v1_0 *hdr;
868 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
869 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
870 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
871 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
872 	struct abm *abm = adev->dm.dc->res_pool->abm;
873 	struct dmub_srv_hw_params hw_params;
874 	enum dmub_status status;
875 	const unsigned char *fw_inst_const, *fw_bss_data;
876 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
877 	bool has_hw_support;
878 
879 	if (!dmub_srv)
880 		/* DMUB isn't supported on the ASIC. */
881 		return 0;
882 
883 	if (!fb_info) {
884 		DRM_ERROR("No framebuffer info for DMUB service.\n");
885 		return -EINVAL;
886 	}
887 
888 	if (!dmub_fw) {
889 		/* Firmware required for DMUB support. */
890 		DRM_ERROR("No firmware provided for DMUB.\n");
891 		return -EINVAL;
892 	}
893 
894 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
895 	if (status != DMUB_STATUS_OK) {
896 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
897 		return -EINVAL;
898 	}
899 
900 	if (!has_hw_support) {
901 		DRM_INFO("DMUB unsupported on ASIC\n");
902 		return 0;
903 	}
904 
905 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
906 
907 	fw_inst_const = dmub_fw->data +
908 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
909 			PSP_HEADER_BYTES;
910 
911 	fw_bss_data = dmub_fw->data +
912 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
913 		      le32_to_cpu(hdr->inst_const_bytes);
914 
915 	/* Copy firmware and bios info into FB memory. */
916 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
917 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
918 
919 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
920 
921 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
922 	 * amdgpu_ucode_init_single_fw will load dmub firmware
923 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
924 	 * will be done by dm_dmub_hw_init
925 	 */
926 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
927 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
928 				fw_inst_const_size);
929 	}
930 
931 	if (fw_bss_data_size)
932 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
933 		       fw_bss_data, fw_bss_data_size);
934 
935 	/* Copy firmware bios info into FB memory. */
936 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
937 	       adev->bios_size);
938 
939 	/* Reset regions that need to be reset. */
940 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
941 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
942 
943 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
944 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
945 
946 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
947 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
948 
949 	/* Initialize hardware. */
950 	memset(&hw_params, 0, sizeof(hw_params));
951 	hw_params.fb_base = adev->gmc.fb_start;
952 	hw_params.fb_offset = adev->gmc.aper_base;
953 
954 	/* backdoor load firmware and trigger dmub running */
955 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
956 		hw_params.load_inst_const = true;
957 
958 	if (dmcu)
959 		hw_params.psp_version = dmcu->psp_version;
960 
961 	for (i = 0; i < fb_info->num_fb; ++i)
962 		hw_params.fb[i] = &fb_info->fb[i];
963 
964 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
965 	if (status != DMUB_STATUS_OK) {
966 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
967 		return -EINVAL;
968 	}
969 
970 	/* Wait for firmware load to finish. */
971 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
972 	if (status != DMUB_STATUS_OK)
973 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
974 
975 	/* Init DMCU and ABM if available. */
976 	if (dmcu && abm) {
977 		dmcu->funcs->dmcu_init(dmcu);
978 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
979 	}
980 
981 	if (!adev->dm.dc->ctx->dmub_srv)
982 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
983 	if (!adev->dm.dc->ctx->dmub_srv) {
984 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
985 		return -ENOMEM;
986 	}
987 
988 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
989 		 adev->dm.dmcub_fw_version);
990 
991 	return 0;
992 }
993 
994 #if defined(CONFIG_DRM_AMD_DC_DCN)
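/*
 * Translate the GMC aperture layout (system aperture, AGP window and
 * GART page table) into DC's physical address space config. Values are
 * converted to the register granularities DC expects: 256KB units
 * (>> 18) for the system aperture, 16MB units (>> 24) for AGP and 4KB
 * pages (>> 12) for the GART page table.
 */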
995 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
996 {
997 	uint64_t pt_base;
998 	uint32_t logical_addr_low;
999 	uint32_t logical_addr_high;
1000 	uint32_t agp_base, agp_bot, agp_top;
1001 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1002 
1003 	memset(pa_config, 0, sizeof(*pa_config));
1004 
1005 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1006 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1007 
1008 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1009 		/*
1010 		 * Raven2 has a HW issue where it cannot use VRAM that lies
1011 		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1012 		 * increase the system aperture high address (add 1) to get
1013 		 * rid of the VM fault and hardware hang.
1014 		 */
1015 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1016 	else
1017 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1018 
1019 	agp_base = 0;
1020 	agp_bot = adev->gmc.agp_start >> 24;
1021 	agp_top = adev->gmc.agp_end >> 24;
1022 
1023 
1024 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1025 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1026 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1027 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1028 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1029 	page_table_base.low_part = lower_32_bits(pt_base);
1030 
1031 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1032 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1033 
1034 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1035 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1036 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1037 
1038 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1039 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1040 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1041 
1042 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1043 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1044 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1045 
1046 	pa_config->is_hvm_enabled = 0;
1047 
1048 }
1049 #endif
1050 #if defined(CONFIG_DRM_AMD_DC_DCN)
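/*
 * Deferred vblank work: counts how many CRTCs have vblank interrupts
 * enabled, allows idle optimizations (MALL) only when that count drops
 * to zero, and enables/disables PSR according to the OS's vblank
 * requirements for the stream.
 */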
1051 static void vblank_control_worker(struct work_struct *work)
1052 {
1053 	struct vblank_control_work *vblank_work =
1054 		container_of(work, struct vblank_control_work, work);
1055 	struct amdgpu_display_manager *dm = vblank_work->dm;
1056 
1057 	mutex_lock(&dm->dc_lock);
1058 
1059 	if (vblank_work->enable)
1060 		dm->active_vblank_irq_count++;
1061 	else if (dm->active_vblank_irq_count)
1062 		dm->active_vblank_irq_count--;
1063 
1064 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1065 
1066 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1067 
1068 	/* Control PSR based on vblank requirements from OS */
1069 	if (vblank_work->stream && vblank_work->stream->link) {
1070 		if (vblank_work->enable) {
1071 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1072 				amdgpu_dm_psr_disable(vblank_work->stream);
1073 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1074 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1075 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1076 			amdgpu_dm_psr_enable(vblank_work->stream);
1077 		}
1078 	}
1079 
1080 	mutex_unlock(&dm->dc_lock);
1081 
1082 	dc_stream_release(vblank_work->stream);
1083 
1084 	kfree(vblank_work);
1085 }
1086 
1087 #endif
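/*
 * Core DM bring-up: create the DC instance from the ASIC and VBIOS
 * information, initialize DMUB, freesync and (optionally) HDCP support,
 * then build the DRM mode objects on top via
 * amdgpu_dm_initialize_drm_device().
 */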
1088 static int amdgpu_dm_init(struct amdgpu_device *adev)
1089 {
1090 	struct dc_init_data init_data;
1091 #ifdef CONFIG_DRM_AMD_DC_HDCP
1092 	struct dc_callback_init init_params;
1093 #endif
1094 	int r;
1095 
1096 	adev->dm.ddev = adev_to_drm(adev);
1097 	adev->dm.adev = adev;
1098 
1099 	/* Zero all the fields */
1100 	memset(&init_data, 0, sizeof(init_data));
1101 #ifdef CONFIG_DRM_AMD_DC_HDCP
1102 	memset(&init_params, 0, sizeof(init_params));
1103 #endif
1104 
1105 	rw_init(&adev->dm.dc_lock, "dmdc");
1106 	rw_init(&adev->dm.audio_lock, "dmaud");
1107 #if defined(CONFIG_DRM_AMD_DC_DCN)
1108 	mtx_init(&adev->dm.vblank_lock, IPL_TTY);
1109 #endif
1110 
1111 	if (amdgpu_dm_irq_init(adev)) {
1112 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1113 		goto error;
1114 	}
1115 
1116 	init_data.asic_id.chip_family = adev->family;
1117 
1118 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1119 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1120 	init_data.asic_id.chip_id = adev->pdev->device;
1121 
1122 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1123 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1124 	init_data.asic_id.atombios_base_address =
1125 		adev->mode_info.atom_context->bios;
1126 
1127 	init_data.driver = adev;
1128 
1129 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1130 
1131 	if (!adev->dm.cgs_device) {
1132 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1133 		goto error;
1134 	}
1135 
1136 	init_data.cgs_device = adev->dm.cgs_device;
1137 
1138 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1139 
1140 	switch (adev->asic_type) {
1141 	case CHIP_CARRIZO:
1142 	case CHIP_STONEY:
1143 	case CHIP_RAVEN:
1144 	case CHIP_RENOIR:
1145 		init_data.flags.gpu_vm_support = true;
1146 		switch (adev->dm.dmcub_fw_version) {
1147 		case 0: /* development */
1148 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1149 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1150 			init_data.flags.disable_dmcu = false;
1151 			break;
1152 		default:
1153 			init_data.flags.disable_dmcu = true;
1154 		}
1155 		break;
1156 	case CHIP_VANGOGH:
1157 	case CHIP_YELLOW_CARP:
1158 		init_data.flags.gpu_vm_support = true;
1159 		break;
1160 	default:
1161 		break;
1162 	}
1163 
1164 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1165 		init_data.flags.fbc_support = true;
1166 
1167 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1168 		init_data.flags.multi_mon_pp_mclk_switch = true;
1169 
1170 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1171 		init_data.flags.disable_fractional_pwm = true;
1172 
1173 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1174 		init_data.flags.edp_no_power_sequencing = true;
1175 
1176 	init_data.flags.power_down_display_on_boot = true;
1177 
1178 	INIT_LIST_HEAD(&adev->dm.da_list);
1179 	/* Display Core create. */
1180 	adev->dm.dc = dc_create(&init_data);
1181 
1182 	if (adev->dm.dc) {
1183 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1184 	} else {
1185 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1186 		goto error;
1187 	}
1188 
1189 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1190 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1191 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1192 	}
1193 
1194 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1195 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1196 
1197 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1198 		adev->dm.dc->debug.disable_stutter = true;
1199 
1200 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1201 		adev->dm.dc->debug.disable_dsc = true;
1202 
1203 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1204 		adev->dm.dc->debug.disable_clock_gate = true;
1205 
1206 	r = dm_dmub_hw_init(adev);
1207 	if (r) {
1208 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1209 		goto error;
1210 	}
1211 
1212 	dc_hardware_init(adev->dm.dc);
1213 
1214 #if defined(CONFIG_DRM_AMD_DC_DCN)
1215 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1216 		struct dc_phy_addr_space_config pa_config;
1217 
1218 		mmhub_read_system_context(adev, &pa_config);
1219 
1220 		// Call the DC init_memory func
1221 		dc_setup_system_context(adev->dm.dc, &pa_config);
1222 	}
1223 #endif
1224 
1225 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1226 	if (!adev->dm.freesync_module) {
1227 		DRM_ERROR(
1228 		"amdgpu: failed to initialize freesync_module.\n");
1229 	} else
1230 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1231 				adev->dm.freesync_module);
1232 
1233 	amdgpu_dm_init_color_mod();
1234 
1235 #if defined(CONFIG_DRM_AMD_DC_DCN)
1236 	if (adev->dm.dc->caps.max_links > 0) {
1237 		adev->dm.vblank_control_workqueue =
1238 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1239 		if (!adev->dm.vblank_control_workqueue)
1240 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1241 	}
1242 #endif
1243 
1244 #ifdef CONFIG_DRM_AMD_DC_HDCP
1245 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1246 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1247 
1248 		if (!adev->dm.hdcp_workqueue)
1249 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1250 		else
1251 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1252 
1253 		dc_init_callbacks(adev->dm.dc, &init_params);
1254 	}
1255 #endif
1256 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1257 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1258 #endif
1259 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1260 		init_completion(&adev->dm.dmub_aux_transfer_done);
1261 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1262 		if (!adev->dm.dmub_notify) {
1263 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1264 			goto error;
1265 		}
1266 		amdgpu_dm_outbox_init(adev);
1267 	}
1268 
1269 	if (amdgpu_dm_initialize_drm_device(adev)) {
1270 		DRM_ERROR(
1271 		"amdgpu: failed to initialize sw for display support.\n");
1272 		goto error;
1273 	}
1274 
1275 	/* create fake encoders for MST */
1276 	dm_dp_create_fake_mst_encoders(adev);
1277 
1278 	/* TODO: Add_display_info? */
1279 
1280 	/* TODO use dynamic cursor width */
1281 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1282 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1283 
1284 	/* Disable vblank IRQs aggressively for power-saving */
1285 	adev_to_drm(adev)->vblank_disable_immediate = true;
1286 
1287 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1288 		DRM_ERROR(
1289 		"amdgpu: failed to initialize vblank support.\n");
1290 		goto error;
1291 	}
1292 
1293 
1294 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1295 
1296 	return 0;
1297 error:
1298 	amdgpu_dm_fini(adev);
1299 
1300 	return -EINVAL;
1301 }
1302 
1303 static int amdgpu_dm_early_fini(void *handle)
1304 {
1305 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1306 
1307 	amdgpu_dm_audio_fini(adev);
1308 
1309 	return 0;
1310 }
1311 
1312 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1313 {
1314 	int i;
1315 
1316 #if defined(CONFIG_DRM_AMD_DC_DCN)
1317 	if (adev->dm.vblank_control_workqueue) {
1318 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1319 		adev->dm.vblank_control_workqueue = NULL;
1320 	}
1321 #endif
1322 
1323 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1324 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1325 	}
1326 
1327 	amdgpu_dm_destroy_drm_device(&adev->dm);
1328 
1329 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1330 	if (adev->dm.crc_rd_wrk) {
1331 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1332 		kfree(adev->dm.crc_rd_wrk);
1333 		adev->dm.crc_rd_wrk = NULL;
1334 	}
1335 #endif
1336 #ifdef CONFIG_DRM_AMD_DC_HDCP
1337 	if (adev->dm.hdcp_workqueue) {
1338 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1339 		adev->dm.hdcp_workqueue = NULL;
1340 	}
1341 
1342 	if (adev->dm.dc)
1343 		dc_deinit_callbacks(adev->dm.dc);
1344 #endif
1345 
1346 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1347 
1348 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1349 		kfree(adev->dm.dmub_notify);
1350 		adev->dm.dmub_notify = NULL;
1351 	}
1352 
1353 	if (adev->dm.dmub_bo)
1354 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1355 				      &adev->dm.dmub_bo_gpu_addr,
1356 				      &adev->dm.dmub_bo_cpu_addr);
1357 
1358 	/* DC Destroy TODO: Replace destroy DAL */
1359 	if (adev->dm.dc)
1360 		dc_destroy(&adev->dm.dc);
1361 	/*
1362 	 * TODO: pageflip, vblank interrupt
1363 	 *
1364 	 * amdgpu_dm_irq_fini(adev);
1365 	 */
1366 
1367 	if (adev->dm.cgs_device) {
1368 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1369 		adev->dm.cgs_device = NULL;
1370 	}
1371 	if (adev->dm.freesync_module) {
1372 		mod_freesync_destroy(adev->dm.freesync_module);
1373 		adev->dm.freesync_module = NULL;
1374 	}
1375 
1376 	mutex_destroy(&adev->dm.audio_lock);
1377 	mutex_destroy(&adev->dm.dc_lock);
1378 
1379 	return;
1380 }
1381 
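/*
 * Request and validate DMCU firmware for the ASICs that load it through
 * PSP. Most ASICs carry no separate DMCU image, so a missing firmware
 * file is not treated as an error.
 */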
1382 static int load_dmcu_fw(struct amdgpu_device *adev)
1383 {
1384 	const char *fw_name_dmcu = NULL;
1385 	int r;
1386 	const struct dmcu_firmware_header_v1_0 *hdr;
1387 
1388 	switch (adev->asic_type) {
1389 #if defined(CONFIG_DRM_AMD_DC_SI)
1390 	case CHIP_TAHITI:
1391 	case CHIP_PITCAIRN:
1392 	case CHIP_VERDE:
1393 	case CHIP_OLAND:
1394 #endif
1395 	case CHIP_BONAIRE:
1396 	case CHIP_HAWAII:
1397 	case CHIP_KAVERI:
1398 	case CHIP_KABINI:
1399 	case CHIP_MULLINS:
1400 	case CHIP_TONGA:
1401 	case CHIP_FIJI:
1402 	case CHIP_CARRIZO:
1403 	case CHIP_STONEY:
1404 	case CHIP_POLARIS11:
1405 	case CHIP_POLARIS10:
1406 	case CHIP_POLARIS12:
1407 	case CHIP_VEGAM:
1408 	case CHIP_VEGA10:
1409 	case CHIP_VEGA12:
1410 	case CHIP_VEGA20:
1411 	case CHIP_NAVI10:
1412 	case CHIP_NAVI14:
1413 	case CHIP_RENOIR:
1414 	case CHIP_SIENNA_CICHLID:
1415 	case CHIP_NAVY_FLOUNDER:
1416 	case CHIP_DIMGREY_CAVEFISH:
1417 	case CHIP_BEIGE_GOBY:
1418 	case CHIP_VANGOGH:
1419 	case CHIP_YELLOW_CARP:
1420 		return 0;
1421 	case CHIP_NAVI12:
1422 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1423 		break;
1424 	case CHIP_RAVEN:
1425 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1426 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1427 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1428 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1429 		else
1430 			return 0;
1431 		break;
1432 	default:
1433 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1434 		return -EINVAL;
1435 	}
1436 
1437 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1438 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1439 		return 0;
1440 	}
1441 
1442 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1443 	if (r == -ENOENT) {
1444 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1445 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1446 		adev->dm.fw_dmcu = NULL;
1447 		return 0;
1448 	}
1449 	if (r) {
1450 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1451 			fw_name_dmcu);
1452 		return r;
1453 	}
1454 
1455 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1456 	if (r) {
1457 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1458 			fw_name_dmcu);
1459 		release_firmware(adev->dm.fw_dmcu);
1460 		adev->dm.fw_dmcu = NULL;
1461 		return r;
1462 	}
1463 
1464 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1465 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1466 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1467 	adev->firmware.fw_size +=
1468 		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1469 
1470 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1471 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1472 	adev->firmware.fw_size +=
1473 		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1474 
1475 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1476 
1477 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1478 
1479 	return 0;
1480 }
1481 
1482 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1483 {
1484 	struct amdgpu_device *adev = ctx;
1485 
1486 	return dm_read_reg(adev->dm.dc->ctx, address);
1487 }
1488 
1489 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1490 				     uint32_t value)
1491 {
1492 	struct amdgpu_device *adev = ctx;
1493 
1494 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1495 }
1496 
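/*
 * Software-side DMUB setup: fetch and validate the per-ASIC DMUB
 * firmware, create the DMUB service, calculate its region layout, and
 * allocate and map a VRAM buffer backing those regions.
 */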
1497 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1498 {
1499 	struct dmub_srv_create_params create_params;
1500 	struct dmub_srv_region_params region_params;
1501 	struct dmub_srv_region_info region_info;
1502 	struct dmub_srv_fb_params fb_params;
1503 	struct dmub_srv_fb_info *fb_info;
1504 	struct dmub_srv *dmub_srv;
1505 	const struct dmcub_firmware_header_v1_0 *hdr;
1506 	const char *fw_name_dmub;
1507 	enum dmub_asic dmub_asic;
1508 	enum dmub_status status;
1509 	int r;
1510 
1511 	switch (adev->asic_type) {
1512 	case CHIP_RENOIR:
1513 		dmub_asic = DMUB_ASIC_DCN21;
1514 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1515 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1516 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1517 		break;
1518 	case CHIP_SIENNA_CICHLID:
1519 		dmub_asic = DMUB_ASIC_DCN30;
1520 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1521 		break;
1522 	case CHIP_NAVY_FLOUNDER:
1523 		dmub_asic = DMUB_ASIC_DCN30;
1524 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1525 		break;
1526 	case CHIP_VANGOGH:
1527 		dmub_asic = DMUB_ASIC_DCN301;
1528 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1529 		break;
1530 	case CHIP_DIMGREY_CAVEFISH:
1531 		dmub_asic = DMUB_ASIC_DCN302;
1532 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1533 		break;
1534 	case CHIP_BEIGE_GOBY:
1535 		dmub_asic = DMUB_ASIC_DCN303;
1536 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1537 		break;
1538 	case CHIP_YELLOW_CARP:
1539 		dmub_asic = DMUB_ASIC_DCN31;
1540 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1541 		break;
1542 
1543 	default:
1544 		/* ASIC doesn't support DMUB. */
1545 		return 0;
1546 	}
1547 
1548 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1549 	if (r) {
1550 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1551 		return 0;
1552 	}
1553 
1554 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1555 	if (r) {
1556 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1557 		return 0;
1558 	}
1559 
1560 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1561 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1562 
1563 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1564 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1565 			AMDGPU_UCODE_ID_DMCUB;
1566 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1567 			adev->dm.dmub_fw;
1568 		adev->firmware.fw_size +=
1569 			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1570 
1571 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1572 			 adev->dm.dmcub_fw_version);
1573 	}
1574 
1575 
1576 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1577 	dmub_srv = adev->dm.dmub_srv;
1578 
1579 	if (!dmub_srv) {
1580 		DRM_ERROR("Failed to allocate DMUB service!\n");
1581 		return -ENOMEM;
1582 	}
1583 
1584 	memset(&create_params, 0, sizeof(create_params));
1585 	create_params.user_ctx = adev;
1586 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1587 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1588 	create_params.asic = dmub_asic;
1589 
1590 	/* Create the DMUB service. */
1591 	status = dmub_srv_create(dmub_srv, &create_params);
1592 	if (status != DMUB_STATUS_OK) {
1593 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1594 		return -EINVAL;
1595 	}
1596 
1597 	/* Calculate the size of all the regions for the DMUB service. */
1598 	memset(&region_params, 0, sizeof(region_params));
1599 
1600 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1601 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1602 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1603 	region_params.vbios_size = adev->bios_size;
1604 	region_params.fw_bss_data = region_params.bss_data_size ?
1605 		adev->dm.dmub_fw->data +
1606 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1607 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1608 	region_params.fw_inst_const =
1609 		adev->dm.dmub_fw->data +
1610 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1611 		PSP_HEADER_BYTES;
1612 
1613 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1614 					   &region_info);
1615 
1616 	if (status != DMUB_STATUS_OK) {
1617 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1618 		return -EINVAL;
1619 	}
1620 
1621 	/*
1622 	 * Allocate a framebuffer based on the total size of all the regions.
1623 	 * TODO: Move this into GART.
1624 	 */
1625 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1626 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1627 				    &adev->dm.dmub_bo_gpu_addr,
1628 				    &adev->dm.dmub_bo_cpu_addr);
1629 	if (r)
1630 		return r;
1631 
1632 	/* Rebase the regions on the framebuffer address. */
1633 	memset(&fb_params, 0, sizeof(fb_params));
1634 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1635 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1636 	fb_params.region_info = &region_info;
1637 
1638 	adev->dm.dmub_fb_info =
1639 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1640 	fb_info = adev->dm.dmub_fb_info;
1641 
1642 	if (!fb_info) {
1643 		DRM_ERROR(
1644 			"Failed to allocate framebuffer info for DMUB service!\n");
1645 		return -ENOMEM;
1646 	}
1647 
1648 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1649 	if (status != DMUB_STATUS_OK) {
1650 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1651 		return -EINVAL;
1652 	}
1653 
1654 	return 0;
1655 }
1656 
1657 static int dm_sw_init(void *handle)
1658 {
1659 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1660 	int r;
1661 
1662 	r = dm_dmub_sw_init(adev);
1663 	if (r)
1664 		return r;
1665 
1666 	return load_dmcu_fw(adev);
1667 }
1668 
1669 static int dm_sw_fini(void *handle)
1670 {
1671 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672 
1673 	kfree(adev->dm.dmub_fb_info);
1674 	adev->dm.dmub_fb_info = NULL;
1675 
1676 	if (adev->dm.dmub_srv) {
1677 		dmub_srv_destroy(adev->dm.dmub_srv);
1678 		adev->dm.dmub_srv = NULL;
1679 	}
1680 
1681 	release_firmware(adev->dm.dmub_fw);
1682 	adev->dm.dmub_fw = NULL;
1683 
1684 	release_firmware(adev->dm.fw_dmcu);
1685 	adev->dm.fw_dmcu = NULL;
1686 
1687 	return 0;
1688 }
1689 
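/*
 * Start MST topology management on every connector backed by an MST
 * branch device; on failure the link type is reset to
 * dc_connection_single.
 */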
1690 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1691 {
1692 	struct amdgpu_dm_connector *aconnector;
1693 	struct drm_connector *connector;
1694 	struct drm_connector_list_iter iter;
1695 	int ret = 0;
1696 
1697 	drm_connector_list_iter_begin(dev, &iter);
1698 	drm_for_each_connector_iter(connector, &iter) {
1699 		aconnector = to_amdgpu_dm_connector(connector);
1700 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1701 		    aconnector->mst_mgr.aux) {
1702 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1703 					 aconnector,
1704 					 aconnector->base.base.id);
1705 
1706 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1707 			if (ret < 0) {
1708 				DRM_ERROR("DM_MST: Failed to start MST\n");
1709 				aconnector->dc_link->type =
1710 					dc_connection_single;
1711 				break;
1712 			}
1713 		}
1714 	}
1715 	drm_connector_list_iter_end(&iter);
1716 
1717 	return ret;
1718 }
1719 
1720 static int dm_late_init(void *handle)
1721 {
1722 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1723 
1724 	struct dmcu_iram_parameters params;
1725 	unsigned int linear_lut[16];
1726 	int i;
1727 	struct dmcu *dmcu = NULL;
1728 
1729 	dmcu = adev->dm.dc->res_pool->dmcu;
1730 
1731 	for (i = 0; i < 16; i++)
1732 		linear_lut[i] = 0xFFFF * i / 15;
1733 
1734 	params.set = 0;
1735 	params.backlight_ramping_override = false;
1736 	params.backlight_ramping_start = 0xCCCC;
1737 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1738 	params.backlight_lut_array_size = 16;
1739 	params.backlight_lut_array = linear_lut;
1740 
1741 	/* Min backlight level after ABM reduction; don't allow below 1%:
1742 	 * 0xFFFF * 0.01 = 0x28F
1743 	 */
1744 	params.min_abm_backlight = 0x28F;
1745 	/* In the case where ABM is implemented on DMCUB, the
1746 	 * DMCU object will be NULL.
1747 	 * ABM 2.4 and up are implemented on DMCUB.
1748 	 */
1749 	if (dmcu) {
1750 		if (!dmcu_load_iram(dmcu, params))
1751 			return -EINVAL;
1752 	} else if (adev->dm.dc->ctx->dmub_srv) {
1753 		struct dc_link *edp_links[MAX_NUM_EDP];
1754 		int edp_num;
1755 
1756 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1757 		for (i = 0; i < edp_num; i++) {
1758 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1759 				return -EINVAL;
1760 		}
1761 	}
1762 
1763 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1764 }
1765 
1766 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1767 {
1768 	struct amdgpu_dm_connector *aconnector;
1769 	struct drm_connector *connector;
1770 	struct drm_connector_list_iter iter;
1771 	struct drm_dp_mst_topology_mgr *mgr;
1772 	int ret;
1773 	bool need_hotplug = false;
1774 
1775 	drm_connector_list_iter_begin(dev, &iter);
1776 	drm_for_each_connector_iter(connector, &iter) {
1777 		aconnector = to_amdgpu_dm_connector(connector);
1778 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1779 		    aconnector->mst_port)
1780 			continue;
1781 
1782 		mgr = &aconnector->mst_mgr;
1783 
1784 		if (suspend) {
1785 			drm_dp_mst_topology_mgr_suspend(mgr);
1786 		} else {
1787 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1788 			if (ret < 0) {
1789 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1790 				need_hotplug = true;
1791 			}
1792 		}
1793 	}
1794 	drm_connector_list_iter_end(&iter);
1795 
1796 	if (need_hotplug)
1797 		drm_kms_helper_hotplug_event(dev);
1798 }
1799 
1800 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1801 {
1802 	struct smu_context *smu = &adev->smu;
1803 	int ret = 0;
1804 
1805 	if (!is_support_sw_smu(adev))
1806 		return 0;
1807 
1808 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1809 	 * depends on the Windows driver dc implementation.
1810 	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1811 	 * should be passed to smu during boot up and resume from s3.
1812 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
1813 	 * dcn20_resource_construct,
1814 	 * then calls the pplib functions below to pass the settings to smu:
1815 	 * smu_set_watermarks_for_clock_ranges
1816 	 * smu_set_watermarks_table
1817 	 * navi10_set_watermarks_table
1818 	 * smu_write_watermarks_table
1819 	 *
1820 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1821 	 * dc has implemented a different flow for the Windows driver:
1822 	 * dc_hardware_init / dc_set_power_state
1823 	 * dcn10_init_hw
1824 	 * notify_wm_ranges
1825 	 * set_wm_ranges
1826 	 * -- Linux
1827 	 * smu_set_watermarks_for_clock_ranges
1828 	 * renoir_set_watermarks_table
1829 	 * smu_write_watermarks_table
1830 	 *
1831 	 * For Linux,
1832 	 * dc_hardware_init -> amdgpu_dm_init
1833 	 * dc_set_power_state --> dm_resume
1834 	 *
1835 	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1837 	 */
1838 	switch (adev->asic_type) {
1839 	case CHIP_NAVI10:
1840 	case CHIP_NAVI14:
1841 	case CHIP_NAVI12:
1842 		break;
1843 	default:
1844 		return 0;
1845 	}
1846 
1847 	ret = smu_write_watermarks_table(smu);
1848 	if (ret) {
1849 		DRM_ERROR("Failed to update WMTABLE!\n");
1850 		return ret;
1851 	}
1852 
1853 	return 0;
1854 }
1855 
1856 /**
1857  * dm_hw_init() - Initialize DC device
1858  * @handle: The base driver device containing the amdgpu_dm device.
1859  *
1860  * Initialize the &struct amdgpu_display_manager device. This involves calling
1861  * the initializers of each DM component, then populating the struct with them.
1862  *
1863  * Although the function implies hardware initialization, both hardware and
1864  * software are initialized here. Splitting them out to their relevant init
1865  * hooks is a future TODO item.
1866  *
1867  * Some notable things that are initialized here:
1868  *
1869  * - Display Core, both software and hardware
1870  * - DC modules that we need (freesync and color management)
1871  * - DRM software states
1872  * - Interrupt sources and handlers
1873  * - Vblank support
1874  * - Debug FS entries, if enabled
1875  */
1876 static int dm_hw_init(void *handle)
1877 {
1878 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1879 	/* Create DAL display manager */
1880 	amdgpu_dm_init(adev);
1881 	amdgpu_dm_hpd_init(adev);
1882 
1883 	return 0;
1884 }
1885 
1886 /**
1887  * dm_hw_fini() - Teardown DC device
1888  * @handle: The base driver device containing the amdgpu_dm device.
1889  *
1890  * Teardown components within &struct amdgpu_display_manager that require
1891  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1892  * were loaded. Also flush IRQ workqueues and disable them.
1893  */
1894 static int dm_hw_fini(void *handle)
1895 {
1896 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1897 
1898 	amdgpu_dm_hpd_fini(adev);
1899 
1900 	amdgpu_dm_irq_fini(adev);
1901 	amdgpu_dm_fini(adev);
1902 	return 0;
1903 }
1904 
1905 
1906 static int dm_enable_vblank(struct drm_crtc *crtc);
1907 static void dm_disable_vblank(struct drm_crtc *crtc);
1908 
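/*
 * Enable or disable the pflip and vblank interrupts for every stream in the
 * given DC state that still has planes; used to quiesce and restore display
 * interrupts around GPU reset.
 */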
1909 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1910 				 struct dc_state *state, bool enable)
1911 {
1912 	enum dc_irq_source irq_source;
1913 	struct amdgpu_crtc *acrtc;
1914 	int rc = -EBUSY;
1915 	int i = 0;
1916 
1917 	for (i = 0; i < state->stream_count; i++) {
1918 		acrtc = get_crtc_by_otg_inst(
1919 				adev, state->stream_status[i].primary_otg_inst);
1920 
1921 		if (acrtc && state->stream_status[i].plane_count != 0) {
1922 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1923 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1924 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1925 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1926 			if (rc)
1927 				DRM_WARN("Failed to %s pflip interrupts\n",
1928 					 enable ? "enable" : "disable");
1929 
1930 			if (enable) {
1931 				rc = dm_enable_vblank(&acrtc->base);
1932 				if (rc)
1933 					DRM_WARN("Failed to enable vblank interrupts\n");
1934 			} else {
1935 				dm_disable_vblank(&acrtc->base);
1936 			}
1937 
1938 		}
1939 	}
1940 
1941 }
1942 
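/*
 * Build a copy of the current DC state with every stream (and its planes)
 * removed and commit it, effectively blanking all pipes; used on suspend
 * while a GPU reset is in progress.
 */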
1943 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1944 {
1945 	struct dc_state *context = NULL;
1946 	enum dc_status res = DC_ERROR_UNEXPECTED;
1947 	int i;
1948 	struct dc_stream_state *del_streams[MAX_PIPES];
1949 	int del_streams_count = 0;
1950 
1951 	memset(del_streams, 0, sizeof(del_streams));
1952 
1953 	context = dc_create_state(dc);
1954 	if (context == NULL)
1955 		goto context_alloc_fail;
1956 
1957 	dc_resource_state_copy_construct_current(dc, context);
1958 
1959 	/* First remove from context all streams */
1960 	for (i = 0; i < context->stream_count; i++) {
1961 		struct dc_stream_state *stream = context->streams[i];
1962 
1963 		del_streams[del_streams_count++] = stream;
1964 	}
1965 
1966 	/* Remove all planes for removed streams and then remove the streams */
1967 	for (i = 0; i < del_streams_count; i++) {
1968 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1969 			res = DC_FAIL_DETACH_SURFACES;
1970 			goto fail;
1971 		}
1972 
1973 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1974 		if (res != DC_OK)
1975 			goto fail;
1976 	}
1977 
1978 
1979 	res = dc_validate_global_state(dc, context, false);
1980 
1981 	if (res != DC_OK) {
1982 		DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1983 		goto fail;
1984 	}
1985 
1986 	res = dc_commit_state(dc, context);
1987 
1988 fail:
1989 	dc_release_state(context);
1990 
1991 context_alloc_fail:
1992 	return res;
1993 }
1994 
1995 static int dm_suspend(void *handle)
1996 {
1997 	struct amdgpu_device *adev = handle;
1998 	struct amdgpu_display_manager *dm = &adev->dm;
1999 	int ret = 0;
2000 
2001 	if (amdgpu_in_reset(adev)) {
2002 		mutex_lock(&dm->dc_lock);
2003 
2004 #if defined(CONFIG_DRM_AMD_DC_DCN)
2005 		dc_allow_idle_optimizations(adev->dm.dc, false);
2006 #endif
2007 
2008 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2009 
2010 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2011 
2012 		amdgpu_dm_commit_zero_streams(dm->dc);
2013 
2014 		amdgpu_dm_irq_suspend(adev);
2015 
2016 		return ret;
2017 	}
2018 
2019 	WARN_ON(adev->dm.cached_state);
2020 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2021 
2022 	s3_handle_mst(adev_to_drm(adev), true);
2023 
2024 	amdgpu_dm_irq_suspend(adev);
2025 
2026 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2027 
2028 	return 0;
2029 }
2030 
2031 static struct amdgpu_dm_connector *
2032 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2033 					     struct drm_crtc *crtc)
2034 {
2035 	uint32_t i;
2036 	struct drm_connector_state *new_con_state;
2037 	struct drm_connector *connector;
2038 	struct drm_crtc *crtc_from_state;
2039 
2040 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2041 		crtc_from_state = new_con_state->crtc;
2042 
2043 		if (crtc_from_state == crtc)
2044 			return to_amdgpu_dm_connector(connector);
2045 	}
2046 
2047 	return NULL;
2048 }
2049 
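/*
 * Emulate link detection for a forced connector: synthesize a sink matching
 * the connector signal type and read the EDID locally, without performing a
 * real detection on the link.
 */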
2050 static void emulated_link_detect(struct dc_link *link)
2051 {
2052 	struct dc_sink_init_data sink_init_data = { 0 };
2053 	struct display_sink_capability sink_caps = { 0 };
2054 	enum dc_edid_status edid_status;
2055 	struct dc_context *dc_ctx = link->ctx;
2056 	struct dc_sink *sink = NULL;
2057 	struct dc_sink *prev_sink = NULL;
2058 
2059 	link->type = dc_connection_none;
2060 	prev_sink = link->local_sink;
2061 
2062 	if (prev_sink)
2063 		dc_sink_release(prev_sink);
2064 
2065 	switch (link->connector_signal) {
2066 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2067 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2068 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2069 		break;
2070 	}
2071 
2072 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2073 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2074 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2075 		break;
2076 	}
2077 
2078 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2079 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2080 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2081 		break;
2082 	}
2083 
2084 	case SIGNAL_TYPE_LVDS: {
2085 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2086 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2087 		break;
2088 	}
2089 
2090 	case SIGNAL_TYPE_EDP: {
2091 		sink_caps.transaction_type =
2092 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2093 		sink_caps.signal = SIGNAL_TYPE_EDP;
2094 		break;
2095 	}
2096 
2097 	case SIGNAL_TYPE_DISPLAY_PORT: {
2098 		sink_caps.transaction_type =
2099 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2100 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2101 		break;
2102 	}
2103 
2104 	default:
2105 		DC_ERROR("Invalid connector type! signal:%d\n",
2106 			link->connector_signal);
2107 		return;
2108 	}
2109 
2110 	sink_init_data.link = link;
2111 	sink_init_data.sink_signal = sink_caps.signal;
2112 
2113 	sink = dc_sink_create(&sink_init_data);
2114 	if (!sink) {
2115 		DC_ERROR("Failed to create sink!\n");
2116 		return;
2117 	}
2118 
2119 	/* dc_sink_create returns a new reference */
2120 	link->local_sink = sink;
2121 
2122 	edid_status = dm_helpers_read_local_edid(
2123 			link->ctx,
2124 			link,
2125 			sink);
2126 
2127 	if (edid_status != EDID_OK)
2128 		DC_ERROR("Failed to read EDID");
2129 
2130 }
2131 
2132 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2133 				     struct amdgpu_display_manager *dm)
2134 {
2135 	struct {
2136 		struct dc_surface_update surface_updates[MAX_SURFACES];
2137 		struct dc_plane_info plane_infos[MAX_SURFACES];
2138 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2139 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2140 		struct dc_stream_update stream_update;
2141 	} *bundle;
2142 	int k, m;
2143 
2144 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2145 
2146 	if (!bundle) {
2147 		dm_error("Failed to allocate update bundle\n");
2148 		goto cleanup;
2149 	}
2150 
2151 	for (k = 0; k < dc_state->stream_count; k++) {
2152 		bundle->stream_update.stream = dc_state->streams[k];
2153 
2154 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2155 			bundle->surface_updates[m].surface =
2156 				dc_state->stream_status->plane_states[m];
2157 			bundle->surface_updates[m].surface->force_full_update =
2158 				true;
2159 		}
2160 		dc_commit_updates_for_stream(
2161 			dm->dc, bundle->surface_updates,
2162 			dc_state->stream_status->plane_count,
2163 			dc_state->streams[k], &bundle->stream_update, dc_state);
2164 	}
2165 
2166 cleanup:
2167 	kfree(bundle);
2168 
2169 	return;
2170 }
2171 
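/*
 * Find the stream currently driving @link and commit a stream update with
 * dpms_off set, turning the output off under the dc_lock.
 */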
2172 static void dm_set_dpms_off(struct dc_link *link)
2173 {
2174 	struct dc_stream_state *stream_state;
2175 	struct amdgpu_dm_connector *aconnector = link->priv;
2176 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2177 	struct dc_stream_update stream_update;
2178 	bool dpms_off = true;
2179 
2180 	memset(&stream_update, 0, sizeof(stream_update));
2181 	stream_update.dpms_off = &dpms_off;
2182 
2183 	mutex_lock(&adev->dm.dc_lock);
2184 	stream_state = dc_stream_find_from_link(link);
2185 
2186 	if (stream_state == NULL) {
2187 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2188 		mutex_unlock(&adev->dm.dc_lock);
2189 		return;
2190 	}
2191 
2192 	stream_update.stream = stream_state;
2193 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2194 				     stream_state, &stream_update,
2195 				     stream_state->ctx->dc->current_state);
2196 	mutex_unlock(&adev->dm.dc_lock);
2197 }
2198 
2199 static int dm_resume(void *handle)
2200 {
2201 	struct amdgpu_device *adev = handle;
2202 	struct drm_device *ddev = adev_to_drm(adev);
2203 	struct amdgpu_display_manager *dm = &adev->dm;
2204 	struct amdgpu_dm_connector *aconnector;
2205 	struct drm_connector *connector;
2206 	struct drm_connector_list_iter iter;
2207 	struct drm_crtc *crtc;
2208 	struct drm_crtc_state *new_crtc_state;
2209 	struct dm_crtc_state *dm_new_crtc_state;
2210 	struct drm_plane *plane;
2211 	struct drm_plane_state *new_plane_state;
2212 	struct dm_plane_state *dm_new_plane_state;
2213 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2214 	enum dc_connection_type new_connection_type = dc_connection_none;
2215 	struct dc_state *dc_state;
2216 	int i, r, j;
2217 
2218 	if (amdgpu_in_reset(adev)) {
2219 		dc_state = dm->cached_dc_state;
2220 
2221 		if (dc_enable_dmub_notifications(adev->dm.dc))
2222 			amdgpu_dm_outbox_init(adev);
2223 
2224 		r = dm_dmub_hw_init(adev);
2225 		if (r)
2226 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2227 
2228 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2229 		dc_resume(dm->dc);
2230 
2231 		amdgpu_dm_irq_resume_early(adev);
2232 
2233 		for (i = 0; i < dc_state->stream_count; i++) {
2234 			dc_state->streams[i]->mode_changed = true;
2235 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2236 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2237 					= 0xffffffff;
2238 			}
2239 		}
2240 #if defined(CONFIG_DRM_AMD_DC_DCN)
2241 		/*
2242 		 * Resource allocation happens for link encoders for newer ASIC in
2243 		 * dc_validate_global_state, so we need to revalidate it.
2244 		 *
2245 		 * This shouldn't fail (it passed once before), so warn if it does.
2246 		 */
2247 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2248 #endif
2249 
2250 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2251 
2252 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2253 
2254 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2255 
2256 		dc_release_state(dm->cached_dc_state);
2257 		dm->cached_dc_state = NULL;
2258 
2259 		amdgpu_dm_irq_resume_late(adev);
2260 
2261 		mutex_unlock(&dm->dc_lock);
2262 
2263 		return 0;
2264 	}
2265 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2266 	dc_release_state(dm_state->context);
2267 	dm_state->context = dc_create_state(dm->dc);
2268 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2269 	dc_resource_state_construct(dm->dc, dm_state->context);
2270 
2271 	/* Re-enable outbox interrupts for DPIA. */
2272 	if (dc_enable_dmub_notifications(adev->dm.dc))
2273 		amdgpu_dm_outbox_init(adev);
2274 
2275 	/* Before powering on DC we need to re-initialize DMUB. */
2276 	r = dm_dmub_hw_init(adev);
2277 	if (r)
2278 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2279 
2280 	/* power on hardware */
2281 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2282 
2283 	/* program HPD filter */
2284 	dc_resume(dm->dc);
2285 
2286 	/*
2287 	 * Enable HPD Rx IRQ early; this must be done before setting the mode,
2288 	 * as short-pulse interrupts are used for MST.
2289 	 */
2290 	amdgpu_dm_irq_resume_early(adev);
2291 
2292 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2293 	s3_handle_mst(ddev, false);
2294 
2295 	/* Do detection */
2296 	drm_connector_list_iter_begin(ddev, &iter);
2297 	drm_for_each_connector_iter(connector, &iter) {
2298 		aconnector = to_amdgpu_dm_connector(connector);
2299 
2300 		/*
2301 		 * this is the case when traversing through already created
2302 		 * MST connectors, should be skipped
2303 		 */
2304 		if (aconnector->mst_port)
2305 			continue;
2306 
2307 		mutex_lock(&aconnector->hpd_lock);
2308 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2309 			DRM_ERROR("KMS: Failed to detect connector\n");
2310 
2311 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2312 			emulated_link_detect(aconnector->dc_link);
2313 		else
2314 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2315 
2316 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2317 			aconnector->fake_enable = false;
2318 
2319 		if (aconnector->dc_sink)
2320 			dc_sink_release(aconnector->dc_sink);
2321 		aconnector->dc_sink = NULL;
2322 		amdgpu_dm_update_connector_after_detect(aconnector);
2323 		mutex_unlock(&aconnector->hpd_lock);
2324 	}
2325 	drm_connector_list_iter_end(&iter);
2326 
2327 	/* Force mode set in atomic commit */
2328 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2329 		new_crtc_state->active_changed = true;
2330 
2331 	/*
2332 	 * atomic_check is expected to create the dc states. We need to release
2333 	 * them here, since they were duplicated as part of the suspend
2334 	 * procedure.
2335 	 */
2336 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2337 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2338 		if (dm_new_crtc_state->stream) {
2339 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2340 			dc_stream_release(dm_new_crtc_state->stream);
2341 			dm_new_crtc_state->stream = NULL;
2342 		}
2343 	}
2344 
2345 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2346 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2347 		if (dm_new_plane_state->dc_state) {
2348 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2349 			dc_plane_state_release(dm_new_plane_state->dc_state);
2350 			dm_new_plane_state->dc_state = NULL;
2351 		}
2352 	}
2353 
2354 	drm_atomic_helper_resume(ddev, dm->cached_state);
2355 
2356 	dm->cached_state = NULL;
2357 
2358 	amdgpu_dm_irq_resume_late(adev);
2359 
2360 	amdgpu_dm_smu_write_watermarks_table(adev);
2361 
2362 	return 0;
2363 }
2364 
2365 /**
2366  * DOC: DM Lifecycle
2367  *
2368  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2369  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2370  * the base driver's device list to be initialized and torn down accordingly.
2371  *
2372  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2373  */
2374 
2375 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2376 	.name = "dm",
2377 	.early_init = dm_early_init,
2378 	.late_init = dm_late_init,
2379 	.sw_init = dm_sw_init,
2380 	.sw_fini = dm_sw_fini,
2381 	.early_fini = amdgpu_dm_early_fini,
2382 	.hw_init = dm_hw_init,
2383 	.hw_fini = dm_hw_fini,
2384 	.suspend = dm_suspend,
2385 	.resume = dm_resume,
2386 	.is_idle = dm_is_idle,
2387 	.wait_for_idle = dm_wait_for_idle,
2388 	.check_soft_reset = dm_check_soft_reset,
2389 	.soft_reset = dm_soft_reset,
2390 	.set_clockgating_state = dm_set_clockgating_state,
2391 	.set_powergating_state = dm_set_powergating_state,
2392 };
2393 
2394 const struct amdgpu_ip_block_version dm_ip_block =
2395 {
2396 	.type = AMD_IP_BLOCK_TYPE_DCE,
2397 	.major = 1,
2398 	.minor = 0,
2399 	.rev = 0,
2400 	.funcs = &amdgpu_dm_funcs,
2401 };
2402 
2403 
2404 /**
2405  * DOC: atomic
2406  *
2407  * *WIP*
2408  */
2409 
2410 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2411 	.fb_create = amdgpu_display_user_framebuffer_create,
2412 	.get_format_info = amd_get_format_info,
2413 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2414 	.atomic_check = amdgpu_dm_atomic_check,
2415 	.atomic_commit = drm_atomic_helper_commit,
2416 };
2417 
2418 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2419 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2420 };
2421 
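/*
 * Derive the eDP backlight capabilities for this connector's link: whether
 * AUX backlight control is supported and, from the sink's HDR metadata,
 * the min/max input signal for the AUX (nits-based) range.
 */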
2422 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2423 {
2424 	u32 max_cll, min_cll, max, min, q, r;
2425 	struct amdgpu_dm_backlight_caps *caps;
2426 	struct amdgpu_display_manager *dm;
2427 	struct drm_connector *conn_base;
2428 	struct amdgpu_device *adev;
2429 	struct dc_link *link = NULL;
2430 	static const u8 pre_computed_values[] = {
2431 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2432 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2433 	int i;
2434 
2435 	if (!aconnector || !aconnector->dc_link)
2436 		return;
2437 
2438 	link = aconnector->dc_link;
2439 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2440 		return;
2441 
2442 	conn_base = &aconnector->base;
2443 	adev = drm_to_adev(conn_base->dev);
2444 	dm = &adev->dm;
2445 	for (i = 0; i < dm->num_of_edps; i++) {
2446 		if (link == dm->backlight_link[i])
2447 			break;
2448 	}
2449 	if (i >= dm->num_of_edps)
2450 		return;
2451 	caps = &dm->backlight_caps[i];
2452 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2453 	caps->aux_support = false;
2454 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2455 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2456 
2457 	if (caps->ext_caps->bits.oled == 1 /*||
2458 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2459 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2460 		caps->aux_support = true;
2461 
2462 	if (amdgpu_backlight == 0)
2463 		caps->aux_support = false;
2464 	else if (amdgpu_backlight == 1)
2465 		caps->aux_support = true;
2466 
2467 	/* From the specification (CTA-861-G), for calculating the maximum
2468 	 * luminance we need to use:
2469 	 *	Luminance = 50*2**(CV/32)
2470 	 * where CV is a one-byte value.
2471 	 * Evaluating this expression would need floating point precision; to
2472 	 * avoid that complexity, we take advantage of the fact that CV is
2473 	 * divided by a constant. From Euclid's division algorithm, we know
2474 	 * that CV can be written as CV = 32*q + r. Substituting CV in the
2475 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2476 	 * to pre-compute the values of 50*2**(r/32). For pre-computing them
2477 	 * we used the following Ruby line:
2478 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2479 	 * The results of the above expression can be verified against
2480 	 * pre_computed_values.
2481 	 */
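	/* Worked example (illustrative, not from the spec): for max_cll = 98,
	 * q = 98 >> 5 = 3 and r = 98 % 32 = 2, so
	 * max = (1 << 3) * pre_computed_values[2] = 8 * 52 = 416 nits, close
	 * to the exact 50*2**(98/32) ~= 418; the small gap comes from rounding
	 * the pre-computed 2**(r/32) factors.
	 */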
2482 	q = max_cll >> 5;
2483 	r = max_cll % 32;
2484 	max = (1 << q) * pre_computed_values[r];
2485 
2486 	// min luminance: maxLum * (CV/255)^2 / 100
2487 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2488 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2489 
2490 	caps->aux_max_input_signal = max;
2491 	caps->aux_min_input_signal = min;
2492 }
2493 
2494 void amdgpu_dm_update_connector_after_detect(
2495 		struct amdgpu_dm_connector *aconnector)
2496 {
2497 	struct drm_connector *connector = &aconnector->base;
2498 	struct drm_device *dev = connector->dev;
2499 	struct dc_sink *sink;
2500 
2501 	/* MST handled by drm_mst framework */
2502 	if (aconnector->mst_mgr.mst_state)
2503 		return;
2504 
2505 	sink = aconnector->dc_link->local_sink;
2506 	if (sink)
2507 		dc_sink_retain(sink);
2508 
2509 	/*
2510 	 * The EDID mgmt connector gets its first update only in the mode_valid
2511 	 * hook; the connector sink is then set to either a fake or a physical
2512 	 * sink depending on link status. Skip if already done during boot.
2513 	 */
2514 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2515 			&& aconnector->dc_em_sink) {
2516 
2517 		/*
2518 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2519 		 * fake the stream, since connector->sink is set to NULL on resume.
2520 		 */
2521 		mutex_lock(&dev->mode_config.mutex);
2522 
2523 		if (sink) {
2524 			if (aconnector->dc_sink) {
2525 				amdgpu_dm_update_freesync_caps(connector, NULL);
2526 				/*
2527 				 * The retain and release below bump up the sink's
2528 				 * refcount because the link no longer points to it
2529 				 * after disconnect; otherwise the next crtc-to-connector
2530 				 * reshuffle by the UMD would hit an unwanted dc_sink release.
2531 				 */
2532 				dc_sink_release(aconnector->dc_sink);
2533 			}
2534 			aconnector->dc_sink = sink;
2535 			dc_sink_retain(aconnector->dc_sink);
2536 			amdgpu_dm_update_freesync_caps(connector,
2537 					aconnector->edid);
2538 		} else {
2539 			amdgpu_dm_update_freesync_caps(connector, NULL);
2540 			if (!aconnector->dc_sink) {
2541 				aconnector->dc_sink = aconnector->dc_em_sink;
2542 				dc_sink_retain(aconnector->dc_sink);
2543 			}
2544 		}
2545 
2546 		mutex_unlock(&dev->mode_config.mutex);
2547 
2548 		if (sink)
2549 			dc_sink_release(sink);
2550 		return;
2551 	}
2552 
2553 	/*
2554 	 * TODO: temporary guard to look for proper fix
2555 	 * if this sink is MST sink, we should not do anything
2556 	 */
2557 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2558 		dc_sink_release(sink);
2559 		return;
2560 	}
2561 
2562 	if (aconnector->dc_sink == sink) {
2563 		/*
2564 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2565 		 * Do nothing!!
2566 		 */
2567 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2568 				aconnector->connector_id);
2569 		if (sink)
2570 			dc_sink_release(sink);
2571 		return;
2572 	}
2573 
2574 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2575 		aconnector->connector_id, aconnector->dc_sink, sink);
2576 
2577 	mutex_lock(&dev->mode_config.mutex);
2578 
2579 	/*
2580 	 * 1. Update status of the drm connector
2581 	 * 2. Send an event and let userspace tell us what to do
2582 	 */
2583 	if (sink) {
2584 		/*
2585 		 * TODO: check if we still need the S3 mode update workaround.
2586 		 * If yes, put it here.
2587 		 */
2588 		if (aconnector->dc_sink) {
2589 			amdgpu_dm_update_freesync_caps(connector, NULL);
2590 			dc_sink_release(aconnector->dc_sink);
2591 		}
2592 
2593 		aconnector->dc_sink = sink;
2594 		dc_sink_retain(aconnector->dc_sink);
2595 		if (sink->dc_edid.length == 0) {
2596 			aconnector->edid = NULL;
2597 			if (aconnector->dc_link->aux_mode) {
2598 				drm_dp_cec_unset_edid(
2599 					&aconnector->dm_dp_aux.aux);
2600 			}
2601 		} else {
2602 			aconnector->edid =
2603 				(struct edid *)sink->dc_edid.raw_edid;
2604 
2605 			drm_connector_update_edid_property(connector,
2606 							   aconnector->edid);
2607 			if (aconnector->dc_link->aux_mode)
2608 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2609 						    aconnector->edid);
2610 		}
2611 
2612 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2613 		update_connector_ext_caps(aconnector);
2614 	} else {
2615 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2616 		amdgpu_dm_update_freesync_caps(connector, NULL);
2617 		drm_connector_update_edid_property(connector, NULL);
2618 		aconnector->num_modes = 0;
2619 		dc_sink_release(aconnector->dc_sink);
2620 		aconnector->dc_sink = NULL;
2621 		aconnector->edid = NULL;
2622 #ifdef CONFIG_DRM_AMD_DC_HDCP
2623 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2624 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2625 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2626 #endif
2627 	}
2628 
2629 	mutex_unlock(&dev->mode_config.mutex);
2630 
2631 	update_subconnector_property(aconnector);
2632 
2633 	if (sink)
2634 		dc_sink_release(sink);
2635 }
2636 
2637 static void handle_hpd_irq(void *param)
2638 {
2639 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2640 	struct drm_connector *connector = &aconnector->base;
2641 	struct drm_device *dev = connector->dev;
2642 	enum dc_connection_type new_connection_type = dc_connection_none;
2643 	struct amdgpu_device *adev = drm_to_adev(dev);
2644 #ifdef CONFIG_DRM_AMD_DC_HDCP
2645 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2646 #endif
2647 
2648 	if (adev->dm.disable_hpd_irq)
2649 		return;
2650 
2651 	/*
2652 	 * In case of failure or MST there is no need to update the connector status
2653 	 * or notify the OS, since (in the MST case) MST does this in its own context.
2654 	 */
2655 	mutex_lock(&aconnector->hpd_lock);
2656 
2657 #ifdef CONFIG_DRM_AMD_DC_HDCP
2658 	if (adev->dm.hdcp_workqueue) {
2659 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2660 		dm_con_state->update_hdcp = true;
2661 	}
2662 #endif
2663 	if (aconnector->fake_enable)
2664 		aconnector->fake_enable = false;
2665 
2666 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2667 		DRM_ERROR("KMS: Failed to detect connector\n");
2668 
2669 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2670 		emulated_link_detect(aconnector->dc_link);
2671 
2672 
2673 		drm_modeset_lock_all(dev);
2674 		dm_restore_drm_connector_state(dev, connector);
2675 		drm_modeset_unlock_all(dev);
2676 
2677 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2678 			drm_kms_helper_hotplug_event(dev);
2679 
2680 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2681 		if (new_connection_type == dc_connection_none &&
2682 		    aconnector->dc_link->type == dc_connection_none)
2683 			dm_set_dpms_off(aconnector->dc_link);
2684 
2685 		amdgpu_dm_update_connector_after_detect(aconnector);
2686 
2687 		drm_modeset_lock_all(dev);
2688 		dm_restore_drm_connector_state(dev, connector);
2689 		drm_modeset_unlock_all(dev);
2690 
2691 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2692 			drm_kms_helper_hotplug_event(dev);
2693 	}
2694 	mutex_unlock(&aconnector->hpd_lock);
2695 
2696 }
2697 
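/*
 * Service the MST sideband portion of a DP short-pulse interrupt: read the
 * ESI/sink-count DPCD block, hand it to the MST manager, ACK any handled
 * IRQs back to the sink, and repeat until no new IRQ is pending (bounded by
 * max_process_count).
 */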
2698 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2699 {
2700 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2701 	uint8_t dret;
2702 	bool new_irq_handled = false;
2703 	int dpcd_addr;
2704 	int dpcd_bytes_to_read;
2705 
2706 	const int max_process_count = 30;
2707 	int process_count = 0;
2708 
2709 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2710 
2711 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2712 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2713 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2714 		dpcd_addr = DP_SINK_COUNT;
2715 	} else {
2716 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2717 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2718 		dpcd_addr = DP_SINK_COUNT_ESI;
2719 	}
2720 
2721 	dret = drm_dp_dpcd_read(
2722 		&aconnector->dm_dp_aux.aux,
2723 		dpcd_addr,
2724 		esi,
2725 		dpcd_bytes_to_read);
2726 
2727 	while (dret == dpcd_bytes_to_read &&
2728 		process_count < max_process_count) {
2729 		uint8_t retry;
2730 		dret = 0;
2731 
2732 		process_count++;
2733 
2734 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2735 		/* handle HPD short pulse irq */
2736 		if (aconnector->mst_mgr.mst_state)
2737 			drm_dp_mst_hpd_irq(
2738 				&aconnector->mst_mgr,
2739 				esi,
2740 				&new_irq_handled);
2741 
2742 		if (new_irq_handled) {
2743 			/* ACK at DPCD to notify downstream */
2744 			const int ack_dpcd_bytes_to_write =
2745 				dpcd_bytes_to_read - 1;
2746 
2747 			for (retry = 0; retry < 3; retry++) {
2748 				uint8_t wret;
2749 
2750 				wret = drm_dp_dpcd_write(
2751 					&aconnector->dm_dp_aux.aux,
2752 					dpcd_addr + 1,
2753 					&esi[1],
2754 					ack_dpcd_bytes_to_write);
2755 				if (wret == ack_dpcd_bytes_to_write)
2756 					break;
2757 			}
2758 
2759 			/* check if there is new irq to be handled */
2760 			dret = drm_dp_dpcd_read(
2761 				&aconnector->dm_dp_aux.aux,
2762 				dpcd_addr,
2763 				esi,
2764 				dpcd_bytes_to_read);
2765 
2766 			new_irq_handled = false;
2767 		} else {
2768 			break;
2769 		}
2770 	}
2771 
2772 	if (process_count == max_process_count)
2773 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2774 }
2775 
2776 static void handle_hpd_rx_irq(void *param)
2777 {
2778 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2779 	struct drm_connector *connector = &aconnector->base;
2780 	struct drm_device *dev = connector->dev;
2781 	struct dc_link *dc_link = aconnector->dc_link;
2782 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2783 	bool result = false;
2784 	enum dc_connection_type new_connection_type = dc_connection_none;
2785 	struct amdgpu_device *adev = drm_to_adev(dev);
2786 	union hpd_irq_data hpd_irq_data;
2787 	bool lock_flag = false;
2788 
2789 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2790 
2791 	if (adev->dm.disable_hpd_irq)
2792 		return;
2793 
2794 
2795 	/*
2796 	 * TODO: Temporarily take a mutex so the HPD interrupt does not hit a
2797 	 * GPIO conflict; once an i2c helper is implemented, this mutex should
2798 	 * be retired.
2799 	 */
2800 	mutex_lock(&aconnector->hpd_lock);
2801 
2802 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2803 
2804 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2805 		(dc_link->type == dc_connection_mst_branch)) {
2806 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2807 			result = true;
2808 			dm_handle_hpd_rx_irq(aconnector);
2809 			goto out;
2810 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2811 			result = false;
2812 			dm_handle_hpd_rx_irq(aconnector);
2813 			goto out;
2814 		}
2815 	}
2816 
2817 	/*
2818 	 * TODO: We need the lock to avoid touching DC state while it's being
2819 	 * modified during automated compliance testing, or when link loss
2820 	 * happens. While this should be split into subhandlers and proper
2821 	 * interfaces to avoid having to conditionally lock like this in the
2822 	 * outer layer, we need this workaround temporarily to allow MST
2823 	 * lightup in some scenarios to avoid timeout.
2824 	 */
2825 	if (!amdgpu_in_reset(adev) &&
2826 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2827 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2828 		mutex_lock(&adev->dm.dc_lock);
2829 		lock_flag = 1;
2830 	}
2831 
2832 #ifdef CONFIG_DRM_AMD_DC_HDCP
2833 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2834 #else
2835 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2836 #endif
2837 	if (!amdgpu_in_reset(adev) && lock_flag)
2838 		mutex_unlock(&adev->dm.dc_lock);
2839 
2840 out:
2841 	if (result && !is_mst_root_connector) {
2842 		/* Downstream Port status changed. */
2843 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2844 			DRM_ERROR("KMS: Failed to detect connector\n");
2845 
2846 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2847 			emulated_link_detect(dc_link);
2848 
2849 			if (aconnector->fake_enable)
2850 				aconnector->fake_enable = false;
2851 
2852 			amdgpu_dm_update_connector_after_detect(aconnector);
2853 
2854 
2855 			drm_modeset_lock_all(dev);
2856 			dm_restore_drm_connector_state(dev, connector);
2857 			drm_modeset_unlock_all(dev);
2858 
2859 			drm_kms_helper_hotplug_event(dev);
2860 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2861 
2862 			if (aconnector->fake_enable)
2863 				aconnector->fake_enable = false;
2864 
2865 			amdgpu_dm_update_connector_after_detect(aconnector);
2866 
2867 
2868 			drm_modeset_lock_all(dev);
2869 			dm_restore_drm_connector_state(dev, connector);
2870 			drm_modeset_unlock_all(dev);
2871 
2872 			drm_kms_helper_hotplug_event(dev);
2873 		}
2874 	}
2875 #ifdef CONFIG_DRM_AMD_DC_HDCP
2876 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2877 		if (adev->dm.hdcp_workqueue)
2878 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2879 	}
2880 #endif
2881 
2882 	if (dc_link->type != dc_connection_mst_branch)
2883 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2884 
2885 	mutex_unlock(&aconnector->hpd_lock);
2886 }
2887 
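/*
 * For every connector, register low-IRQ-context handlers for the HPD and
 * HPD RX (DP short pulse) interrupt sources exposed by its DC link.
 */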
2888 static void register_hpd_handlers(struct amdgpu_device *adev)
2889 {
2890 	struct drm_device *dev = adev_to_drm(adev);
2891 	struct drm_connector *connector;
2892 	struct amdgpu_dm_connector *aconnector;
2893 	const struct dc_link *dc_link;
2894 	struct dc_interrupt_params int_params = {0};
2895 
2896 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2897 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2898 
2899 	list_for_each_entry(connector,
2900 			&dev->mode_config.connector_list, head)	{
2901 
2902 		aconnector = to_amdgpu_dm_connector(connector);
2903 		dc_link = aconnector->dc_link;
2904 
2905 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2906 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2907 			int_params.irq_source = dc_link->irq_source_hpd;
2908 
2909 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2910 					handle_hpd_irq,
2911 					(void *) aconnector);
2912 		}
2913 
2914 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2915 
2916 			/* Also register for DP short pulse (hpd_rx). */
2917 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2918 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2919 
2920 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2921 					handle_hpd_rx_irq,
2922 					(void *) aconnector);
2923 		}
2924 	}
2925 }
2926 
2927 #if defined(CONFIG_DRM_AMD_DC_SI)
2928 /* Register IRQ sources and initialize IRQ callbacks */
2929 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2930 {
2931 	struct dc *dc = adev->dm.dc;
2932 	struct common_irq_params *c_irq_params;
2933 	struct dc_interrupt_params int_params = {0};
2934 	int r;
2935 	int i;
2936 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2937 
2938 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2939 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2940 
2941 	/*
2942 	 * Actions of amdgpu_irq_add_id():
2943 	 * 1. Register a set() function with base driver.
2944 	 *    Base driver will call set() function to enable/disable an
2945 	 *    interrupt in DC hardware.
2946 	 * 2. Register amdgpu_dm_irq_handler().
2947 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2948 	 *    coming from DC hardware.
2949 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2950 	 *    for acknowledging and handling. */
2951 
2952 	/* Use VBLANK interrupt */
2953 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2954 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2955 		if (r) {
2956 			DRM_ERROR("Failed to add crtc irq id!\n");
2957 			return r;
2958 		}
2959 
2960 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2961 		int_params.irq_source =
2962 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2963 
2964 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2965 
2966 		c_irq_params->adev = adev;
2967 		c_irq_params->irq_src = int_params.irq_source;
2968 
2969 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2970 				dm_crtc_high_irq, c_irq_params);
2971 	}
2972 
2973 	/* Use GRPH_PFLIP interrupt */
2974 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2975 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2976 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2977 		if (r) {
2978 			DRM_ERROR("Failed to add page flip irq id!\n");
2979 			return r;
2980 		}
2981 
2982 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2983 		int_params.irq_source =
2984 			dc_interrupt_to_irq_source(dc, i, 0);
2985 
2986 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2987 
2988 		c_irq_params->adev = adev;
2989 		c_irq_params->irq_src = int_params.irq_source;
2990 
2991 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2992 				dm_pflip_high_irq, c_irq_params);
2993 
2994 	}
2995 
2996 	/* HPD */
2997 	r = amdgpu_irq_add_id(adev, client_id,
2998 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2999 	if (r) {
3000 		DRM_ERROR("Failed to add hpd irq id!\n");
3001 		return r;
3002 	}
3003 
3004 	register_hpd_handlers(adev);
3005 
3006 	return 0;
3007 }
3008 #endif
3009 
3010 /* Register IRQ sources and initialize IRQ callbacks */
3011 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3012 {
3013 	struct dc *dc = adev->dm.dc;
3014 	struct common_irq_params *c_irq_params;
3015 	struct dc_interrupt_params int_params = {0};
3016 	int r;
3017 	int i;
3018 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3019 
3020 	if (adev->asic_type >= CHIP_VEGA10)
3021 		client_id = SOC15_IH_CLIENTID_DCE;
3022 
3023 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3024 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3025 
3026 	/*
3027 	 * Actions of amdgpu_irq_add_id():
3028 	 * 1. Register a set() function with base driver.
3029 	 *    Base driver will call set() function to enable/disable an
3030 	 *    interrupt in DC hardware.
3031 	 * 2. Register amdgpu_dm_irq_handler().
3032 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3033 	 *    coming from DC hardware.
3034 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3035 	 *    for acknowledging and handling. */
3036 
3037 	/* Use VBLANK interrupt */
3038 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3039 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3040 		if (r) {
3041 			DRM_ERROR("Failed to add crtc irq id!\n");
3042 			return r;
3043 		}
3044 
3045 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3046 		int_params.irq_source =
3047 			dc_interrupt_to_irq_source(dc, i, 0);
3048 
3049 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3050 
3051 		c_irq_params->adev = adev;
3052 		c_irq_params->irq_src = int_params.irq_source;
3053 
3054 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3055 				dm_crtc_high_irq, c_irq_params);
3056 	}
3057 
3058 	/* Use VUPDATE interrupt */
3059 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3060 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3061 		if (r) {
3062 			DRM_ERROR("Failed to add vupdate irq id!\n");
3063 			return r;
3064 		}
3065 
3066 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3067 		int_params.irq_source =
3068 			dc_interrupt_to_irq_source(dc, i, 0);
3069 
3070 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3071 
3072 		c_irq_params->adev = adev;
3073 		c_irq_params->irq_src = int_params.irq_source;
3074 
3075 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3076 				dm_vupdate_high_irq, c_irq_params);
3077 	}
3078 
3079 	/* Use GRPH_PFLIP interrupt */
3080 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3081 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3082 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3083 		if (r) {
3084 			DRM_ERROR("Failed to add page flip irq id!\n");
3085 			return r;
3086 		}
3087 
3088 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3089 		int_params.irq_source =
3090 			dc_interrupt_to_irq_source(dc, i, 0);
3091 
3092 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3093 
3094 		c_irq_params->adev = adev;
3095 		c_irq_params->irq_src = int_params.irq_source;
3096 
3097 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3098 				dm_pflip_high_irq, c_irq_params);
3099 
3100 	}
3101 
3102 	/* HPD */
3103 	r = amdgpu_irq_add_id(adev, client_id,
3104 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3105 	if (r) {
3106 		DRM_ERROR("Failed to add hpd irq id!\n");
3107 		return r;
3108 	}
3109 
3110 	register_hpd_handlers(adev);
3111 
3112 	return 0;
3113 }
3114 
3115 #if defined(CONFIG_DRM_AMD_DC_DCN)
3116 /* Register IRQ sources and initialize IRQ callbacks */
3117 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3118 {
3119 	struct dc *dc = adev->dm.dc;
3120 	struct common_irq_params *c_irq_params;
3121 	struct dc_interrupt_params int_params = {0};
3122 	int r;
3123 	int i;
3124 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3125 	static const unsigned int vrtl_int_srcid[] = {
3126 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3127 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3128 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3129 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3130 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3131 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3132 	};
3133 #endif
3134 
3135 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3136 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3137 
3138 	/*
3139 	 * Actions of amdgpu_irq_add_id():
3140 	 * 1. Register a set() function with base driver.
3141 	 *    Base driver will call set() function to enable/disable an
3142 	 *    interrupt in DC hardware.
3143 	 * 2. Register amdgpu_dm_irq_handler().
3144 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3145 	 *    coming from DC hardware.
3146 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3147 	 *    for acknowledging and handling.
3148 	 */
3149 
3150 	/* Use VSTARTUP interrupt */
3151 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3152 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3153 			i++) {
3154 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3155 
3156 		if (r) {
3157 			DRM_ERROR("Failed to add crtc irq id!\n");
3158 			return r;
3159 		}
3160 
3161 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3162 		int_params.irq_source =
3163 			dc_interrupt_to_irq_source(dc, i, 0);
3164 
3165 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3166 
3167 		c_irq_params->adev = adev;
3168 		c_irq_params->irq_src = int_params.irq_source;
3169 
3170 		amdgpu_dm_irq_register_interrupt(
3171 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3172 	}
3173 
3174 	/* Use otg vertical line interrupt */
3175 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3176 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3177 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3178 				vrtl_int_srcid[i], &adev->vline0_irq);
3179 
3180 		if (r) {
3181 			DRM_ERROR("Failed to add vline0 irq id!\n");
3182 			return r;
3183 		}
3184 
3185 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3186 		int_params.irq_source =
3187 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3188 
3189 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3190 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3191 			break;
3192 		}
3193 
3194 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3195 					- DC_IRQ_SOURCE_DC1_VLINE0];
3196 
3197 		c_irq_params->adev = adev;
3198 		c_irq_params->irq_src = int_params.irq_source;
3199 
3200 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3201 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3202 	}
3203 #endif
3204 
3205 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3206 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3207 	 * to trigger at end of each vblank, regardless of state of the lock,
3208 	 * matching DCE behaviour.
3209 	 */
3210 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3211 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3212 	     i++) {
3213 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3214 
3215 		if (r) {
3216 			DRM_ERROR("Failed to add vupdate irq id!\n");
3217 			return r;
3218 		}
3219 
3220 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3221 		int_params.irq_source =
3222 			dc_interrupt_to_irq_source(dc, i, 0);
3223 
3224 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3225 
3226 		c_irq_params->adev = adev;
3227 		c_irq_params->irq_src = int_params.irq_source;
3228 
3229 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3230 				dm_vupdate_high_irq, c_irq_params);
3231 	}
3232 
3233 	/* Use GRPH_PFLIP interrupt */
3234 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3235 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3236 			i++) {
3237 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3238 		if (r) {
3239 			DRM_ERROR("Failed to add page flip irq id!\n");
3240 			return r;
3241 		}
3242 
3243 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3244 		int_params.irq_source =
3245 			dc_interrupt_to_irq_source(dc, i, 0);
3246 
3247 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3248 
3249 		c_irq_params->adev = adev;
3250 		c_irq_params->irq_src = int_params.irq_source;
3251 
3252 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3253 				dm_pflip_high_irq, c_irq_params);
3254 
3255 	}
3256 
3257 	/* HPD */
3258 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3259 			&adev->hpd_irq);
3260 	if (r) {
3261 		DRM_ERROR("Failed to add hpd irq id!\n");
3262 		return r;
3263 	}
3264 
3265 	register_hpd_handlers(adev);
3266 
3267 	return 0;
3268 }
3269 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3270 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3271 {
3272 	struct dc *dc = adev->dm.dc;
3273 	struct common_irq_params *c_irq_params;
3274 	struct dc_interrupt_params int_params = {0};
3275 	int r, i;
3276 
3277 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3278 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3279 
3280 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3281 			&adev->dmub_outbox_irq);
3282 	if (r) {
3283 		DRM_ERROR("Failed to add outbox irq id!\n");
3284 		return r;
3285 	}
3286 
3287 	if (dc->ctx->dmub_srv) {
3288 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3289 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3290 		int_params.irq_source =
3291 		dc_interrupt_to_irq_source(dc, i, 0);
3292 
3293 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3294 
3295 		c_irq_params->adev = adev;
3296 		c_irq_params->irq_src = int_params.irq_source;
3297 
3298 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3299 				dm_dmub_outbox1_low_irq, c_irq_params);
3300 	}
3301 
3302 	return 0;
3303 }
3304 #endif
3305 
3306 /*
3307  * Acquires the lock for the atomic state object and returns
3308  * the new atomic state.
3309  *
3310  * This should only be called during atomic check.
3311  */
3312 static int dm_atomic_get_state(struct drm_atomic_state *state,
3313 			       struct dm_atomic_state **dm_state)
3314 {
3315 	struct drm_device *dev = state->dev;
3316 	struct amdgpu_device *adev = drm_to_adev(dev);
3317 	struct amdgpu_display_manager *dm = &adev->dm;
3318 	struct drm_private_state *priv_state;
3319 
3320 	if (*dm_state)
3321 		return 0;
3322 
3323 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3324 	if (IS_ERR(priv_state))
3325 		return PTR_ERR(priv_state);
3326 
3327 	*dm_state = to_dm_atomic_state(priv_state);
3328 
3329 	return 0;
3330 }
3331 
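/*
 * Return the new DM private object state tracked in @state, or NULL if the
 * atomic state does not carry one.
 */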
3332 static struct dm_atomic_state *
3333 dm_atomic_get_new_state(struct drm_atomic_state *state)
3334 {
3335 	struct drm_device *dev = state->dev;
3336 	struct amdgpu_device *adev = drm_to_adev(dev);
3337 	struct amdgpu_display_manager *dm = &adev->dm;
3338 	struct drm_private_obj *obj;
3339 	struct drm_private_state *new_obj_state;
3340 	int i;
3341 
3342 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3343 		if (obj->funcs == dm->atomic_obj.funcs)
3344 			return to_dm_atomic_state(new_obj_state);
3345 	}
3346 
3347 	return NULL;
3348 }
3349 
3350 static struct drm_private_state *
3351 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3352 {
3353 	struct dm_atomic_state *old_state, *new_state;
3354 
3355 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3356 	if (!new_state)
3357 		return NULL;
3358 
3359 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3360 
3361 	old_state = to_dm_atomic_state(obj->state);
3362 
3363 	if (old_state && old_state->context)
3364 		new_state->context = dc_copy_state(old_state->context);
3365 
3366 	if (!new_state->context) {
3367 		kfree(new_state);
3368 		return NULL;
3369 	}
3370 
3371 	return &new_state->base;
3372 }
3373 
3374 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3375 				    struct drm_private_state *state)
3376 {
3377 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3378 
3379 	if (dm_state && dm_state->context)
3380 		dc_release_state(dm_state->context);
3381 
3382 	kfree(dm_state);
3383 }
3384 
3385 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3386 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3387 	.atomic_destroy_state = dm_atomic_destroy_state,
3388 };
3389 
3390 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3391 {
3392 	struct dm_atomic_state *state;
3393 	int r;
3394 
3395 	adev->mode_info.mode_config_initialized = true;
3396 
3397 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3398 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3399 
3400 	adev_to_drm(adev)->mode_config.max_width = 16384;
3401 	adev_to_drm(adev)->mode_config.max_height = 16384;
3402 
3403 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3404 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3405 	/* indicates support for immediate flip */
3406 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3407 
3408 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3409 
3410 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3411 	if (!state)
3412 		return -ENOMEM;
3413 
3414 	state->context = dc_create_state(adev->dm.dc);
3415 	if (!state->context) {
3416 		kfree(state);
3417 		return -ENOMEM;
3418 	}
3419 
3420 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3421 
3422 	drm_atomic_private_obj_init(adev_to_drm(adev),
3423 				    &adev->dm.atomic_obj,
3424 				    &state->base,
3425 				    &dm_atomic_state_funcs);
3426 
3427 	r = amdgpu_display_modeset_create_props(adev);
3428 	if (r) {
3429 		dc_release_state(state->context);
3430 		kfree(state);
3431 		return r;
3432 	}
3433 
3434 	r = amdgpu_dm_audio_init(adev);
3435 	if (r) {
3436 		dc_release_state(state->context);
3437 		kfree(state);
3438 		return r;
3439 	}
3440 
3441 	return 0;
3442 }
3443 
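/*
 * Fallback backlight input-signal range used when ACPI reports no valid
 * caps, plus the default transition time for AUX-based brightness changes.
 */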
3444 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3445 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3446 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3447 
3448 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3449 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3450 
3451 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3452 					    int bl_idx)
3453 {
3454 #if defined(CONFIG_ACPI)
3455 	struct amdgpu_dm_backlight_caps caps;
3456 
3457 	memset(&caps, 0, sizeof(caps));
3458 
3459 	if (dm->backlight_caps[bl_idx].caps_valid)
3460 		return;
3461 
3462 	amdgpu_acpi_get_backlight_caps(&caps);
3463 	if (caps.caps_valid) {
3464 		dm->backlight_caps[bl_idx].caps_valid = true;
3465 		if (caps.aux_support)
3466 			return;
3467 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3468 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3469 	} else {
3470 		dm->backlight_caps[bl_idx].min_input_signal =
3471 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3472 		dm->backlight_caps[bl_idx].max_input_signal =
3473 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3474 	}
3475 #else
3476 	if (dm->backlight_caps[bl_idx].aux_support)
3477 		return;
3478 
3479 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3480 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3481 #endif
3482 }
3483 
3484 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3485 				unsigned *min, unsigned *max)
3486 {
3487 	if (!caps)
3488 		return 0;
3489 
3490 	if (caps->aux_support) {
3491 		// Firmware limits are in nits, DC API wants millinits.
3492 		*max = 1000 * caps->aux_max_input_signal;
3493 		*min = 1000 * caps->aux_min_input_signal;
3494 	} else {
3495 		// Firmware limits are 8-bit, PWM control is 16-bit.
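		// e.g. 0xff * 0x101 == 0xffff, so the full 8-bit firmware
		// range maps onto the full 16-bit PWM range.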
3496 		*max = 0x101 * caps->max_input_signal;
3497 		*min = 0x101 * caps->min_input_signal;
3498 	}
3499 	return 1;
3500 }
3501 
3502 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3503 					uint32_t brightness)
3504 {
3505 	unsigned min, max;
3506 
3507 	if (!get_brightness_range(caps, &min, &max))
3508 		return brightness;
3509 
3510 	// Rescale 0..255 to min..max
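	// brightness == 0 yields min; brightness == AMDGPU_MAX_BL_LEVEL yields max.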
3511 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3512 				       AMDGPU_MAX_BL_LEVEL);
3513 }
3514 
3515 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3516 				      uint32_t brightness)
3517 {
3518 	unsigned min, max;
3519 
3520 	if (!get_brightness_range(caps, &min, &max))
3521 		return brightness;
3522 
3523 	if (brightness < min)
3524 		return 0;
3525 	// Rescale min..max to 0..255
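	// Exact inverse of convert_brightness_from_user(), modulo rounding.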
3526 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3527 				 max - min);
3528 }
3529 
3530 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3531 					 int bl_idx,
3532 					 u32 user_brightness)
3533 {
3534 	struct amdgpu_dm_backlight_caps caps;
3535 	struct dc_link *link;
3536 	u32 brightness;
3537 	bool rc;
3538 
3539 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3540 	caps = dm->backlight_caps[bl_idx];
3541 
3542 	dm->brightness[bl_idx] = user_brightness;
3543 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3544 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3545 
3546 	/* Change brightness based on AUX property */
3547 	if (caps.aux_support) {
3548 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3549 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3550 		if (!rc)
3551 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3552 	} else {
3553 		rc = dc_link_set_backlight_level(link, brightness, 0);
3554 		if (!rc)
3555 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3556 	}
3557 
3558 	return rc ? 0 : 1;
3559 }
3560 
3561 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3562 {
3563 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3564 	int i;
3565 
3566 	for (i = 0; i < dm->num_of_edps; i++) {
3567 		if (bd == dm->backlight_dev[i])
3568 			break;
3569 	}
3570 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3571 		i = 0;
3572 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3573 
3574 	return 0;
3575 }
3576 
3577 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3578 					 int bl_idx)
3579 {
3580 	struct amdgpu_dm_backlight_caps caps;
3581 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3582 
3583 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3584 	caps = dm->backlight_caps[bl_idx];
3585 
3586 	if (caps.aux_support) {
3587 		u32 avg, peak;
3588 		bool rc;
3589 
3590 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3591 		if (!rc)
3592 			return dm->brightness[bl_idx];
3593 		return convert_brightness_to_user(&caps, avg);
3594 	} else {
3595 		int ret = dc_link_get_backlight_level(link);
3596 
3597 		if (ret == DC_ERROR_UNEXPECTED)
3598 			return dm->brightness[bl_idx];
3599 		return convert_brightness_to_user(&caps, ret);
3600 	}
3601 }
3602 
3603 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3604 {
3605 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3606 	int i;
3607 
3608 	for (i = 0; i < dm->num_of_edps; i++) {
3609 		if (bd == dm->backlight_dev[i])
3610 			break;
3611 	}
3612 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3613 		i = 0;
3614 	return amdgpu_dm_backlight_get_level(dm, i);
3615 }
3616 
3617 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3618 	.options = BL_CORE_SUSPENDRESUME,
3619 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3620 	.update_status	= amdgpu_dm_backlight_update_status,
3621 };
3622 
3623 static void
3624 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3625 {
3626 	char bl_name[16];
3627 	struct backlight_properties props = { 0 };
3628 
3629 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3630 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3631 
3632 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3633 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3634 	props.type = BACKLIGHT_RAW;
3635 
3636 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3637 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3638 
3639 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3640 								       adev_to_drm(dm->adev)->dev,
3641 								       dm,
3642 								       &amdgpu_dm_backlight_ops,
3643 								       &props);
3644 
3645 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3646 		DRM_ERROR("DM: Backlight registration failed!\n");
3647 	else
3648 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3649 }
3650 #endif
3651 
3652 static int initialize_plane(struct amdgpu_display_manager *dm,
3653 			    struct amdgpu_mode_info *mode_info, int plane_id,
3654 			    enum drm_plane_type plane_type,
3655 			    const struct dc_plane_cap *plane_cap)
3656 {
3657 	struct drm_plane *plane;
3658 	unsigned long possible_crtcs;
3659 	int ret = 0;
3660 
3661 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3662 	if (!plane) {
3663 		DRM_ERROR("KMS: Failed to allocate plane\n");
3664 		return -ENOMEM;
3665 	}
3666 	plane->type = plane_type;
3667 
3668 	/*
3669 	 * HACK: IGT tests expect that the primary plane for a CRTC
3670 	 * can only have one possible CRTC. Only expose support for
3671 	 * all CRTCs on planes that won't be used as a primary plane
3672 	 * for a CRTC, such as overlay or underlay planes.
3673 	 */
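	/* e.g. plane 0 -> possible_crtcs 0x1; overlay planes get 0xff (any CRTC). */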
3674 	possible_crtcs = 1 << plane_id;
3675 	if (plane_id >= dm->dc->caps.max_streams)
3676 		possible_crtcs = 0xff;
3677 
3678 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3679 
3680 	if (ret) {
3681 		DRM_ERROR("KMS: Failed to initialize plane\n");
3682 		kfree(plane);
3683 		return ret;
3684 	}
3685 
3686 	if (mode_info)
3687 		mode_info->planes[plane_id] = plane;
3688 
3689 	return ret;
3690 }
3691 
3693 static void register_backlight_device(struct amdgpu_display_manager *dm,
3694 				      struct dc_link *link)
3695 {
3696 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3697 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3698 
3699 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3700 	    link->type != dc_connection_none) {
3701 		/*
3702 		 * Even if registration failed, we should continue with
3703 		 * DM initialization because not having a backlight control
3704 		 * is better than a black screen.
3705 		 */
3706 		if (!dm->backlight_dev[dm->num_of_edps])
3707 			amdgpu_dm_register_backlight_device(dm);
3708 
3709 		if (dm->backlight_dev[dm->num_of_edps]) {
3710 			dm->backlight_link[dm->num_of_edps] = link;
3711 			dm->num_of_edps++;
3712 		}
3713 	}
3714 #endif
3715 }
3716 
3718 /*
3719  * In this architecture, the association
3720  * connector -> encoder -> crtc
3721  * is not really required. The crtc and connector will hold the
3722  * display_index as an abstraction to use with the DAL component.
3723  *
3724  * Returns 0 on success
3725  */
3726 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3727 {
3728 	struct amdgpu_display_manager *dm = &adev->dm;
3729 	int32_t i;
3730 	struct amdgpu_dm_connector *aconnector = NULL;
3731 	struct amdgpu_encoder *aencoder = NULL;
3732 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3733 	uint32_t link_cnt;
3734 	int32_t primary_planes;
3735 	enum dc_connection_type new_connection_type = dc_connection_none;
3736 	const struct dc_plane_cap *plane;
3737 
3738 	dm->display_indexes_num = dm->dc->caps.max_streams;
3739 	/* Update the actual number of CRTCs in use */
3740 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3741 
3742 	link_cnt = dm->dc->caps.max_links;
3743 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3744 		DRM_ERROR("DM: Failed to initialize mode config\n");
3745 		return -EINVAL;
3746 	}
3747 
3748 	/* There is one primary plane per CRTC */
3749 	primary_planes = dm->dc->caps.max_streams;
3750 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3751 
3752 	/*
3753 	 * Initialize primary planes, the implicit planes for legacy IOCTLs.
3754 	 * Order is reversed to match iteration order in atomic check.
3755 	 */
3756 	for (i = (primary_planes - 1); i >= 0; i--) {
3757 		plane = &dm->dc->caps.planes[i];
3758 
3759 		if (initialize_plane(dm, mode_info, i,
3760 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3761 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3762 			goto fail;
3763 		}
3764 	}
3765 
3766 	/*
3767 	 * Initialize overlay planes, index starting after primary planes.
3768 	 * These planes have a higher DRM index than the primary planes since
3769 	 * they should be considered as having a higher z-order.
3770 	 * Order is reversed to match iteration order in atomic check.
3771 	 *
3772 	 * Only support DCN for now, and only expose one so we don't encourage
3773 	 * userspace to use up all the pipes.
3774 	 */
3775 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3776 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3777 
3778 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3779 			continue;
3780 
3781 		if (!plane->blends_with_above || !plane->blends_with_below)
3782 			continue;
3783 
3784 		if (!plane->pixel_format_support.argb8888)
3785 			continue;
3786 
3787 		if (initialize_plane(dm, NULL, primary_planes + i,
3788 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3789 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3790 			goto fail;
3791 		}
3792 
3793 		/* Only create one overlay plane. */
3794 		break;
3795 	}
3796 
3797 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3798 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3799 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3800 			goto fail;
3801 		}
3802 
3803 #if defined(CONFIG_DRM_AMD_DC_DCN)
3804 	/* Use Outbox interrupt */
3805 	switch (adev->asic_type) {
3806 	case CHIP_SIENNA_CICHLID:
3807 	case CHIP_NAVY_FLOUNDER:
3808 	case CHIP_YELLOW_CARP:
3809 	case CHIP_RENOIR:
3810 		if (register_outbox_irq_handlers(dm->adev)) {
3811 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3812 			goto fail;
3813 		}
3814 		break;
3815 	default:
3816 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3817 	}
3818 #endif
3819 
3820 	/* Loop over all connectors on the board. */
3821 	for (i = 0; i < link_cnt; i++) {
3822 		struct dc_link *link = NULL;
3823 
3824 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3825 			DRM_ERROR(
3826 				"KMS: Cannot support more than %d display indexes\n",
3827 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3828 			continue;
3829 		}
3830 
3831 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3832 		if (!aconnector)
3833 			goto fail;
3834 
3835 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3836 		if (!aencoder)
3837 			goto fail;
3838 
3839 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3840 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3841 			goto fail;
3842 		}
3843 
3844 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3845 			DRM_ERROR("KMS: Failed to initialize connector\n");
3846 			goto fail;
3847 		}
3848 
3849 		link = dc_get_link_at_index(dm->dc, i);
3850 
3851 		if (!dc_link_detect_sink(link, &new_connection_type))
3852 			DRM_ERROR("KMS: Failed to detect connector\n");
3853 
3854 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3855 			emulated_link_detect(link);
3856 			amdgpu_dm_update_connector_after_detect(aconnector);
3857 
3858 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3859 			amdgpu_dm_update_connector_after_detect(aconnector);
3860 			register_backlight_device(dm, link);
3861 
3862 			if (dm->num_of_edps)
3863 				update_connector_ext_caps(aconnector);
3864 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3865 				amdgpu_dm_set_psr_caps(link);
3866 		}
3867 
3869 	}
3870 
3871 	/* Software is initialized. Now we can register interrupt handlers. */
3872 	switch (adev->asic_type) {
3873 #if defined(CONFIG_DRM_AMD_DC_SI)
3874 	case CHIP_TAHITI:
3875 	case CHIP_PITCAIRN:
3876 	case CHIP_VERDE:
3877 	case CHIP_OLAND:
3878 		if (dce60_register_irq_handlers(dm->adev)) {
3879 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3880 			goto fail;
3881 		}
3882 		break;
3883 #endif
3884 	case CHIP_BONAIRE:
3885 	case CHIP_HAWAII:
3886 	case CHIP_KAVERI:
3887 	case CHIP_KABINI:
3888 	case CHIP_MULLINS:
3889 	case CHIP_TONGA:
3890 	case CHIP_FIJI:
3891 	case CHIP_CARRIZO:
3892 	case CHIP_STONEY:
3893 	case CHIP_POLARIS11:
3894 	case CHIP_POLARIS10:
3895 	case CHIP_POLARIS12:
3896 	case CHIP_VEGAM:
3897 	case CHIP_VEGA10:
3898 	case CHIP_VEGA12:
3899 	case CHIP_VEGA20:
3900 		if (dce110_register_irq_handlers(dm->adev)) {
3901 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3902 			goto fail;
3903 		}
3904 		break;
3905 #if defined(CONFIG_DRM_AMD_DC_DCN)
3906 	case CHIP_RAVEN:
3907 	case CHIP_NAVI12:
3908 	case CHIP_NAVI10:
3909 	case CHIP_NAVI14:
3910 	case CHIP_RENOIR:
3911 	case CHIP_SIENNA_CICHLID:
3912 	case CHIP_NAVY_FLOUNDER:
3913 	case CHIP_DIMGREY_CAVEFISH:
3914 	case CHIP_BEIGE_GOBY:
3915 	case CHIP_VANGOGH:
3916 	case CHIP_YELLOW_CARP:
3917 		if (dcn10_register_irq_handlers(dm->adev)) {
3918 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3919 			goto fail;
3920 		}
3921 		break;
3922 #endif
3923 	default:
3924 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3925 		goto fail;
3926 	}
3927 
3928 	return 0;
3929 fail:
3930 	kfree(aencoder);
3931 	kfree(aconnector);
3932 
3933 	return -EINVAL;
3934 }
3935 
3936 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3937 {
3938 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3940 }
3941 
3942 /******************************************************************************
3943  * amdgpu_display_funcs functions
3944  *****************************************************************************/
3945 
3946 /*
3947  * dm_bandwidth_update - program display watermarks
3948  *
3949  * @adev: amdgpu_device pointer
3950  *
3951  * Calculate and program the display watermarks and line buffer allocation.
3952  */
3953 static void dm_bandwidth_update(struct amdgpu_device *adev)
3954 {
3955 	/* TODO: implement later */
3956 }
3957 
3958 static const struct amdgpu_display_funcs dm_display_funcs = {
3959 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3960 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3961 	.backlight_set_level = NULL, /* never called for DC */
3962 	.backlight_get_level = NULL, /* never called for DC */
3963 	.hpd_sense = NULL,/* called unconditionally */
3964 	.hpd_set_polarity = NULL, /* called unconditionally */
3965 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3966 	.page_flip_get_scanoutpos =
3967 		dm_crtc_get_scanoutpos,/* called unconditionally */
3968 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3969 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3970 };
3971 
3972 #if defined(CONFIG_DEBUG_KERNEL_DC)
3973 
3974 static ssize_t s3_debug_store(struct device *device,
3975 			      struct device_attribute *attr,
3976 			      const char *buf,
3977 			      size_t count)
3978 {
3979 	int ret;
3980 	int s3_state;
3981 	struct drm_device *drm_dev = dev_get_drvdata(device);
3982 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3983 
3984 	ret = kstrtoint(buf, 0, &s3_state);
3985 
3986 	if (ret == 0) {
3987 		if (s3_state) {
3988 			dm_resume(adev);
3989 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3990 		} else
3991 			dm_suspend(adev);
3992 	}
3993 
3994 	return ret == 0 ? count : 0;
3995 }
3996 
3997 DEVICE_ATTR_WO(s3_debug);
3998 
3999 #endif
4000 
4001 static int dm_early_init(void *handle)
4002 {
4003 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4004 
4005 	switch (adev->asic_type) {
4006 #if defined(CONFIG_DRM_AMD_DC_SI)
4007 	case CHIP_TAHITI:
4008 	case CHIP_PITCAIRN:
4009 	case CHIP_VERDE:
4010 		adev->mode_info.num_crtc = 6;
4011 		adev->mode_info.num_hpd = 6;
4012 		adev->mode_info.num_dig = 6;
4013 		break;
4014 	case CHIP_OLAND:
4015 		adev->mode_info.num_crtc = 2;
4016 		adev->mode_info.num_hpd = 2;
4017 		adev->mode_info.num_dig = 2;
4018 		break;
4019 #endif
4020 	case CHIP_BONAIRE:
4021 	case CHIP_HAWAII:
4022 		adev->mode_info.num_crtc = 6;
4023 		adev->mode_info.num_hpd = 6;
4024 		adev->mode_info.num_dig = 6;
4025 		break;
4026 	case CHIP_KAVERI:
4027 		adev->mode_info.num_crtc = 4;
4028 		adev->mode_info.num_hpd = 6;
4029 		adev->mode_info.num_dig = 7;
4030 		break;
4031 	case CHIP_KABINI:
4032 	case CHIP_MULLINS:
4033 		adev->mode_info.num_crtc = 2;
4034 		adev->mode_info.num_hpd = 6;
4035 		adev->mode_info.num_dig = 6;
4036 		break;
4037 	case CHIP_FIJI:
4038 	case CHIP_TONGA:
4039 		adev->mode_info.num_crtc = 6;
4040 		adev->mode_info.num_hpd = 6;
4041 		adev->mode_info.num_dig = 7;
4042 		break;
4043 	case CHIP_CARRIZO:
4044 		adev->mode_info.num_crtc = 3;
4045 		adev->mode_info.num_hpd = 6;
4046 		adev->mode_info.num_dig = 9;
4047 		break;
4048 	case CHIP_STONEY:
4049 		adev->mode_info.num_crtc = 2;
4050 		adev->mode_info.num_hpd = 6;
4051 		adev->mode_info.num_dig = 9;
4052 		break;
4053 	case CHIP_POLARIS11:
4054 	case CHIP_POLARIS12:
4055 		adev->mode_info.num_crtc = 5;
4056 		adev->mode_info.num_hpd = 5;
4057 		adev->mode_info.num_dig = 5;
4058 		break;
4059 	case CHIP_POLARIS10:
4060 	case CHIP_VEGAM:
4061 		adev->mode_info.num_crtc = 6;
4062 		adev->mode_info.num_hpd = 6;
4063 		adev->mode_info.num_dig = 6;
4064 		break;
4065 	case CHIP_VEGA10:
4066 	case CHIP_VEGA12:
4067 	case CHIP_VEGA20:
4068 		adev->mode_info.num_crtc = 6;
4069 		adev->mode_info.num_hpd = 6;
4070 		adev->mode_info.num_dig = 6;
4071 		break;
4072 #if defined(CONFIG_DRM_AMD_DC_DCN)
4073 	case CHIP_RAVEN:
4074 	case CHIP_RENOIR:
4075 	case CHIP_VANGOGH:
4076 		adev->mode_info.num_crtc = 4;
4077 		adev->mode_info.num_hpd = 4;
4078 		adev->mode_info.num_dig = 4;
4079 		break;
4080 	case CHIP_NAVI10:
4081 	case CHIP_NAVI12:
4082 	case CHIP_SIENNA_CICHLID:
4083 	case CHIP_NAVY_FLOUNDER:
4084 		adev->mode_info.num_crtc = 6;
4085 		adev->mode_info.num_hpd = 6;
4086 		adev->mode_info.num_dig = 6;
4087 		break;
4088 	case CHIP_YELLOW_CARP:
4089 		adev->mode_info.num_crtc = 4;
4090 		adev->mode_info.num_hpd = 4;
4091 		adev->mode_info.num_dig = 4;
4092 		break;
4093 	case CHIP_NAVI14:
4094 	case CHIP_DIMGREY_CAVEFISH:
4095 		adev->mode_info.num_crtc = 5;
4096 		adev->mode_info.num_hpd = 5;
4097 		adev->mode_info.num_dig = 5;
4098 		break;
4099 	case CHIP_BEIGE_GOBY:
4100 		adev->mode_info.num_crtc = 2;
4101 		adev->mode_info.num_hpd = 2;
4102 		adev->mode_info.num_dig = 2;
4103 		break;
4104 #endif
4105 	default:
4106 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4107 		return -EINVAL;
4108 	}
4109 
4110 	amdgpu_dm_set_irq_funcs(adev);
4111 
4112 	if (adev->mode_info.funcs == NULL)
4113 		adev->mode_info.funcs = &dm_display_funcs;
4114 
4115 	/*
4116 	 * Note: Do NOT change adev->audio_endpt_rreg and
4117 	 * adev->audio_endpt_wreg because they are initialised in
4118 	 * amdgpu_device_init()
4119 	 */
4120 #if defined(CONFIG_DEBUG_KERNEL_DC)
4121 	device_create_file(
4122 		adev_to_drm(adev)->dev,
4123 		&dev_attr_s3_debug);
4124 #endif
4125 
4126 	return 0;
4127 }
4128 
4129 static bool modeset_required(struct drm_crtc_state *crtc_state,
4130 			     struct dc_stream_state *new_stream,
4131 			     struct dc_stream_state *old_stream)
4132 {
4133 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4134 }
4135 
4136 static bool modereset_required(struct drm_crtc_state *crtc_state)
4137 {
4138 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4139 }
4140 
4141 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4142 {
4143 	drm_encoder_cleanup(encoder);
4144 	kfree(encoder);
4145 }
4146 
4147 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4148 	.destroy = amdgpu_dm_encoder_destroy,
4149 };
4150 
4151 
4152 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4153 					 struct drm_framebuffer *fb,
4154 					 int *min_downscale, int *max_upscale)
4155 {
4156 	struct amdgpu_device *adev = drm_to_adev(dev);
4157 	struct dc *dc = adev->dm.dc;
4158 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4159 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4160 
4161 	switch (fb->format->format) {
4162 	case DRM_FORMAT_P010:
4163 	case DRM_FORMAT_NV12:
4164 	case DRM_FORMAT_NV21:
4165 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4166 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4167 		break;
4168 
4169 	case DRM_FORMAT_XRGB16161616F:
4170 	case DRM_FORMAT_ARGB16161616F:
4171 	case DRM_FORMAT_XBGR16161616F:
4172 	case DRM_FORMAT_ABGR16161616F:
4173 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4174 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4175 		break;
4176 
4177 	default:
4178 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4179 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4180 		break;
4181 	}
4182 
4183 	/*
4184 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4185 	 * scaling factor of 1.0 == 1000 units.
4186 	 */
4187 	if (*max_upscale == 1)
4188 		*max_upscale = 1000;
4189 
4190 	if (*min_downscale == 1)
4191 		*min_downscale = 1000;
4192 }
4193 
4194 
4195 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4196 				struct dc_scaling_info *scaling_info)
4197 {
4198 	int scale_w, scale_h, min_downscale, max_upscale;
4199 
4200 	memset(scaling_info, 0, sizeof(*scaling_info));
4201 
4202 	/* Source coordinates are 16.16 fixed point; we ignore the fractional part for now. */
4203 	scaling_info->src_rect.x = state->src_x >> 16;
4204 	scaling_info->src_rect.y = state->src_y >> 16;
4205 
4206 	/*
4207 	 * For reasons we don't (yet) fully understand, a non-zero
4208 	 * src_y coordinate into an NV12 buffer can cause a
4209 	 * system hang. To avoid hangs (and maybe be overly cautious)
4210 	 * let's reject both non-zero src_x and src_y.
4211 	 *
4212 	 * We currently know of only one use-case to reproduce a
4213 	 * scenario with non-zero src_x and src_y for NV12, which
4214 	 * is to gesture the YouTube Android app into full screen
4215 	 * on ChromeOS.
4216 	 */
4217 	if (state->fb &&
4218 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4219 	    (scaling_info->src_rect.x != 0 ||
4220 	     scaling_info->src_rect.y != 0))
4221 		return -EINVAL;
4222 
4223 	scaling_info->src_rect.width = state->src_w >> 16;
4224 	if (scaling_info->src_rect.width == 0)
4225 		return -EINVAL;
4226 
4227 	scaling_info->src_rect.height = state->src_h >> 16;
4228 	if (scaling_info->src_rect.height == 0)
4229 		return -EINVAL;
4230 
4231 	scaling_info->dst_rect.x = state->crtc_x;
4232 	scaling_info->dst_rect.y = state->crtc_y;
4233 
4234 	if (state->crtc_w == 0)
4235 		return -EINVAL;
4236 
4237 	scaling_info->dst_rect.width = state->crtc_w;
4238 
4239 	if (state->crtc_h == 0)
4240 		return -EINVAL;
4241 
4242 	scaling_info->dst_rect.height = state->crtc_h;
4243 
4244 	/* DRM doesn't specify clipping on destination output. */
4245 	scaling_info->clip_rect = scaling_info->dst_rect;
4246 
4247 	/* Validate scaling per-format with DC plane caps */
4248 	if (state->plane && state->plane->dev && state->fb) {
4249 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4250 					     &min_downscale, &max_upscale);
4251 	} else {
4252 		min_downscale = 250;
4253 		max_upscale = 16000;
4254 	}
4255 
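	/*
	 * Scale factors are in units of 1/1000th: 1000 means 1:1, e.g.
	 * scale_w == 500 is a 2:1 downscale and 2000 a 2x upscale.
	 */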
4256 	scale_w = scaling_info->dst_rect.width * 1000 /
4257 		  scaling_info->src_rect.width;
4258 
4259 	if (scale_w < min_downscale || scale_w > max_upscale)
4260 		return -EINVAL;
4261 
4262 	scale_h = scaling_info->dst_rect.height * 1000 /
4263 		  scaling_info->src_rect.height;
4264 
4265 	if (scale_h < min_downscale || scale_h > max_upscale)
4266 		return -EINVAL;
4267 
4268 	/*
4269 	 * The "scaling_quality" can be ignored for now; quality == 0 makes DC
4270 	 * assume reasonable defaults based on the format.
4271 	 */
4272 
4273 	return 0;
4274 }
4275 
4276 static void
4277 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4278 				 uint64_t tiling_flags)
4279 {
4280 	/* Fill GFX8 params */
4281 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4282 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4283 
4284 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4285 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4286 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4287 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4288 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4289 
4290 		/* XXX fix me for VI */
4291 		tiling_info->gfx8.num_banks = num_banks;
4292 		tiling_info->gfx8.array_mode =
4293 				DC_ARRAY_2D_TILED_THIN1;
4294 		tiling_info->gfx8.tile_split = tile_split;
4295 		tiling_info->gfx8.bank_width = bankw;
4296 		tiling_info->gfx8.bank_height = bankh;
4297 		tiling_info->gfx8.tile_aspect = mtaspect;
4298 		tiling_info->gfx8.tile_mode =
4299 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4300 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4301 			== DC_ARRAY_1D_TILED_THIN1) {
4302 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4303 	}
4304 
4305 	tiling_info->gfx8.pipe_config =
4306 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4307 }
4308 
4309 static void
4310 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4311 				  union dc_tiling_info *tiling_info)
4312 {
4313 	tiling_info->gfx9.num_pipes =
4314 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4315 	tiling_info->gfx9.num_banks =
4316 		adev->gfx.config.gb_addr_config_fields.num_banks;
4317 	tiling_info->gfx9.pipe_interleave =
4318 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4319 	tiling_info->gfx9.num_shader_engines =
4320 		adev->gfx.config.gb_addr_config_fields.num_se;
4321 	tiling_info->gfx9.max_compressed_frags =
4322 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4323 	tiling_info->gfx9.num_rb_per_se =
4324 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4325 	tiling_info->gfx9.shaderEnable = 1;
4326 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4327 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4328 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4329 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4330 	    adev->asic_type == CHIP_YELLOW_CARP ||
4331 	    adev->asic_type == CHIP_VANGOGH)
4332 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4333 }
4334 
4335 static int
4336 validate_dcc(struct amdgpu_device *adev,
4337 	     const enum surface_pixel_format format,
4338 	     const enum dc_rotation_angle rotation,
4339 	     const union dc_tiling_info *tiling_info,
4340 	     const struct dc_plane_dcc_param *dcc,
4341 	     const struct dc_plane_address *address,
4342 	     const struct plane_size *plane_size)
4343 {
4344 	struct dc *dc = adev->dm.dc;
4345 	struct dc_dcc_surface_param input;
4346 	struct dc_surface_dcc_cap output;
4347 
4348 	memset(&input, 0, sizeof(input));
4349 	memset(&output, 0, sizeof(output));
4350 
4351 	if (!dcc->enable)
4352 		return 0;
4353 
4354 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4355 	    !dc->cap_funcs.get_dcc_compression_cap)
4356 		return -EINVAL;
4357 
4358 	input.format = format;
4359 	input.surface_size.width = plane_size->surface_size.width;
4360 	input.surface_size.height = plane_size->surface_size.height;
4361 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4362 
4363 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4364 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4365 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4366 		input.scan = SCAN_DIRECTION_VERTICAL;
4367 
4368 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4369 		return -EINVAL;
4370 
4371 	if (!output.capable)
4372 		return -EINVAL;
4373 
4374 	if (dcc->independent_64b_blks == 0 &&
4375 	    output.grph.rgb.independent_64b_blks != 0)
4376 		return -EINVAL;
4377 
4378 	return 0;
4379 }
4380 
4381 static bool
4382 modifier_has_dcc(uint64_t modifier)
4383 {
4384 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4385 }
4386 
4387 static unsigned
4388 modifier_gfx9_swizzle_mode(uint64_t modifier)
4389 {
4390 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4391 		return 0;
4392 
4393 	return AMD_FMT_MOD_GET(TILE, modifier);
4394 }
4395 
4396 static const struct drm_format_info *
4397 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4398 {
4399 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4400 }
4401 
4402 static void
4403 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4404 				    union dc_tiling_info *tiling_info,
4405 				    uint64_t modifier)
4406 {
4407 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4408 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4409 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4410 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4411 
4412 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4413 
4414 	if (!IS_AMD_FMT_MOD(modifier))
4415 		return;
4416 
4417 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4418 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4419 
4420 	if (adev->family >= AMDGPU_FAMILY_NV) {
4421 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4422 	} else {
4423 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4424 
4425 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4426 	}
4427 }
4428 
4429 enum dm_micro_swizzle {
4430 	MICRO_SWIZZLE_Z = 0,
4431 	MICRO_SWIZZLE_S = 1,
4432 	MICRO_SWIZZLE_D = 2,
4433 	MICRO_SWIZZLE_R = 3
4434 };
4435 
4436 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4437 					  uint32_t format,
4438 					  uint64_t modifier)
4439 {
4440 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4441 	const struct drm_format_info *info = drm_format_info(format);
4442 	int i;
4443 
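	/*
	 * The two LSBs of a GFX9+ swizzle mode select the micro-tile
	 * ordering (Z/S/D/R above).
	 */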
4444 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4445 
4446 	if (!info)
4447 		return false;
4448 
4449 	/*
4450 	 * We always have to allow these modifiers:
4451 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4452 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4453 	 */
4454 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4455 	    modifier == DRM_FORMAT_MOD_INVALID) {
4456 		return true;
4457 	}
4458 
4459 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4460 	for (i = 0; i < plane->modifier_count; i++) {
4461 		if (modifier == plane->modifiers[i])
4462 			break;
4463 	}
4464 	if (i == plane->modifier_count)
4465 		return false;
4466 
4467 	/*
4468 	 * For D swizzle the canonical modifier depends on the bpp, so check
4469 	 * it here.
4470 	 */
4471 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4472 	    adev->family >= AMDGPU_FAMILY_NV) {
4473 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4474 			return false;
4475 	}
4476 
4477 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4478 	    info->cpp[0] < 8)
4479 		return false;
4480 
4481 	if (modifier_has_dcc(modifier)) {
4482 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4483 		if (info->cpp[0] != 4)
4484 			return false;
4485 		/* We support multi-planar formats, but not when combined with
4486 		 * additional DCC metadata planes. */
4487 		if (info->num_planes > 1)
4488 			return false;
4489 	}
4490 
4491 	return true;
4492 }
4493 
4494 static void
4495 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4496 {
4497 	if (!*mods)
4498 		return;
4499 
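	/* Grow the array geometrically; on allocation failure *mods is left NULL. */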
4500 	if (*cap - *size < 1) {
4501 		uint64_t new_cap = *cap * 2;
4502 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4503 
4504 		if (!new_mods) {
4505 			kfree(*mods);
4506 			*mods = NULL;
4507 			return;
4508 		}
4509 
4510 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4511 		kfree(*mods);
4512 		*mods = new_mods;
4513 		*cap = new_cap;
4514 	}
4515 
4516 	(*mods)[*size] = mod;
4517 	*size += 1;
4518 }
4519 
4520 static void
4521 add_gfx9_modifiers(const struct amdgpu_device *adev,
4522 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4523 {
4524 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4525 	int pipe_xor_bits = min(8, pipes +
4526 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4527 	int bank_xor_bits = min(8 - pipe_xor_bits,
4528 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4529 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4530 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4531 
4533 	if (adev->family == AMDGPU_FAMILY_RV) {
4534 		/* Raven2 and later */
4535 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4536 
4537 		/*
4538 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4539 		 * doesn't support _D on DCN
4540 		 */
4541 
4542 		if (has_constant_encode) {
4543 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4544 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4545 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4546 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4547 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4548 				    AMD_FMT_MOD_SET(DCC, 1) |
4549 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4550 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4551 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4552 		}
4553 
4554 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4555 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4556 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4557 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4558 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4559 			    AMD_FMT_MOD_SET(DCC, 1) |
4560 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4561 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4562 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4563 
4564 		if (has_constant_encode) {
4565 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4566 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4567 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4568 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4569 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4570 				    AMD_FMT_MOD_SET(DCC, 1) |
4571 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4572 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4573 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4575 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4576 				    AMD_FMT_MOD_SET(RB, rb) |
4577 				    AMD_FMT_MOD_SET(PIPE, pipes));
4578 		}
4579 
4580 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4581 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4582 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4583 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4584 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4585 			    AMD_FMT_MOD_SET(DCC, 1) |
4586 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4587 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4588 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4589 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4590 			    AMD_FMT_MOD_SET(RB, rb) |
4591 			    AMD_FMT_MOD_SET(PIPE, pipes));
4592 	}
4593 
4594 	/*
4595 	 * Only supported for 64bpp on Raven, will be filtered on format in
4596 	 * dm_plane_format_mod_supported.
4597 	 */
4598 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4599 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4600 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4601 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4602 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4603 
4604 	if (adev->family == AMDGPU_FAMILY_RV) {
4605 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4606 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4607 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4608 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4609 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4610 	}
4611 
4612 	/*
4613 	 * Only supported for 64bpp on Raven, will be filtered on format in
4614 	 * dm_plane_format_mod_supported.
4615 	 */
4616 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4617 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4618 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4619 
4620 	if (adev->family == AMDGPU_FAMILY_RV) {
4621 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4622 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4623 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4624 	}
4625 }
4626 
4627 static void
4628 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4629 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4630 {
4631 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4632 
4633 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4634 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4635 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4636 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4637 		    AMD_FMT_MOD_SET(DCC, 1) |
4638 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4639 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4640 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4641 
4642 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4643 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4644 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4645 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4646 		    AMD_FMT_MOD_SET(DCC, 1) |
4647 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4648 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4649 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4650 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4651 
4652 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4653 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4654 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4655 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4656 
4657 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4658 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4659 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4660 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4661 
4663 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4664 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4665 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4666 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4667 
4668 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4669 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4670 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4671 }
4672 
4673 static void
4674 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4675 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4676 {
4677 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4678 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4679 
4680 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4681 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4682 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4683 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4684 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4685 		    AMD_FMT_MOD_SET(DCC, 1) |
4686 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4687 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4688 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4689 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4690 
4691 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4692 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4693 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4694 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4695 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4696 		    AMD_FMT_MOD_SET(DCC, 1) |
4697 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4698 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4699 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4700 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4701 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4702 
4703 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4704 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4705 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4706 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4707 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4708 
4709 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4710 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4711 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4712 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4713 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4714 
4715 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4716 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4717 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4718 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4719 
4720 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4721 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4722 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4723 }
4724 
4725 static int
4726 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4727 {
4728 	uint64_t size = 0, capacity = 128;
4729 	*mods = NULL;
4730 
4731 	/* We have not hooked up any pre-GFX9 modifiers. */
4732 	if (adev->family < AMDGPU_FAMILY_AI)
4733 		return 0;
4734 
4735 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
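	/*
	 * No NULL check needed here: add_modifier() tolerates a NULL *mods,
	 * and the checks below turn an allocation failure into -ENOMEM.
	 */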
4736 
4737 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4738 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4739 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4740 		return *mods ? 0 : -ENOMEM;
4741 	}
4742 
4743 	switch (adev->family) {
4744 	case AMDGPU_FAMILY_AI:
4745 	case AMDGPU_FAMILY_RV:
4746 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4747 		break;
4748 	case AMDGPU_FAMILY_NV:
4749 	case AMDGPU_FAMILY_VGH:
4750 	case AMDGPU_FAMILY_YC:
4751 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4752 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4753 		else
4754 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4755 		break;
4756 	}
4757 
4758 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4759 
4760 	/* INVALID marks the end of the list. */
4761 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4762 
4763 	if (!*mods)
4764 		return -ENOMEM;
4765 
4766 	return 0;
4767 }
4768 
4769 static int
4770 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4771 					  const struct amdgpu_framebuffer *afb,
4772 					  const enum surface_pixel_format format,
4773 					  const enum dc_rotation_angle rotation,
4774 					  const struct plane_size *plane_size,
4775 					  union dc_tiling_info *tiling_info,
4776 					  struct dc_plane_dcc_param *dcc,
4777 					  struct dc_plane_address *address,
4778 					  const bool force_disable_dcc)
4779 {
4780 	const uint64_t modifier = afb->base.modifier;
4781 	int ret = 0;
4782 
4783 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4784 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4785 
4786 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4787 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
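		/* The DCC metadata surface is carried in plane 1 of the framebuffer. */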
4788 
4789 		dcc->enable = 1;
4790 		dcc->meta_pitch = afb->base.pitches[1];
4791 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4792 
4793 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4794 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4795 	}
4796 
4797 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4798 	if (ret)
4799 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4800 
4801 	return ret;
4802 }
4803 
4804 static int
4805 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4806 			     const struct amdgpu_framebuffer *afb,
4807 			     const enum surface_pixel_format format,
4808 			     const enum dc_rotation_angle rotation,
4809 			     const uint64_t tiling_flags,
4810 			     union dc_tiling_info *tiling_info,
4811 			     struct plane_size *plane_size,
4812 			     struct dc_plane_dcc_param *dcc,
4813 			     struct dc_plane_address *address,
4814 			     bool tmz_surface,
4815 			     bool force_disable_dcc)
4816 {
4817 	const struct drm_framebuffer *fb = &afb->base;
4818 	int ret;
4819 
4820 	memset(tiling_info, 0, sizeof(*tiling_info));
4821 	memset(plane_size, 0, sizeof(*plane_size));
4822 	memset(dcc, 0, sizeof(*dcc));
4823 	memset(address, 0, sizeof(*address));
4824 
4825 	address->tmz_surface = tmz_surface;
4826 
4827 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4828 		uint64_t addr = afb->address + fb->offsets[0];
4829 
4830 		plane_size->surface_size.x = 0;
4831 		plane_size->surface_size.y = 0;
4832 		plane_size->surface_size.width = fb->width;
4833 		plane_size->surface_size.height = fb->height;
4834 		plane_size->surface_pitch =
4835 			fb->pitches[0] / fb->format->cpp[0];
4836 
4837 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4838 		address->grph.addr.low_part = lower_32_bits(addr);
4839 		address->grph.addr.high_part = upper_32_bits(addr);
4840 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4841 		uint64_t luma_addr = afb->address + fb->offsets[0];
4842 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4843 
4844 		plane_size->surface_size.x = 0;
4845 		plane_size->surface_size.y = 0;
4846 		plane_size->surface_size.width = fb->width;
4847 		plane_size->surface_size.height = fb->height;
4848 		plane_size->surface_pitch =
4849 			fb->pitches[0] / fb->format->cpp[0];
4850 
4851 		plane_size->chroma_size.x = 0;
4852 		plane_size->chroma_size.y = 0;
4853 		/* TODO: set these based on surface format */
4854 		plane_size->chroma_size.width = fb->width / 2;
4855 		plane_size->chroma_size.height = fb->height / 2;
4856 
4857 		plane_size->chroma_pitch =
4858 			fb->pitches[1] / fb->format->cpp[1];
4859 
4860 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4861 		address->video_progressive.luma_addr.low_part =
4862 			lower_32_bits(luma_addr);
4863 		address->video_progressive.luma_addr.high_part =
4864 			upper_32_bits(luma_addr);
4865 		address->video_progressive.chroma_addr.low_part =
4866 			lower_32_bits(chroma_addr);
4867 		address->video_progressive.chroma_addr.high_part =
4868 			upper_32_bits(chroma_addr);
4869 	}
4870 
4871 	if (adev->family >= AMDGPU_FAMILY_AI) {
4872 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4873 								rotation, plane_size,
4874 								tiling_info, dcc,
4875 								address,
4876 								force_disable_dcc);
4877 		if (ret)
4878 			return ret;
4879 	} else {
4880 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4881 	}
4882 
4883 	return 0;
4884 }
4885 
4886 static void
4887 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4888 			       bool *per_pixel_alpha, bool *global_alpha,
4889 			       int *global_alpha_value)
4890 {
4891 	*per_pixel_alpha = false;
4892 	*global_alpha = false;
4893 	*global_alpha_value = 0xff;
4894 
4895 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4896 		return;
4897 
4898 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4899 		static const uint32_t alpha_formats[] = {
4900 			DRM_FORMAT_ARGB8888,
4901 			DRM_FORMAT_RGBA8888,
4902 			DRM_FORMAT_ABGR8888,
4903 		};
4904 		uint32_t format = plane_state->fb->format->format;
4905 		unsigned int i;
4906 
4907 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4908 			if (format == alpha_formats[i]) {
4909 				*per_pixel_alpha = true;
4910 				break;
4911 			}
4912 		}
4913 	}
4914 
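	/* DRM plane alpha is 16-bit; DC takes an 8-bit global alpha, hence the >> 8. */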
4915 	if (plane_state->alpha < 0xffff) {
4916 		*global_alpha = true;
4917 		*global_alpha_value = plane_state->alpha >> 8;
4918 	}
4919 }
4920 
4921 static int
4922 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4923 			    const enum surface_pixel_format format,
4924 			    enum dc_color_space *color_space)
4925 {
4926 	bool full_range;
4927 
4928 	*color_space = COLOR_SPACE_SRGB;
4929 
4930 	/* DRM color properties only affect non-RGB formats. */
4931 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4932 		return 0;
4933 
4934 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4935 
4936 	switch (plane_state->color_encoding) {
4937 	case DRM_COLOR_YCBCR_BT601:
4938 		if (full_range)
4939 			*color_space = COLOR_SPACE_YCBCR601;
4940 		else
4941 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4942 		break;
4943 
4944 	case DRM_COLOR_YCBCR_BT709:
4945 		if (full_range)
4946 			*color_space = COLOR_SPACE_YCBCR709;
4947 		else
4948 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4949 		break;
4950 
4951 	case DRM_COLOR_YCBCR_BT2020:
4952 		if (full_range)
4953 			*color_space = COLOR_SPACE_2020_YCBCR;
4954 		else
4955 			return -EINVAL;
4956 		break;
4957 
4958 	default:
4959 		return -EINVAL;
4960 	}
4961 
4962 	return 0;
4963 }
4964 
4965 static int
4966 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4967 			    const struct drm_plane_state *plane_state,
4968 			    const uint64_t tiling_flags,
4969 			    struct dc_plane_info *plane_info,
4970 			    struct dc_plane_address *address,
4971 			    bool tmz_surface,
4972 			    bool force_disable_dcc)
4973 {
4974 	const struct drm_framebuffer *fb = plane_state->fb;
4975 	const struct amdgpu_framebuffer *afb =
4976 		to_amdgpu_framebuffer(plane_state->fb);
4977 	int ret;
4978 
4979 	memset(plane_info, 0, sizeof(*plane_info));
4980 
4981 	switch (fb->format->format) {
4982 	case DRM_FORMAT_C8:
4983 		plane_info->format =
4984 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4985 		break;
4986 	case DRM_FORMAT_RGB565:
4987 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4988 		break;
4989 	case DRM_FORMAT_XRGB8888:
4990 	case DRM_FORMAT_ARGB8888:
4991 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4992 		break;
4993 	case DRM_FORMAT_XRGB2101010:
4994 	case DRM_FORMAT_ARGB2101010:
4995 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4996 		break;
4997 	case DRM_FORMAT_XBGR2101010:
4998 	case DRM_FORMAT_ABGR2101010:
4999 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5000 		break;
5001 	case DRM_FORMAT_XBGR8888:
5002 	case DRM_FORMAT_ABGR8888:
5003 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5004 		break;
5005 	case DRM_FORMAT_NV21:
5006 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5007 		break;
5008 	case DRM_FORMAT_NV12:
5009 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5010 		break;
5011 	case DRM_FORMAT_P010:
5012 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5013 		break;
5014 	case DRM_FORMAT_XRGB16161616F:
5015 	case DRM_FORMAT_ARGB16161616F:
5016 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5017 		break;
5018 	case DRM_FORMAT_XBGR16161616F:
5019 	case DRM_FORMAT_ABGR16161616F:
5020 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5021 		break;
5022 	case DRM_FORMAT_XRGB16161616:
5023 	case DRM_FORMAT_ARGB16161616:
5024 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5025 		break;
5026 	case DRM_FORMAT_XBGR16161616:
5027 	case DRM_FORMAT_ABGR16161616:
5028 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5029 		break;
5030 	default:
5031 		DRM_ERROR(
5032 			"Unsupported screen format %p4cc\n",
5033 			&fb->format->format);
5034 		return -EINVAL;
5035 	}
5036 
5037 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5038 	case DRM_MODE_ROTATE_0:
5039 		plane_info->rotation = ROTATION_ANGLE_0;
5040 		break;
5041 	case DRM_MODE_ROTATE_90:
5042 		plane_info->rotation = ROTATION_ANGLE_90;
5043 		break;
5044 	case DRM_MODE_ROTATE_180:
5045 		plane_info->rotation = ROTATION_ANGLE_180;
5046 		break;
5047 	case DRM_MODE_ROTATE_270:
5048 		plane_info->rotation = ROTATION_ANGLE_270;
5049 		break;
5050 	default:
5051 		plane_info->rotation = ROTATION_ANGLE_0;
5052 		break;
5053 	}
5054 
5055 	plane_info->visible = true;
5056 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5057 
5058 	plane_info->layer_index = 0;
5059 
5060 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5061 					  &plane_info->color_space);
5062 	if (ret)
5063 		return ret;
5064 
5065 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5066 					   plane_info->rotation, tiling_flags,
5067 					   &plane_info->tiling_info,
5068 					   &plane_info->plane_size,
5069 					   &plane_info->dcc, address, tmz_surface,
5070 					   force_disable_dcc);
5071 	if (ret)
5072 		return ret;
5073 
5074 	fill_blending_from_plane_state(
5075 		plane_state, &plane_info->per_pixel_alpha,
5076 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5077 
5078 	return 0;
5079 }
5080 
5081 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5082 				    struct dc_plane_state *dc_plane_state,
5083 				    struct drm_plane_state *plane_state,
5084 				    struct drm_crtc_state *crtc_state)
5085 {
5086 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5087 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5088 	struct dc_scaling_info scaling_info;
5089 	struct dc_plane_info plane_info;
5090 	int ret;
5091 	bool force_disable_dcc = false;
5092 
5093 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5094 	if (ret)
5095 		return ret;
5096 
5097 	dc_plane_state->src_rect = scaling_info.src_rect;
5098 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5099 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5100 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5101 
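	/*
	 * DCC appears to be unreliable on Raven across suspend, so it is
	 * force-disabled while suspending (inferred from this workaround).
	 */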
5102 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5103 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5104 					  afb->tiling_flags,
5105 					  &plane_info,
5106 					  &dc_plane_state->address,
5107 					  afb->tmz_surface,
5108 					  force_disable_dcc);
5109 	if (ret)
5110 		return ret;
5111 
5112 	dc_plane_state->format = plane_info.format;
5113 	dc_plane_state->color_space = plane_info.color_space;
5115 	dc_plane_state->plane_size = plane_info.plane_size;
5116 	dc_plane_state->rotation = plane_info.rotation;
5117 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5118 	dc_plane_state->stereo_format = plane_info.stereo_format;
5119 	dc_plane_state->tiling_info = plane_info.tiling_info;
5120 	dc_plane_state->visible = plane_info.visible;
5121 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5122 	dc_plane_state->global_alpha = plane_info.global_alpha;
5123 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5124 	dc_plane_state->dcc = plane_info.dcc;
5125 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5126 	dc_plane_state->flip_int_enabled = true;
5127 
5128 	/*
5129 	 * Always set input transfer function, since plane state is refreshed
5130 	 * every time.
5131 	 */
5132 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5133 	if (ret)
5134 		return ret;
5135 
5136 	return 0;
5137 }
5138 
5139 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5140 					   const struct dm_connector_state *dm_state,
5141 					   struct dc_stream_state *stream)
5142 {
5143 	enum amdgpu_rmx_type rmx_type;
5144 
5145 	struct rect src = { 0 }; /* viewport in composition space */
5146 	struct rect dst = { 0 }; /* stream addressable area */
5147 
5148 	/* no mode. nothing to be done */
5149 	if (!mode)
5150 		return;
5151 
5152 	/* Full screen scaling by default */
5153 	src.width = mode->hdisplay;
5154 	src.height = mode->vdisplay;
5155 	dst.width = stream->timing.h_addressable;
5156 	dst.height = stream->timing.v_addressable;
5157 
5158 	if (dm_state) {
5159 		rmx_type = dm_state->scaling;
5160 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
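			/*
			 * Compare aspect ratios by cross-multiplying to avoid
			 * integer division: src.w/src.h < dst.w/dst.h is
			 * equivalent to src.w * dst.h < src.h * dst.w.
			 */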
5161 			if (src.width * dst.height <
5162 					src.height * dst.width) {
5163 				/* height needs less upscaling/more downscaling */
5164 				dst.width = src.width *
5165 						dst.height / src.height;
5166 			} else {
5167 				/* width needs less upscaling/more downscaling */
5168 				dst.height = src.height *
5169 						dst.width / src.width;
5170 			}
5171 		} else if (rmx_type == RMX_CENTER) {
5172 			dst = src;
5173 		}
5174 
5175 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5176 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5177 
5178 		if (dm_state->underscan_enable) {
5179 			dst.x += dm_state->underscan_hborder / 2;
5180 			dst.y += dm_state->underscan_vborder / 2;
5181 			dst.width -= dm_state->underscan_hborder;
5182 			dst.height -= dm_state->underscan_vborder;
5183 		}
5184 	}
5185 
5186 	stream->src = src;
5187 	stream->dst = dst;
5188 
5189 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5190 		      dst.x, dst.y, dst.width, dst.height);
5191 
5192 }
5193 
5194 static enum dc_color_depth
5195 convert_color_depth_from_display_info(const struct drm_connector *connector,
5196 				      bool is_y420, int requested_bpc)
5197 {
5198 	uint8_t bpc;
5199 
5200 	if (is_y420) {
5201 		bpc = 8;
5202 
5203 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5204 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5205 			bpc = 16;
5206 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5207 			bpc = 12;
5208 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5209 			bpc = 10;
5210 	} else {
5211 		bpc = (uint8_t)connector->display_info.bpc;
5212 		/* Assume 8 bpc by default if no bpc is specified. */
5213 		bpc = bpc ? bpc : 8;
5214 	}
5215 
5216 	if (requested_bpc > 0) {
5217 		/*
5218 		 * Cap display bpc based on the user requested value.
5219 		 *
5220 		 * The value for state->max_bpc may not be correctly updated
5221 		 * depending on when the connector gets added to the state
5222 		 * or if this was called outside of atomic check, so it
5223 		 * can't be used directly.
5224 		 */
5225 		bpc = min_t(u8, bpc, requested_bpc);
5226 
5227 		/* Round down to the nearest even number. */
5228 		bpc = bpc - (bpc & 1);
5229 	}
5230 
5231 	switch (bpc) {
5232 	case 0:
5233 		/*
5234 		 * Temporary Work around, DRM doesn't parse color depth for
5235 		 * EDID revision before 1.4
5236 		 * TODO: Fix edid parsing
5237 		 */
5238 		return COLOR_DEPTH_888;
5239 	case 6:
5240 		return COLOR_DEPTH_666;
5241 	case 8:
5242 		return COLOR_DEPTH_888;
5243 	case 10:
5244 		return COLOR_DEPTH_101010;
5245 	case 12:
5246 		return COLOR_DEPTH_121212;
5247 	case 14:
5248 		return COLOR_DEPTH_141414;
5249 	case 16:
5250 		return COLOR_DEPTH_161616;
5251 	default:
5252 		return COLOR_DEPTH_UNDEFINED;
5253 	}
5254 }
5255 
5256 static enum dc_aspect_ratio
5257 get_aspect_ratio(const struct drm_display_mode *mode_in)
5258 {
5259 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5260 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5261 }
5262 
5263 static enum dc_color_space
5264 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5265 {
5266 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5267 
5268 	switch (dc_crtc_timing->pixel_encoding)	{
5269 	case PIXEL_ENCODING_YCBCR422:
5270 	case PIXEL_ENCODING_YCBCR444:
5271 	case PIXEL_ENCODING_YCBCR420:
5272 	{
5273 		/*
5274 		 * 27.03 MHz is the separation point between HDTV and SDTV per
5275 		 * the HDMI spec; use YCbCr709 above it and YCbCr601 below
5276 		 * (pix_clk_100hz is in 100 Hz units, hence 270300).
5277 		 */
5278 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5279 			if (dc_crtc_timing->flags.Y_ONLY)
5280 				color_space =
5281 					COLOR_SPACE_YCBCR709_LIMITED;
5282 			else
5283 				color_space = COLOR_SPACE_YCBCR709;
5284 		} else {
5285 			if (dc_crtc_timing->flags.Y_ONLY)
5286 				color_space =
5287 					COLOR_SPACE_YCBCR601_LIMITED;
5288 			else
5289 				color_space = COLOR_SPACE_YCBCR601;
5290 		}
5291 
5292 	}
5293 	break;
5294 	case PIXEL_ENCODING_RGB:
5295 		color_space = COLOR_SPACE_SRGB;
5296 		break;
5297 
5298 	default:
5299 		WARN_ON(1);
5300 		break;
5301 	}
5302 
5303 	return color_space;
5304 }
5305 
5306 static bool adjust_colour_depth_from_display_info(
5307 	struct dc_crtc_timing *timing_out,
5308 	const struct drm_display_info *info)
5309 {
5310 	enum dc_color_depth depth = timing_out->display_color_depth;
5311 	int normalized_clk;
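	/*
	 * HDMI deep color scales the TMDS clock by bpc/8 (30/24 for 10 bpc,
	 * 36/24 for 12 bpc, 48/24 for 16 bpc). Walk down from the requested
	 * depth until the scaled clock (in kHz) fits the sink's max TMDS
	 * clock. For example, 4K@60 at 594000 kHz scaled for 10 bpc gives
	 * 742500 kHz, which exceeds a 600000 kHz HDMI 2.0 sink, so the loop
	 * would retry at 8 bpc.
	 */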
5312 	do {
5313 		normalized_clk = timing_out->pix_clk_100hz / 10;
5314 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5315 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5316 			normalized_clk /= 2;
5317 		/* Adjust the pixel clock per the HDMI spec for the colour depth */
5318 		switch (depth) {
5319 		case COLOR_DEPTH_888:
5320 			break;
5321 		case COLOR_DEPTH_101010:
5322 			normalized_clk = (normalized_clk * 30) / 24;
5323 			break;
5324 		case COLOR_DEPTH_121212:
5325 			normalized_clk = (normalized_clk * 36) / 24;
5326 			break;
5327 		case COLOR_DEPTH_161616:
5328 			normalized_clk = (normalized_clk * 48) / 24;
5329 			break;
5330 		default:
5331 			/* The above depths are the only ones valid for HDMI. */
5332 			return false;
5333 		}
5334 		if (normalized_clk <= info->max_tmds_clock) {
5335 			timing_out->display_color_depth = depth;
5336 			return true;
5337 		}
5338 	} while (--depth > COLOR_DEPTH_666);
5339 	return false;
5340 }
5341 
5342 static void fill_stream_properties_from_drm_display_mode(
5343 	struct dc_stream_state *stream,
5344 	const struct drm_display_mode *mode_in,
5345 	const struct drm_connector *connector,
5346 	const struct drm_connector_state *connector_state,
5347 	const struct dc_stream_state *old_stream,
5348 	int requested_bpc)
5349 {
5350 	struct dc_crtc_timing *timing_out = &stream->timing;
5351 	const struct drm_display_info *info = &connector->display_info;
5352 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5353 	struct hdmi_vendor_infoframe hv_frame;
5354 	struct hdmi_avi_infoframe avi_frame;
5355 
5356 	memset(&hv_frame, 0, sizeof(hv_frame));
5357 	memset(&avi_frame, 0, sizeof(avi_frame));
5358 
5359 	timing_out->h_border_left = 0;
5360 	timing_out->h_border_right = 0;
5361 	timing_out->v_border_top = 0;
5362 	timing_out->v_border_bottom = 0;
5363 	/* TODO: un-hardcode */
5364 	if (drm_mode_is_420_only(info, mode_in)
5365 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5366 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5367 	else if (drm_mode_is_420_also(info, mode_in)
5368 			&& aconnector->force_yuv420_output)
5369 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5370 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5371 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5372 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5373 	else
5374 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5375 
5376 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5377 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5378 		connector,
5379 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5380 		requested_bpc);
5381 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5382 	timing_out->hdmi_vic = 0;
5383 
5384 	if (old_stream) {
5385 		timing_out->vic = old_stream->timing.vic;
5386 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5387 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5388 	} else {
5389 		timing_out->vic = drm_match_cea_mode(mode_in);
5390 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5391 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5392 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5393 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5394 	}
5395 
5396 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5397 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5398 		timing_out->vic = avi_frame.video_code;
5399 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5400 		timing_out->hdmi_vic = hv_frame.vic;
5401 	}
5402 
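	/*
	 * A FreeSync video mode is expected to differ from the base mode only
	 * in the vertical front porch, so program the unadjusted mode timings
	 * here; the crtc_* fields may have been patched for scaling.
	 */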
5403 	if (is_freesync_video_mode(mode_in, aconnector)) {
5404 		timing_out->h_addressable = mode_in->hdisplay;
5405 		timing_out->h_total = mode_in->htotal;
5406 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5407 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5408 		timing_out->v_total = mode_in->vtotal;
5409 		timing_out->v_addressable = mode_in->vdisplay;
5410 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5411 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5412 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5413 	} else {
5414 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5415 		timing_out->h_total = mode_in->crtc_htotal;
5416 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5417 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5418 		timing_out->v_total = mode_in->crtc_vtotal;
5419 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5420 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5421 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5422 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5423 	}
5424 
5425 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5426 
5427 	stream->output_color_space = get_output_color_space(timing_out);
5428 
5429 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5430 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5431 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5432 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5433 		    drm_mode_is_420_also(info, mode_in) &&
5434 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5435 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5436 			adjust_colour_depth_from_display_info(timing_out, info);
5437 		}
5438 	}
5439 }
5440 
5441 static void fill_audio_info(struct audio_info *audio_info,
5442 			    const struct drm_connector *drm_connector,
5443 			    const struct dc_sink *dc_sink)
5444 {
5445 	int i = 0;
5446 	int cea_revision = 0;
5447 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5448 
5449 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5450 	audio_info->product_id = edid_caps->product_id;
5451 
5452 	cea_revision = drm_connector->display_info.cea_rev;
5453 
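	/*
	 * Note: the strncpy() fallback below copies at most SIZE - 1 bytes
	 * and relies on audio_info being zero-initialized to keep the name
	 * NUL-terminated.
	 */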
5454 #ifdef __linux__
5455 	strscpy(audio_info->display_name,
5456 		edid_caps->display_name,
5457 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5458 #else
5459 	strncpy(audio_info->display_name,
5460 		edid_caps->display_name,
5461 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
5462 #endif
5463 
5464 	if (cea_revision >= 3) {
5465 		audio_info->mode_count = edid_caps->audio_mode_count;
5466 
5467 		for (i = 0; i < audio_info->mode_count; ++i) {
5468 			audio_info->modes[i].format_code =
5469 					(enum audio_format_code)
5470 					(edid_caps->audio_modes[i].format_code);
5471 			audio_info->modes[i].channel_count =
5472 					edid_caps->audio_modes[i].channel_count;
5473 			audio_info->modes[i].sample_rates.all =
5474 					edid_caps->audio_modes[i].sample_rate;
5475 			audio_info->modes[i].sample_size =
5476 					edid_caps->audio_modes[i].sample_size;
5477 		}
5478 	}
5479 
5480 	audio_info->flags.all = edid_caps->speaker_flags;
5481 
5482 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5483 	if (drm_connector->latency_present[0]) {
5484 		audio_info->video_latency = drm_connector->video_latency[0];
5485 		audio_info->audio_latency = drm_connector->audio_latency[0];
5486 	}
5487 
5488 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5489 
5490 }
5491 
5492 static void
5493 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5494 				      struct drm_display_mode *dst_mode)
5495 {
5496 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5497 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5498 	dst_mode->crtc_clock = src_mode->crtc_clock;
5499 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5500 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5501 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5502 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5503 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5504 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5505 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5506 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5507 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5508 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5509 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5510 }
5511 
5512 static void
5513 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5514 					const struct drm_display_mode *native_mode,
5515 					bool scale_enabled)
5516 {
5517 	if (scale_enabled) {
5518 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5519 	} else if (native_mode->clock == drm_mode->clock &&
5520 			native_mode->htotal == drm_mode->htotal &&
5521 			native_mode->vtotal == drm_mode->vtotal) {
5522 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5523 	} else {
5524 		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
5525 	}
5526 }
5527 
5528 static struct dc_sink *
5529 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5530 {
5531 	struct dc_sink_init_data sink_init_data = { 0 };
5532 	struct dc_sink *sink = NULL;
5533 	sink_init_data.link = aconnector->dc_link;
5534 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5535 
5536 	sink = dc_sink_create(&sink_init_data);
5537 	if (!sink) {
5538 		DRM_ERROR("Failed to create sink!\n");
5539 		return NULL;
5540 	}
5541 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5542 
5543 	return sink;
5544 }
5545 
5546 static void set_multisync_trigger_params(
5547 		struct dc_stream_state *stream)
5548 {
5549 	struct dc_stream_state *master = NULL;
5550 
5551 	if (stream->triggered_crtc_reset.enabled) {
5552 		master = stream->triggered_crtc_reset.event_source;
5553 		stream->triggered_crtc_reset.event =
5554 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5555 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5556 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5557 	}
5558 }
5559 
5560 static void set_master_stream(struct dc_stream_state *stream_set[],
5561 			      int stream_count)
5562 {
5563 	int j, highest_rfr = 0, master_stream = 0;
5564 
5565 	for (j = 0;  j < stream_count; j++) {
5566 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5567 			int refresh_rate = 0;
5568 
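			/*
			 * pix_clk_100hz is in 100 Hz units, so multiplying by
			 * 100 yields Hz; dividing by the pixels per frame
			 * (h_total * v_total) gives the refresh rate in Hz.
			 */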
5569 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5570 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5571 			if (refresh_rate > highest_rfr) {
5572 				highest_rfr = refresh_rate;
5573 				master_stream = j;
5574 			}
5575 		}
5576 	}
5577 	for (j = 0;  j < stream_count; j++) {
5578 		if (stream_set[j])
5579 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5580 	}
5581 }
5582 
5583 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5584 {
5585 	int i = 0;
5586 	struct dc_stream_state *stream;
5587 
5588 	if (context->stream_count < 2)
5589 		return;
5590 	for (i = 0; i < context->stream_count ; i++) {
5591 		if (!context->streams[i])
5592 			continue;
5593 		/*
5594 		 * TODO: add a function to read AMD VSDB bits and set
5595 		 * crtc_sync_master.multi_sync_enabled flag
5596 		 * For now it's set to false
5597 		 */
5598 	}
5599 
5600 	set_master_stream(context->streams, context->stream_count);
5601 
5602 	for (i = 0; i < context->stream_count ; i++) {
5603 		stream = context->streams[i];
5604 
5605 		if (!stream)
5606 			continue;
5607 
5608 		set_multisync_trigger_params(stream);
5609 	}
5610 }
5611 
5612 #if defined(CONFIG_DRM_AMD_DC_DCN)
5613 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5614 							struct dc_sink *sink, struct dc_stream_state *stream,
5615 							struct dsc_dec_dpcd_caps *dsc_caps)
5616 {
5617 	stream->timing.flags.DSC = 0;
5618 	dsc_caps->is_dsc_supported = false;
5619 
5620 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5621 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5622 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5623 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5624 				      dsc_caps);
5625 	}
5626 }
5627 
5628 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5629 										struct dc_sink *sink, struct dc_stream_state *stream,
5630 										struct dsc_dec_dpcd_caps *dsc_caps)
5631 {
5632 	struct drm_connector *drm_connector = &aconnector->base;
5633 	uint32_t link_bandwidth_kbps;
5634 
5635 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5636 							dc_link_get_link_cap(aconnector->dc_link));
5637 	/* Set the DSC policy from the connector's debugfs force-enable setting */
5638 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5639 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5640 
5641 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5642 
5643 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5644 						dsc_caps,
5645 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5646 						0,
5647 						link_bandwidth_kbps,
5648 						&stream->timing,
5649 						&stream->timing.dsc_cfg)) {
5650 			stream->timing.flags.DSC = 1;
5651 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5652 		}
5653 	}
5654 
5655 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5656 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5657 		stream->timing.flags.DSC = 1;
5658 
5659 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5660 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5661 
5662 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5663 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5664 
5665 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5666 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5667 }
5668 #endif
5669 
5670 /**
5671  * DOC: FreeSync Video
5672  *
5673  * When a userspace application wants to play a video, the content follows a
5674  * standard format definition that usually specifies the FPS for that format.
5675  * The list below illustrates some video formats and their expected FPS:
5677  *
5678  * - TV/NTSC (23.976 FPS)
5679  * - Cinema (24 FPS)
5680  * - TV/PAL (25 FPS)
5681  * - TV/NTSC (29.97 FPS)
5682  * - TV/NTSC (30 FPS)
5683  * - Cinema HFR (48 FPS)
5684  * - TV/PAL (50 FPS)
5685  * - Commonly used (60 FPS)
5686  * - Multiples of 24 (48,72,96 FPS)
5687  *
5688  * The list of standard video formats is not huge and can be added to the
5689  * connector's modeset list beforehand. With that, userspace can leverage
5690  * FreeSync to extend the front porch in order to attain the target refresh
5691  * rate. Such a switch happens seamlessly, without screen blanking or
5692  * reprogramming of the output in any other way. If userspace requests a
5693  * modesetting change compatible with FreeSync modes that only differ in the
5694  * refresh rate, DC will skip the full update and avoid any blink during the
5695  * transition. For example, a video player can switch from 60Hz to 30Hz for
5696  * TV/NTSC content when it goes full screen, without causing any display
5697  * blink.
5699  */
5700 static struct drm_display_mode *
5701 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5702 			  bool use_probed_modes)
5703 {
5704 	struct drm_display_mode *m, *m_pref = NULL;
5705 	u16 current_refresh, highest_refresh;
5706 	struct list_head *list_head = use_probed_modes ?
5707 						    &aconnector->base.probed_modes :
5708 						    &aconnector->base.modes;
5709 
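	/* A nonzero clock means the result was already computed and cached. */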
5710 	if (aconnector->freesync_vid_base.clock != 0)
5711 		return &aconnector->freesync_vid_base;
5712 
5713 	/* Find the preferred mode */
5714 	list_for_each_entry (m, list_head, head) {
5715 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5716 			m_pref = m;
5717 			break;
5718 		}
5719 	}
5720 
5721 	if (!m_pref) {
5722 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5723 		m_pref = list_first_entry_or_null(
5724 			&aconnector->base.modes, struct drm_display_mode, head);
5725 		if (!m_pref) {
5726 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5727 			return NULL;
5728 		}
5729 	}
5730 
5731 	highest_refresh = drm_mode_vrefresh(m_pref);
5732 
5733 	/*
5734 	 * Find the mode with highest refresh rate with same resolution.
5735 	 * For some monitors, preferred mode is not the mode with highest
5736 	 * supported refresh rate.
5737 	 */
5738 	list_for_each_entry (m, list_head, head) {
5739 		current_refresh  = drm_mode_vrefresh(m);
5740 
5741 		if (m->hdisplay == m_pref->hdisplay &&
5742 		    m->vdisplay == m_pref->vdisplay &&
5743 		    highest_refresh < current_refresh) {
5744 			highest_refresh = current_refresh;
5745 			m_pref = m;
5746 		}
5747 	}
5748 
5749 	aconnector->freesync_vid_base = *m_pref;
5750 	return m_pref;
5751 }
5752 
5753 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5754 				   struct amdgpu_dm_connector *aconnector)
5755 {
5756 	struct drm_display_mode *high_mode;
5757 	int timing_diff;
5758 
5759 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5760 	if (!high_mode || !mode)
5761 		return false;
5762 
5763 	timing_diff = high_mode->vtotal - mode->vtotal;
5764 
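	/*
	 * The modes must match in everything except vtotal; the extra lines
	 * must land entirely in the vertical front porch, which is what the
	 * vsync_start/vsync_end delta checks below verify.
	 */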
5765 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5766 	    high_mode->hdisplay != mode->hdisplay ||
5767 	    high_mode->vdisplay != mode->vdisplay ||
5768 	    high_mode->hsync_start != mode->hsync_start ||
5769 	    high_mode->hsync_end != mode->hsync_end ||
5770 	    high_mode->htotal != mode->htotal ||
5771 	    high_mode->hskew != mode->hskew ||
5772 	    high_mode->vscan != mode->vscan ||
5773 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5774 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5775 		return false;
5776 	else
5777 		return true;
5778 }
5779 
5780 static struct dc_stream_state *
5781 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5782 		       const struct drm_display_mode *drm_mode,
5783 		       const struct dm_connector_state *dm_state,
5784 		       const struct dc_stream_state *old_stream,
5785 		       int requested_bpc)
5786 {
5787 	struct drm_display_mode *preferred_mode = NULL;
5788 	struct drm_connector *drm_connector;
5789 	const struct drm_connector_state *con_state =
5790 		dm_state ? &dm_state->base : NULL;
5791 	struct dc_stream_state *stream = NULL;
5792 	struct drm_display_mode mode = *drm_mode;
5793 	struct drm_display_mode saved_mode;
5794 	struct drm_display_mode *freesync_mode = NULL;
5795 	bool native_mode_found = false;
5796 	bool recalculate_timing = false;
5797 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5798 	int mode_refresh;
5799 	int preferred_refresh = 0;
5800 #if defined(CONFIG_DRM_AMD_DC_DCN)
5801 	struct dsc_dec_dpcd_caps dsc_caps;
5802 #endif
5803 	struct dc_sink *sink = NULL;
5804 
5805 	memset(&saved_mode, 0, sizeof(saved_mode));
5806 
5807 	if (aconnector == NULL) {
5808 		DRM_ERROR("aconnector is NULL!\n");
5809 		return stream;
5810 	}
5811 
5812 	drm_connector = &aconnector->base;
5813 
5814 	if (!aconnector->dc_sink) {
5815 		sink = create_fake_sink(aconnector);
5816 		if (!sink)
5817 			return stream;
5818 	} else {
5819 		sink = aconnector->dc_sink;
5820 		dc_sink_retain(sink);
5821 	}
5822 
5823 	stream = dc_create_stream_for_sink(sink);
5824 
5825 	if (stream == NULL) {
5826 		DRM_ERROR("Failed to create stream for sink!\n");
5827 		goto finish;
5828 	}
5829 
5830 	stream->dm_stream_context = aconnector;
5831 
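	/*
	 * Carry over the sink's SCDC preference for scrambling at TMDS
	 * character rates at or below 340 Mcsc.
	 */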
5832 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5833 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5834 
5835 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5836 		/* Search for preferred mode */
5837 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5838 			native_mode_found = true;
5839 			break;
5840 		}
5841 	}
5842 	if (!native_mode_found)
5843 		preferred_mode = list_first_entry_or_null(
5844 				&aconnector->base.modes,
5845 				struct drm_display_mode,
5846 				head);
5847 
5848 	mode_refresh = drm_mode_vrefresh(&mode);
5849 
5850 	if (preferred_mode == NULL) {
5851 		/*
5852 		 * This may not be an error: the use case is a hotplug with no
5853 		 * usermode call to reset and set the mode. In this case we set
5854 		 * the mode ourselves to restore the previous one, and the mode
5855 		 * list may not have been filled in yet.
5856 		 */
5857 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5858 	} else {
5859 		recalculate_timing = amdgpu_freesync_vid_mode &&
5860 				 is_freesync_video_mode(&mode, aconnector);
5861 		if (recalculate_timing) {
5862 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5863 			saved_mode = mode;
5864 			mode = *freesync_mode;
5865 		} else {
5866 			decide_crtc_timing_for_drm_display_mode(
5867 				&mode, preferred_mode, scale);
5868 
5869 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5870 		}
5871 	}
5872 
5873 	if (recalculate_timing)
5874 		drm_mode_set_crtcinfo(&saved_mode, 0);
5875 	else if (!dm_state)
5876 		drm_mode_set_crtcinfo(&mode, 0);
5877 
5878 	/*
5879 	 * If scaling is enabled and the refresh rate didn't change,
5880 	 * copy the VIC and sync polarities from the old timings.
5881 	 */
5882 	if (!scale || mode_refresh != preferred_refresh)
5883 		fill_stream_properties_from_drm_display_mode(
5884 			stream, &mode, &aconnector->base, con_state, NULL,
5885 			requested_bpc);
5886 	else
5887 		fill_stream_properties_from_drm_display_mode(
5888 			stream, &mode, &aconnector->base, con_state, old_stream,
5889 			requested_bpc);
5890 
5891 #if defined(CONFIG_DRM_AMD_DC_DCN)
5892 	/* SST DSC determination policy */
5893 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5894 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5895 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5896 #endif
5897 
5898 	update_stream_scaling_settings(&mode, dm_state, stream);
5899 
5900 	fill_audio_info(
5901 		&stream->audio_info,
5902 		drm_connector,
5903 		sink);
5904 
5905 	update_stream_signal(stream, sink);
5906 
5907 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5908 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5909 
5910 	if (stream->link->psr_settings.psr_feature_enabled) {
5911 		/*
5912 		 * Decide whether the stream supports VSC SDP colorimetry
5913 		 * before building the VSC info packet.
5914 		 */
5915 		stream->use_vsc_sdp_for_colorimetry = false;
5916 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5917 			stream->use_vsc_sdp_for_colorimetry =
5918 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5919 		} else {
5920 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5921 				stream->use_vsc_sdp_for_colorimetry = true;
5922 		}
5923 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5924 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
5925 
5926 	}
5927 finish:
5928 	dc_sink_release(sink);
5929 
5930 	return stream;
5931 }
5932 
5933 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5934 {
5935 	drm_crtc_cleanup(crtc);
5936 	kfree(crtc);
5937 }
5938 
5939 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5940 				  struct drm_crtc_state *state)
5941 {
5942 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5943 
5944 	/* TODO: destroy dc_stream objects here once the stream object is flattened */
5945 	if (cur->stream)
5946 		dc_stream_release(cur->stream);
5948 
5949 	__drm_atomic_helper_crtc_destroy_state(state);
5950 
5952 	kfree(state);
5953 }
5954 
5955 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5956 {
5957 	struct dm_crtc_state *state;
5958 
5959 	if (crtc->state)
5960 		dm_crtc_destroy_state(crtc, crtc->state);
5961 
5962 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5963 	if (WARN_ON(!state))
5964 		return;
5965 
5966 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5967 }
5968 
5969 static struct drm_crtc_state *
5970 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5971 {
5972 	struct dm_crtc_state *state, *cur;
5973 
5974 	if (WARN_ON(!crtc->state))
5975 		return NULL;
5976 
5977 	cur = to_dm_crtc_state(crtc->state);
5978 
5979 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5980 	if (!state)
5981 		return NULL;
5982 
5983 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5984 
5985 	if (cur->stream) {
5986 		state->stream = cur->stream;
5987 		dc_stream_retain(state->stream);
5988 	}
5989 
5990 	state->active_planes = cur->active_planes;
5991 	state->vrr_infopacket = cur->vrr_infopacket;
5992 	state->abm_level = cur->abm_level;
5993 	state->vrr_supported = cur->vrr_supported;
5994 	state->freesync_config = cur->freesync_config;
5995 	state->cm_has_degamma = cur->cm_has_degamma;
5996 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5997 	/* TODO: duplicate the dc_stream here once the stream object is flattened */
5998 
5999 	return &state->base;
6000 }
6001 
6002 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6003 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6004 {
6005 	crtc_debugfs_init(crtc);
6006 
6007 	return 0;
6008 }
6009 #endif
6010 
6011 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6012 {
6013 	enum dc_irq_source irq_source;
6014 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6015 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6016 	int rc;
6017 
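	/*
	 * VUPDATE interrupt sources are laid out per OTG instance, so offset
	 * the base source by the CRTC's OTG index.
	 */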
6018 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6019 
6020 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6021 
6022 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6023 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6024 	return rc;
6025 }
6026 
6027 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6028 {
6029 	enum dc_irq_source irq_source;
6030 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6031 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6032 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6033 #if defined(CONFIG_DRM_AMD_DC_DCN)
6034 	struct amdgpu_display_manager *dm = &adev->dm;
6035 	struct vblank_control_work *work;
6036 #endif
6037 	int rc = 0;
6038 
6039 	if (enable) {
6040 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6041 		if (amdgpu_dm_vrr_active(acrtc_state))
6042 			rc = dm_set_vupdate_irq(crtc, true);
6043 	} else {
6044 		/* vblank irq off -> vupdate irq off */
6045 		rc = dm_set_vupdate_irq(crtc, false);
6046 	}
6047 
6048 	if (rc)
6049 		return rc;
6050 
6051 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6052 
6053 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6054 		return -EBUSY;
6055 
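	/* Skip the deferred vblank work while the GPU is in reset. */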
6056 	if (amdgpu_in_reset(adev))
6057 		return 0;
6058 
6059 #if defined(CONFIG_DRM_AMD_DC_DCN)
6060 	if (dm->vblank_control_workqueue) {
6061 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6062 		if (!work)
6063 			return -ENOMEM;
6064 
6065 		INIT_WORK(&work->work, vblank_control_worker);
6066 		work->dm = dm;
6067 		work->acrtc = acrtc;
6068 		work->enable = enable;
6069 
6070 		if (acrtc_state->stream) {
6071 			dc_stream_retain(acrtc_state->stream);
6072 			work->stream = acrtc_state->stream;
6073 		}
6074 
6075 		queue_work(dm->vblank_control_workqueue, &work->work);
6076 	}
6077 #endif
6078 
6079 	return 0;
6080 }
6081 
6082 static int dm_enable_vblank(struct drm_crtc *crtc)
6083 {
6084 	return dm_set_vblank(crtc, true);
6085 }
6086 
6087 static void dm_disable_vblank(struct drm_crtc *crtc)
6088 {
6089 	dm_set_vblank(crtc, false);
6090 }
6091 
6092 /* Only the options currently available for the driver are implemented */
6093 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6094 	.reset = dm_crtc_reset_state,
6095 	.destroy = amdgpu_dm_crtc_destroy,
6096 	.set_config = drm_atomic_helper_set_config,
6097 	.page_flip = drm_atomic_helper_page_flip,
6098 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6099 	.atomic_destroy_state = dm_crtc_destroy_state,
6100 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6101 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6102 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6103 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6104 	.enable_vblank = dm_enable_vblank,
6105 	.disable_vblank = dm_disable_vblank,
6106 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6107 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6108 	.late_register = amdgpu_dm_crtc_late_register,
6109 #endif
6110 };
6111 
6112 static enum drm_connector_status
6113 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6114 {
6115 	bool connected;
6116 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6117 
6118 	/*
6119 	 * Notes:
6120 	 * 1. This interface is NOT called in context of HPD irq.
6121 	 * 2. This interface *is called* in context of a user-mode ioctl, which
6122 	 * makes it a bad place for *any* MST-related activity.
6123 	 */
6124 
6125 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6126 	    !aconnector->fake_enable)
6127 		connected = (aconnector->dc_sink != NULL);
6128 	else
6129 		connected = (aconnector->base.force == DRM_FORCE_ON);
6130 
6131 	update_subconnector_property(aconnector);
6132 
6133 	return (connected ? connector_status_connected :
6134 			connector_status_disconnected);
6135 }
6136 
6137 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6138 					    struct drm_connector_state *connector_state,
6139 					    struct drm_property *property,
6140 					    uint64_t val)
6141 {
6142 	struct drm_device *dev = connector->dev;
6143 	struct amdgpu_device *adev = drm_to_adev(dev);
6144 	struct dm_connector_state *dm_old_state =
6145 		to_dm_connector_state(connector->state);
6146 	struct dm_connector_state *dm_new_state =
6147 		to_dm_connector_state(connector_state);
6148 
6149 	int ret = -EINVAL;
6150 
6151 	if (property == dev->mode_config.scaling_mode_property) {
6152 		enum amdgpu_rmx_type rmx_type;
6153 
6154 		switch (val) {
6155 		case DRM_MODE_SCALE_CENTER:
6156 			rmx_type = RMX_CENTER;
6157 			break;
6158 		case DRM_MODE_SCALE_ASPECT:
6159 			rmx_type = RMX_ASPECT;
6160 			break;
6161 		case DRM_MODE_SCALE_FULLSCREEN:
6162 			rmx_type = RMX_FULL;
6163 			break;
6164 		case DRM_MODE_SCALE_NONE:
6165 		default:
6166 			rmx_type = RMX_OFF;
6167 			break;
6168 		}
6169 
6170 		if (dm_old_state->scaling == rmx_type)
6171 			return 0;
6172 
6173 		dm_new_state->scaling = rmx_type;
6174 		ret = 0;
6175 	} else if (property == adev->mode_info.underscan_hborder_property) {
6176 		dm_new_state->underscan_hborder = val;
6177 		ret = 0;
6178 	} else if (property == adev->mode_info.underscan_vborder_property) {
6179 		dm_new_state->underscan_vborder = val;
6180 		ret = 0;
6181 	} else if (property == adev->mode_info.underscan_property) {
6182 		dm_new_state->underscan_enable = val;
6183 		ret = 0;
6184 	} else if (property == adev->mode_info.abm_level_property) {
6185 		dm_new_state->abm_level = val;
6186 		ret = 0;
6187 	}
6188 
6189 	return ret;
6190 }
6191 
6192 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6193 					    const struct drm_connector_state *state,
6194 					    struct drm_property *property,
6195 					    uint64_t *val)
6196 {
6197 	struct drm_device *dev = connector->dev;
6198 	struct amdgpu_device *adev = drm_to_adev(dev);
6199 	struct dm_connector_state *dm_state =
6200 		to_dm_connector_state(state);
6201 	int ret = -EINVAL;
6202 
6203 	if (property == dev->mode_config.scaling_mode_property) {
6204 		switch (dm_state->scaling) {
6205 		case RMX_CENTER:
6206 			*val = DRM_MODE_SCALE_CENTER;
6207 			break;
6208 		case RMX_ASPECT:
6209 			*val = DRM_MODE_SCALE_ASPECT;
6210 			break;
6211 		case RMX_FULL:
6212 			*val = DRM_MODE_SCALE_FULLSCREEN;
6213 			break;
6214 		case RMX_OFF:
6215 		default:
6216 			*val = DRM_MODE_SCALE_NONE;
6217 			break;
6218 		}
6219 		ret = 0;
6220 	} else if (property == adev->mode_info.underscan_hborder_property) {
6221 		*val = dm_state->underscan_hborder;
6222 		ret = 0;
6223 	} else if (property == adev->mode_info.underscan_vborder_property) {
6224 		*val = dm_state->underscan_vborder;
6225 		ret = 0;
6226 	} else if (property == adev->mode_info.underscan_property) {
6227 		*val = dm_state->underscan_enable;
6228 		ret = 0;
6229 	} else if (property == adev->mode_info.abm_level_property) {
6230 		*val = dm_state->abm_level;
6231 		ret = 0;
6232 	}
6233 
6234 	return ret;
6235 }
6236 
6237 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6238 {
6239 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6240 
6241 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6242 }
6243 
6244 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6245 {
6246 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6247 	const struct dc_link *link = aconnector->dc_link;
6248 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6249 	struct amdgpu_display_manager *dm = &adev->dm;
6250 	int i;
6251 
6252 	/*
6253 	 * Call only if mst_mgr was initialized before, since it's not done
6254 	 * for all connector types.
6255 	 */
6256 	if (aconnector->mst_mgr.dev)
6257 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6258 
6259 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6260 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6261 	for (i = 0; i < dm->num_of_edps; i++) {
6262 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6263 			backlight_device_unregister(dm->backlight_dev[i]);
6264 			dm->backlight_dev[i] = NULL;
6265 		}
6266 	}
6267 #endif
6268 
6269 	if (aconnector->dc_em_sink)
6270 		dc_sink_release(aconnector->dc_em_sink);
6271 	aconnector->dc_em_sink = NULL;
6272 	if (aconnector->dc_sink)
6273 		dc_sink_release(aconnector->dc_sink);
6274 	aconnector->dc_sink = NULL;
6275 
6276 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6277 	drm_connector_unregister(connector);
6278 	drm_connector_cleanup(connector);
6279 	if (aconnector->i2c) {
6280 		i2c_del_adapter(&aconnector->i2c->base);
6281 		kfree(aconnector->i2c);
6282 	}
6283 	kfree(aconnector->dm_dp_aux.aux.name);
6284 
6285 	kfree(connector);
6286 }
6287 
6288 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6289 {
6290 	struct dm_connector_state *state =
6291 		to_dm_connector_state(connector->state);
6292 
6293 	if (connector->state)
6294 		__drm_atomic_helper_connector_destroy_state(connector->state);
6295 
6296 	kfree(state);
6297 
6298 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6299 
6300 	if (state) {
6301 		state->scaling = RMX_OFF;
6302 		state->underscan_enable = false;
6303 		state->underscan_hborder = 0;
6304 		state->underscan_vborder = 0;
6305 		state->base.max_requested_bpc = 8;
6306 		state->vcpi_slots = 0;
6307 		state->pbn = 0;
6308 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6309 			state->abm_level = amdgpu_dm_abm_level;
6310 
6311 		__drm_atomic_helper_connector_reset(connector, &state->base);
6312 	}
6313 }
6314 
6315 struct drm_connector_state *
6316 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6317 {
6318 	struct dm_connector_state *state =
6319 		to_dm_connector_state(connector->state);
6320 
6321 	struct dm_connector_state *new_state =
6322 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6323 
6324 	if (!new_state)
6325 		return NULL;
6326 
6327 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6328 
6329 	new_state->freesync_capable = state->freesync_capable;
6330 	new_state->abm_level = state->abm_level;
6331 	new_state->scaling = state->scaling;
6332 	new_state->underscan_enable = state->underscan_enable;
6333 	new_state->underscan_hborder = state->underscan_hborder;
6334 	new_state->underscan_vborder = state->underscan_vborder;
6335 	new_state->vcpi_slots = state->vcpi_slots;
6336 	new_state->pbn = state->pbn;
6337 	return &new_state->base;
6338 }
6339 
6340 static int
6341 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6342 {
6343 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6344 		to_amdgpu_dm_connector(connector);
6345 	int r;
6346 
6347 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6348 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6349 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6350 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6351 		if (r)
6352 			return r;
6353 	}
6354 
6355 #if defined(CONFIG_DEBUG_FS)
6356 	connector_debugfs_init(amdgpu_dm_connector);
6357 #endif
6358 
6359 	return 0;
6360 }
6361 
6362 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6363 	.reset = amdgpu_dm_connector_funcs_reset,
6364 	.detect = amdgpu_dm_connector_detect,
6365 	.fill_modes = drm_helper_probe_single_connector_modes,
6366 	.destroy = amdgpu_dm_connector_destroy,
6367 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6368 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6369 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6370 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6371 	.late_register = amdgpu_dm_connector_late_register,
6372 	.early_unregister = amdgpu_dm_connector_unregister
6373 };
6374 
6375 static int get_modes(struct drm_connector *connector)
6376 {
6377 	return amdgpu_dm_connector_get_modes(connector);
6378 }
6379 
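/*
 * Build an emulated sink from the EDID forced on the connector, so a
 * connector forced ON behaves as if that panel were attached.
 */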
6380 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6381 {
6382 	struct dc_sink_init_data init_params = {
6383 			.link = aconnector->dc_link,
6384 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6385 	};
6386 	struct edid *edid;
6387 
6388 	if (!aconnector->base.edid_blob_ptr) {
6389 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6390 				aconnector->base.name);
6391 
6392 		aconnector->base.force = DRM_FORCE_OFF;
6393 		aconnector->base.override_edid = false;
6394 		return;
6395 	}
6396 
6397 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6398 
6399 	aconnector->edid = edid;
6400 
6401 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6402 		aconnector->dc_link,
6403 		(uint8_t *)edid,
6404 		(edid->extensions + 1) * EDID_LENGTH,
6405 		&init_params);
6406 
6407 	if (aconnector->base.force == DRM_FORCE_ON) {
6408 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6409 		aconnector->dc_link->local_sink :
6410 		aconnector->dc_em_sink;
6411 		dc_sink_retain(aconnector->dc_sink);
6412 	}
6413 }
6414 
6415 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6416 {
6417 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6418 
6419 	/*
6420 	 * In case of a headless boot with a DP managed connector forced on,
6421 	 * these settings have to be != 0 to get an initial modeset.
6422 	 */
6423 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6424 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6425 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6426 	}
6428 
6429 	aconnector->base.override_edid = true;
6430 	create_eml_sink(aconnector);
6431 }
6432 
6433 static struct dc_stream_state *
6434 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6435 				const struct drm_display_mode *drm_mode,
6436 				const struct dm_connector_state *dm_state,
6437 				const struct dc_stream_state *old_stream)
6438 {
6439 	struct drm_connector *connector = &aconnector->base;
6440 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6441 	struct dc_stream_state *stream;
6442 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6443 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6444 	enum dc_status dc_result = DC_OK;
6445 
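	/*
	 * Try progressively lower bpc values (the requested bpc, then 2 less
	 * on each retry, down to 6) until DC validates the stream.
	 */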
6446 	do {
6447 		stream = create_stream_for_sink(aconnector, drm_mode,
6448 						dm_state, old_stream,
6449 						requested_bpc);
6450 		if (stream == NULL) {
6451 			DRM_ERROR("Failed to create stream for sink!\n");
6452 			break;
6453 		}
6454 
6455 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6456 
6457 		if (dc_result != DC_OK) {
6458 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6459 				      drm_mode->hdisplay,
6460 				      drm_mode->vdisplay,
6461 				      drm_mode->clock,
6462 				      dc_result,
6463 				      dc_status_to_str(dc_result));
6464 
6465 			dc_stream_release(stream);
6466 			stream = NULL;
6467 			requested_bpc -= 2; /* lower bpc to retry validation */
6468 		}
6469 
6470 	} while (stream == NULL && requested_bpc >= 6);
6471 
6472 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6473 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6474 
6475 		aconnector->force_yuv420_output = true;
6476 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6477 						dm_state, old_stream);
6478 		aconnector->force_yuv420_output = false;
6479 	}
6480 
6481 	return stream;
6482 }
6483 
6484 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6485 				   struct drm_display_mode *mode)
6486 {
6487 	int result = MODE_ERROR;
6488 	struct dc_sink *dc_sink;
6489 	/* TODO: Unhardcode stream count */
6490 	struct dc_stream_state *stream;
6491 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6492 
6493 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6494 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6495 		return result;
6496 
6497 	/*
6498 	 * Only run this the first time mode_valid is called to initialize
6499 	 * EDID mgmt
6500 	 */
6501 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6502 		!aconnector->dc_em_sink)
6503 		handle_edid_mgmt(aconnector);
6504 
6505 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6506 
6507 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6508 				aconnector->base.force != DRM_FORCE_ON) {
6509 		DRM_ERROR("dc_sink is NULL!\n");
6510 		goto fail;
6511 	}
6512 
6513 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6514 	if (stream) {
6515 		dc_stream_release(stream);
6516 		result = MODE_OK;
6517 	}
6518 
6519 fail:
6520 	/* TODO: error handling */
6521 	return result;
6522 }
6523 
6524 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6525 				struct dc_info_packet *out)
6526 {
6527 	struct hdmi_drm_infoframe frame;
6528 	unsigned char buf[30]; /* 26 + 4 */
6529 	ssize_t len;
6530 	int ret, i;
6531 
6532 	memset(out, 0, sizeof(*out));
6533 
6534 	if (!state->hdr_output_metadata)
6535 		return 0;
6536 
6537 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6538 	if (ret)
6539 		return ret;
6540 
6541 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6542 	if (len < 0)
6543 		return (int)len;
6544 
6545 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6546 	if (len != 30)
6547 		return -EINVAL;
6548 
6549 	/* Prepare the infopacket for DC. */
6550 	switch (state->connector->connector_type) {
6551 	case DRM_MODE_CONNECTOR_HDMIA:
6552 		out->hb0 = 0x87; /* type */
6553 		out->hb1 = 0x01; /* version */
6554 		out->hb2 = 0x1A; /* length */
6555 		out->sb[0] = buf[3]; /* checksum */
6556 		i = 1;
6557 		break;
6558 
6559 	case DRM_MODE_CONNECTOR_DisplayPort:
6560 	case DRM_MODE_CONNECTOR_eDP:
6561 		out->hb0 = 0x00; /* sdp id, zero */
6562 		out->hb1 = 0x87; /* type */
6563 		out->hb2 = 0x1D; /* payload len - 1 */
6564 		out->hb3 = (0x13 << 2); /* sdp version */
6565 		out->sb[0] = 0x01; /* version */
6566 		out->sb[1] = 0x1A; /* length */
6567 		i = 2;
6568 		break;
6569 
6570 	default:
6571 		return -EINVAL;
6572 	}
6573 
6574 	memcpy(&out->sb[i], &buf[4], 26);
6575 	out->valid = true;
6576 
6577 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6578 		       sizeof(out->sb), false);
6579 
6580 	return 0;
6581 }
6582 
6583 static int
6584 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6585 				 struct drm_atomic_state *state)
6586 {
6587 	struct drm_connector_state *new_con_state =
6588 		drm_atomic_get_new_connector_state(state, conn);
6589 	struct drm_connector_state *old_con_state =
6590 		drm_atomic_get_old_connector_state(state, conn);
6591 	struct drm_crtc *crtc = new_con_state->crtc;
6592 	struct drm_crtc_state *new_crtc_state;
6593 	int ret;
6594 
6595 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6596 
6597 	if (!crtc)
6598 		return 0;
6599 
6600 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6601 		struct dc_info_packet hdr_infopacket;
6602 
6603 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6604 		if (ret)
6605 			return ret;
6606 
6607 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6608 		if (IS_ERR(new_crtc_state))
6609 			return PTR_ERR(new_crtc_state);
6610 
6611 		/*
6612 		 * DC considers the stream backends changed if the
6613 		 * static metadata changes. Forcing the modeset also
6614 		 * gives a simple way for userspace to switch from
6615 		 * 8bpc to 10bpc when setting the metadata to enter
6616 		 * or exit HDR.
6617 		 *
6618 		 * Changing the static metadata after it's been
6619 		 * set is permissible, however. So only force a
6620 		 * modeset if we're entering or exiting HDR.
6621 		 */
6622 		new_crtc_state->mode_changed =
6623 			!old_con_state->hdr_output_metadata ||
6624 			!new_con_state->hdr_output_metadata;
6625 	}
6626 
6627 	return 0;
6628 }
6629 
6630 static const struct drm_connector_helper_funcs
6631 amdgpu_dm_connector_helper_funcs = {
6632 	/*
6633 	 * If hotplugging a second, bigger display in FB console mode, bigger
6634 	 * resolution modes will be filtered by drm_mode_validate_size() and
6635 	 * will be missing after the user starts lightdm. We therefore need to
6636 	 * renew the mode list in the get_modes callback, not just return the
6637 	 * mode count.
6638 	 */
6638 	.get_modes = get_modes,
6639 	.mode_valid = amdgpu_dm_connector_mode_valid,
6640 	.atomic_check = amdgpu_dm_connector_atomic_check,
6641 };
6642 
6643 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6644 {
6645 }
6646 
6647 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6648 {
6649 	struct drm_atomic_state *state = new_crtc_state->state;
6650 	struct drm_plane *plane;
6651 	int num_active = 0;
6652 
6653 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6654 		struct drm_plane_state *new_plane_state;
6655 
6656 		/* Cursor planes are "fake". */
6657 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6658 			continue;
6659 
6660 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6661 
6662 		if (!new_plane_state) {
6663 			/*
6664 			 * The plane is enabled on the CRTC and hasn't changed
6665 			 * state. This means that it previously passed
6666 			 * validation and is therefore enabled.
6667 			 */
6668 			num_active += 1;
6669 			continue;
6670 		}
6671 
6672 		/* We need a framebuffer to be considered enabled. */
6673 		num_active += (new_plane_state->fb != NULL);
6674 	}
6675 
6676 	return num_active;
6677 }
6678 
6679 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6680 					 struct drm_crtc_state *new_crtc_state)
6681 {
6682 	struct dm_crtc_state *dm_new_crtc_state =
6683 		to_dm_crtc_state(new_crtc_state);
6684 
6685 	dm_new_crtc_state->active_planes = 0;
6686 
6687 	if (!dm_new_crtc_state->stream)
6688 		return;
6689 
6690 	dm_new_crtc_state->active_planes =
6691 		count_crtc_active_planes(new_crtc_state);
6692 }
6693 
6694 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6695 				       struct drm_atomic_state *state)
6696 {
6697 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6698 									  crtc);
6699 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6700 	struct dc *dc = adev->dm.dc;
6701 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6702 	int ret = -EINVAL;
6703 
6704 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6705 
6706 	dm_update_crtc_active_planes(crtc, crtc_state);
6707 
6708 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6709 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6710 		return ret;
6711 	}
6712 
6713 	/*
6714 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6715 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6716 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6717 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6718 	 */
6719 	if (crtc_state->enable &&
6720 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6721 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6722 		return -EINVAL;
6723 	}
6724 
6725 	/* In some use cases, like reset, no stream is attached */
6726 	if (!dm_crtc_state->stream)
6727 		return 0;
6728 
6729 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6730 		return 0;
6731 
6732 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6733 	return ret;
6734 }
6735 
6736 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6737 				      const struct drm_display_mode *mode,
6738 				      struct drm_display_mode *adjusted_mode)
6739 {
6740 	return true;
6741 }
6742 
6743 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6744 	.disable = dm_crtc_helper_disable,
6745 	.atomic_check = dm_crtc_helper_atomic_check,
6746 	.mode_fixup = dm_crtc_helper_mode_fixup,
6747 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6748 };
6749 
6750 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6751 {
6752 
6753 }
6754 
6755 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6756 {
6757 	switch (display_color_depth) {
6758 	case COLOR_DEPTH_666:
6759 		return 6;
6760 	case COLOR_DEPTH_888:
6761 		return 8;
6762 	case COLOR_DEPTH_101010:
6763 		return 10;
6764 	case COLOR_DEPTH_121212:
6765 		return 12;
6766 	case COLOR_DEPTH_141414:
6767 		return 14;
6768 	case COLOR_DEPTH_161616:
6769 		return 16;
6770 	default:
6771 		break;
6772 	}
6773 	return 0;
6774 }
6775 
6776 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6777 					  struct drm_crtc_state *crtc_state,
6778 					  struct drm_connector_state *conn_state)
6779 {
6780 	struct drm_atomic_state *state = crtc_state->state;
6781 	struct drm_connector *connector = conn_state->connector;
6782 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6783 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6784 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6785 	struct drm_dp_mst_topology_mgr *mst_mgr;
6786 	struct drm_dp_mst_port *mst_port;
6787 	enum dc_color_depth color_depth;
6788 	int clock, bpp = 0;
6789 	bool is_y420 = false;
6790 
6791 	if (!aconnector->port || !aconnector->dc_sink)
6792 		return 0;
6793 
6794 	mst_port = aconnector->port;
6795 	mst_mgr = &aconnector->mst_port->mst_mgr;
6796 
6797 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6798 		return 0;
6799 
6800 	if (!state->duplicated) {
6801 		int max_bpc = conn_state->max_requested_bpc;
6802 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6803 				aconnector->force_yuv420_output;
6804 		color_depth = convert_color_depth_from_display_info(connector,
6805 								    is_y420,
6806 								    max_bpc);
6807 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6808 		clock = adjusted_mode->clock;
6809 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6810 	}
6811 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6812 									   mst_mgr,
6813 									   mst_port,
6814 									   dm_new_connector_state->pbn,
6815 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6816 	if (dm_new_connector_state->vcpi_slots < 0) {
6817 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6818 		return dm_new_connector_state->vcpi_slots;
6819 	}
6820 	return 0;
6821 }
6822 
6823 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6824 	.disable = dm_encoder_helper_disable,
6825 	.atomic_check = dm_encoder_helper_atomic_check
6826 };
6827 
6828 #if defined(CONFIG_DRM_AMD_DC_DCN)
6829 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6830 					    struct dc_state *dc_state,
6831 					    struct dsc_mst_fairness_vars *vars)
6832 {
6833 	struct dc_stream_state *stream = NULL;
6834 	struct drm_connector *connector;
6835 	struct drm_connector_state *new_con_state;
6836 	struct amdgpu_dm_connector *aconnector;
6837 	struct dm_connector_state *dm_conn_state;
6838 	int i, j, clock;
6839 	int vcpi, pbn_div, pbn = 0;
6840 
6841 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6842 
6843 		aconnector = to_amdgpu_dm_connector(connector);
6844 
6845 		if (!aconnector->port)
6846 			continue;
6847 
6848 		if (!new_con_state || !new_con_state->crtc)
6849 			continue;
6850 
6851 		dm_conn_state = to_dm_connector_state(new_con_state);
6852 
6853 		for (j = 0; j < dc_state->stream_count; j++) {
6854 			stream = dc_state->streams[j];
6855 			if (!stream)
6856 				continue;
6857 
6858 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6859 				break;
6860 
6861 			stream = NULL;
6862 		}
6863 
6864 		if (!stream)
6865 			continue;
6866 
6867 		if (stream->timing.flags.DSC != 1) {
6868 			drm_dp_mst_atomic_enable_dsc(state,
6869 						     aconnector->port,
6870 						     dm_conn_state->pbn,
6871 						     0,
6872 						     false);
6873 			continue;
6874 		}
6875 
6876 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6877 		clock = stream->timing.pix_clk_100hz / 10;
6878 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
6879 		for (j = 0; j < dc_state->stream_count; j++) {
6880 			if (vars[j].aconnector == aconnector) {
6881 				pbn = vars[j].pbn;
6882 				break;
6883 			}
6884 		}
6885 
6886 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6887 						    aconnector->port,
6888 						    pbn, pbn_div,
6889 						    true);
6890 		if (vcpi < 0)
6891 			return vcpi;
6892 
6893 		dm_conn_state->pbn = pbn;
6894 		dm_conn_state->vcpi_slots = vcpi;
6895 	}
6896 	return 0;
6897 }
6898 #endif
6899 
6900 static void dm_drm_plane_reset(struct drm_plane *plane)
6901 {
6902 	struct dm_plane_state *amdgpu_state = NULL;
6903 
6904 	if (plane->state)
6905 		plane->funcs->atomic_destroy_state(plane, plane->state);
6906 
6907 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6908 	WARN_ON(amdgpu_state == NULL);
6909 
6910 	if (amdgpu_state)
6911 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6912 }
6913 
6914 static struct drm_plane_state *
6915 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6916 {
6917 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6918 
6919 	old_dm_plane_state = to_dm_plane_state(plane->state);
6920 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6921 	if (!dm_plane_state)
6922 		return NULL;
6923 
6924 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6925 
6926 	if (old_dm_plane_state->dc_state) {
6927 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6928 		dc_plane_state_retain(dm_plane_state->dc_state);
6929 	}
6930 
6931 	return &dm_plane_state->base;
6932 }
6933 
6934 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6935 				struct drm_plane_state *state)
6936 {
6937 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6938 
6939 	if (dm_plane_state->dc_state)
6940 		dc_plane_state_release(dm_plane_state->dc_state);
6941 
6942 	drm_atomic_helper_plane_destroy_state(plane, state);
6943 }
6944 
6945 static const struct drm_plane_funcs dm_plane_funcs = {
6946 	.update_plane	= drm_atomic_helper_update_plane,
6947 	.disable_plane	= drm_atomic_helper_disable_plane,
6948 	.destroy	= drm_primary_helper_destroy,
6949 	.reset = dm_drm_plane_reset,
6950 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6951 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6952 	.format_mod_supported = dm_plane_format_mod_supported,
6953 };
6954 
6955 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6956 				      struct drm_plane_state *new_state)
6957 {
6958 	struct amdgpu_framebuffer *afb;
6959 	struct drm_gem_object *obj;
6960 	struct amdgpu_device *adev;
6961 	struct amdgpu_bo *rbo;
6962 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6963 	struct list_head list;
6964 	struct ttm_validate_buffer tv;
6965 	struct ww_acquire_ctx ticket;
6966 	uint32_t domain;
6967 	int r;
6968 
6969 	if (!new_state->fb) {
6970 		DRM_DEBUG_KMS("No FB bound\n");
6971 		return 0;
6972 	}
6973 
6974 	afb = to_amdgpu_framebuffer(new_state->fb);
6975 	obj = new_state->fb->obj[0];
6976 	rbo = gem_to_amdgpu_bo(obj);
6977 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6978 	INIT_LIST_HEAD(&list);
6979 
6980 	tv.bo = &rbo->tbo;
6981 	tv.num_shared = 1;
6982 	list_add(&tv.head, &list);
6983 
6984 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6985 	if (r) {
6986 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6987 		return r;
6988 	}
6989 
6990 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6991 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6992 	else
6993 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6994 
6995 	r = amdgpu_bo_pin(rbo, domain);
6996 	if (unlikely(r != 0)) {
6997 		if (r != -ERESTARTSYS)
6998 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6999 		ttm_eu_backoff_reservation(&ticket, &list);
7000 		return r;
7001 	}
7002 
7003 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7004 	if (unlikely(r != 0)) {
7005 		amdgpu_bo_unpin(rbo);
7006 		ttm_eu_backoff_reservation(&ticket, &list);
7007 		DRM_ERROR("%p bind failed\n", rbo);
7008 		return r;
7009 	}
7010 
7011 	ttm_eu_backoff_reservation(&ticket, &list);
7012 
7013 	afb->address = amdgpu_bo_gpu_offset(rbo);
7014 
7015 	amdgpu_bo_ref(rbo);
7016 
7017 	/*
7018 	 * We don't do surface updates on planes that have been newly created,
7019 	 * but we also don't have the afb->address during atomic check.
7020 	 *
7021 	 * Fill in buffer attributes depending on the address here, but only on
7022 	 * newly created planes since they're not being used by DC yet and this
7023 	 * won't modify global state.
7024 	 */
7025 	dm_plane_state_old = to_dm_plane_state(plane->state);
7026 	dm_plane_state_new = to_dm_plane_state(new_state);
7027 
7028 	if (dm_plane_state_new->dc_state &&
7029 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7030 		struct dc_plane_state *plane_state =
7031 			dm_plane_state_new->dc_state;
7032 		bool force_disable_dcc = !plane_state->dcc.enable;
7033 
7034 		fill_plane_buffer_attributes(
7035 			adev, afb, plane_state->format, plane_state->rotation,
7036 			afb->tiling_flags,
7037 			&plane_state->tiling_info, &plane_state->plane_size,
7038 			&plane_state->dcc, &plane_state->address,
7039 			afb->tmz_surface, force_disable_dcc);
7040 	}
7041 
7042 	return 0;
7043 }
7044 
7045 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7046 				       struct drm_plane_state *old_state)
7047 {
7048 	struct amdgpu_bo *rbo;
7049 	int r;
7050 
7051 	if (!old_state->fb)
7052 		return;
7053 
7054 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7055 	r = amdgpu_bo_reserve(rbo, false);
7056 	if (unlikely(r)) {
7057 		DRM_ERROR("failed to reserve rbo before unpin\n");
7058 		return;
7059 	}
7060 
7061 	amdgpu_bo_unpin(rbo);
7062 	amdgpu_bo_unreserve(rbo);
7063 	amdgpu_bo_unref(&rbo);
7064 }
7065 
7066 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7067 				       struct drm_crtc_state *new_crtc_state)
7068 {
7069 	struct drm_framebuffer *fb = state->fb;
7070 	int min_downscale, max_upscale;
7071 	int min_scale = 0;
7072 	int max_scale = INT_MAX;
7073 
7074 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7075 	if (fb && state->crtc) {
7076 		/* Validate viewport to cover the case when only the position changes */
7077 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7078 			int viewport_width = state->crtc_w;
7079 			int viewport_height = state->crtc_h;
7080 
7081 			if (state->crtc_x < 0)
7082 				viewport_width += state->crtc_x;
7083 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7084 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7085 
7086 			if (state->crtc_y < 0)
7087 				viewport_height += state->crtc_y;
7088 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7089 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7090 
7091 			if (viewport_width < 0 || viewport_height < 0) {
7092 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7093 				return -EINVAL;
7094 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7095 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7096 				return -EINVAL;
7097 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7098 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7099 				return -EINVAL;
7100 			}
7101 
7102 		}
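		/*
		 * Example of the clipping above: a plane at crtc_x = -100 with
		 * crtc_w = 300 keeps a 200 pixel wide viewport; once a viewport
		 * dimension goes negative the plane is fully offscreen.
		 */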
7103 
7104 		/* Get min/max allowed scaling factors from plane caps. */
7105 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7106 					     &min_downscale, &max_upscale);
7107 		/*
7108 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7109 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7110 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7111 		 */
7112 		min_scale = (1000 << 16) / max_upscale;
7113 		max_scale = (1000 << 16) / min_downscale;
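		/*
		 * Worked example: with max_upscale = 16000 (16x in DC's
		 * 1.0 == 1000 convention), min_scale = (1000 << 16) / 16000 =
		 * 4096, i.e. 1/16 in 16.16 fixed point.
		 */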
7114 	}
7115 
7116 	return drm_atomic_helper_check_plane_state(
7117 		state, new_crtc_state, min_scale, max_scale, true, true);
7118 }
7119 
7120 static int dm_plane_atomic_check(struct drm_plane *plane,
7121 				 struct drm_atomic_state *state)
7122 {
7123 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7124 										 plane);
7125 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7126 	struct dc *dc = adev->dm.dc;
7127 	struct dm_plane_state *dm_plane_state;
7128 	struct dc_scaling_info scaling_info;
7129 	struct drm_crtc_state *new_crtc_state;
7130 	int ret;
7131 
7132 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7133 
7134 	dm_plane_state = to_dm_plane_state(new_plane_state);
7135 
7136 	if (!dm_plane_state->dc_state)
7137 		return 0;
7138 
7139 	new_crtc_state =
7140 		drm_atomic_get_new_crtc_state(state,
7141 					      new_plane_state->crtc);
7142 	if (!new_crtc_state)
7143 		return -EINVAL;
7144 
7145 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7146 	if (ret)
7147 		return ret;
7148 
7149 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7150 	if (ret)
7151 		return ret;
7152 
7153 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7154 		return 0;
7155 
7156 	return -EINVAL;
7157 }
7158 
7159 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7160 				       struct drm_atomic_state *state)
7161 {
7162 	/* Only support async updates on cursor planes. */
7163 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7164 		return -EINVAL;
7165 
7166 	return 0;
7167 }
7168 
7169 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7170 					 struct drm_atomic_state *state)
7171 {
7172 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7173 									   plane);
7174 	struct drm_plane_state *old_state =
7175 		drm_atomic_get_old_plane_state(state, plane);
7176 
7177 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7178 
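	/*
	 * Async updates skip the regular atomic commit, so mirror the new
	 * coordinates into the current plane state by hand before
	 * programming the cursor below.
	 */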
7179 	swap(plane->state->fb, new_state->fb);
7180 
7181 	plane->state->src_x = new_state->src_x;
7182 	plane->state->src_y = new_state->src_y;
7183 	plane->state->src_w = new_state->src_w;
7184 	plane->state->src_h = new_state->src_h;
7185 	plane->state->crtc_x = new_state->crtc_x;
7186 	plane->state->crtc_y = new_state->crtc_y;
7187 	plane->state->crtc_w = new_state->crtc_w;
7188 	plane->state->crtc_h = new_state->crtc_h;
7189 
7190 	handle_cursor_update(plane, old_state);
7191 }
7192 
7193 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7194 	.prepare_fb = dm_plane_helper_prepare_fb,
7195 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7196 	.atomic_check = dm_plane_atomic_check,
7197 	.atomic_async_check = dm_plane_atomic_async_check,
7198 	.atomic_async_update = dm_plane_atomic_async_update
7199 };
7200 
7201 /*
7202  * TODO: these are currently initialized to RGB formats only.
7203  * For future use cases we should either initialize them dynamically based
7204  * on plane capabilities, or initialize this array to all formats, so the
7205  * internal drm check will succeed, and let DC implement the proper check.
7206  */
7207 static const uint32_t rgb_formats[] = {
7208 	DRM_FORMAT_XRGB8888,
7209 	DRM_FORMAT_ARGB8888,
7210 	DRM_FORMAT_RGBA8888,
7211 	DRM_FORMAT_XRGB2101010,
7212 	DRM_FORMAT_XBGR2101010,
7213 	DRM_FORMAT_ARGB2101010,
7214 	DRM_FORMAT_ABGR2101010,
7215 	DRM_FORMAT_XRGB16161616,
7216 	DRM_FORMAT_XBGR16161616,
7217 	DRM_FORMAT_ARGB16161616,
7218 	DRM_FORMAT_ABGR16161616,
7219 	DRM_FORMAT_XBGR8888,
7220 	DRM_FORMAT_ABGR8888,
7221 	DRM_FORMAT_RGB565,
7222 };
7223 
7224 static const uint32_t overlay_formats[] = {
7225 	DRM_FORMAT_XRGB8888,
7226 	DRM_FORMAT_ARGB8888,
7227 	DRM_FORMAT_RGBA8888,
7228 	DRM_FORMAT_XBGR8888,
7229 	DRM_FORMAT_ABGR8888,
7230 	DRM_FORMAT_RGB565
7231 };
7232 
7233 static const u32 cursor_formats[] = {
7234 	DRM_FORMAT_ARGB8888
7235 };
7236 
7237 static int get_plane_formats(const struct drm_plane *plane,
7238 			     const struct dc_plane_cap *plane_cap,
7239 			     uint32_t *formats, int max_formats)
7240 {
7241 	int i, num_formats = 0;
7242 
7243 	/*
7244 	 * TODO: Query support for each group of formats directly from
7245 	 * DC plane caps. This will require adding more formats to the
7246 	 * caps list.
7247 	 */
7248 
7249 	switch (plane->type) {
7250 	case DRM_PLANE_TYPE_PRIMARY:
7251 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7252 			if (num_formats >= max_formats)
7253 				break;
7254 
7255 			formats[num_formats++] = rgb_formats[i];
7256 		}
7257 
7258 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7259 			formats[num_formats++] = DRM_FORMAT_NV12;
7260 		if (plane_cap && plane_cap->pixel_format_support.p010)
7261 			formats[num_formats++] = DRM_FORMAT_P010;
7262 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7263 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7264 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7265 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7266 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7267 		}
7268 		break;
7269 
7270 	case DRM_PLANE_TYPE_OVERLAY:
7271 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7272 			if (num_formats >= max_formats)
7273 				break;
7274 
7275 			formats[num_formats++] = overlay_formats[i];
7276 		}
7277 		break;
7278 
7279 	case DRM_PLANE_TYPE_CURSOR:
7280 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7281 			if (num_formats >= max_formats)
7282 				break;
7283 
7284 			formats[num_formats++] = cursor_formats[i];
7285 		}
7286 		break;
7287 	}
7288 
7289 	return num_formats;
7290 }
7291 
7292 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7293 				struct drm_plane *plane,
7294 				unsigned long possible_crtcs,
7295 				const struct dc_plane_cap *plane_cap)
7296 {
7297 	uint32_t formats[32];
7298 	int num_formats;
7299 	int res = -EPERM;
7300 	unsigned int supported_rotations;
7301 	uint64_t *modifiers = NULL;
7302 
7303 	num_formats = get_plane_formats(plane, plane_cap, formats,
7304 					ARRAY_SIZE(formats));
7305 
7306 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7307 	if (res)
7308 		return res;
7309 
7310 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7311 				       &dm_plane_funcs, formats, num_formats,
7312 				       modifiers, plane->type, NULL);
7313 	kfree(modifiers);
7314 	if (res)
7315 		return res;
7316 
7317 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7318 	    plane_cap && plane_cap->per_pixel_alpha) {
7319 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7320 					  BIT(DRM_MODE_BLEND_PREMULTI);
7321 
7322 		drm_plane_create_alpha_property(plane);
7323 		drm_plane_create_blend_mode_property(plane, blend_caps);
7324 	}
7325 
7326 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7327 	    plane_cap &&
7328 	    (plane_cap->pixel_format_support.nv12 ||
7329 	     plane_cap->pixel_format_support.p010)) {
7330 		/* This only affects YUV formats. */
7331 		drm_plane_create_color_properties(
7332 			plane,
7333 			BIT(DRM_COLOR_YCBCR_BT601) |
7334 			BIT(DRM_COLOR_YCBCR_BT709) |
7335 			BIT(DRM_COLOR_YCBCR_BT2020),
7336 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7337 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7338 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7339 	}
7340 
7341 	supported_rotations =
7342 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7343 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7344 
7345 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7346 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7347 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7348 						   supported_rotations);
7349 
7350 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7351 
7352 	/* Create (reset) the plane state */
7353 	if (plane->funcs->reset)
7354 		plane->funcs->reset(plane);
7355 
7356 	return 0;
7357 }
7358 
7359 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7360 			       struct drm_plane *plane,
7361 			       uint32_t crtc_index)
7362 {
7363 	struct amdgpu_crtc *acrtc = NULL;
7364 	struct drm_plane *cursor_plane;
7365 
7366 	int res = -ENOMEM;
7367 
7368 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7369 	if (!cursor_plane)
7370 		goto fail;
7371 
7372 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7373 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7374 
7375 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7376 	if (!acrtc)
7377 		goto fail;
7378 
7379 	res = drm_crtc_init_with_planes(
7380 			dm->ddev,
7381 			&acrtc->base,
7382 			plane,
7383 			cursor_plane,
7384 			&amdgpu_dm_crtc_funcs, NULL);
7385 
7386 	if (res)
7387 		goto fail;
7388 
7389 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7390 
7391 	/* Create (reset) the CRTC state */
7392 	if (acrtc->base.funcs->reset)
7393 		acrtc->base.funcs->reset(&acrtc->base);
7394 
7395 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7396 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7397 
7398 	acrtc->crtc_id = crtc_index;
7399 	acrtc->base.enabled = false;
7400 	acrtc->otg_inst = -1;
7401 
7402 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7403 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7404 				   true, MAX_COLOR_LUT_ENTRIES);
7405 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7406 
7407 	return 0;
7408 
7409 fail:
7410 	kfree(acrtc);
7411 	kfree(cursor_plane);
7412 	return res;
7413 }
7414 
7415 
7416 static int to_drm_connector_type(enum amd_signal_type st)
7417 {
7418 	switch (st) {
7419 	case SIGNAL_TYPE_HDMI_TYPE_A:
7420 		return DRM_MODE_CONNECTOR_HDMIA;
7421 	case SIGNAL_TYPE_EDP:
7422 		return DRM_MODE_CONNECTOR_eDP;
7423 	case SIGNAL_TYPE_LVDS:
7424 		return DRM_MODE_CONNECTOR_LVDS;
7425 	case SIGNAL_TYPE_RGB:
7426 		return DRM_MODE_CONNECTOR_VGA;
7427 	case SIGNAL_TYPE_DISPLAY_PORT:
7428 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7429 		return DRM_MODE_CONNECTOR_DisplayPort;
7430 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7431 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7432 		return DRM_MODE_CONNECTOR_DVID;
7433 	case SIGNAL_TYPE_VIRTUAL:
7434 		return DRM_MODE_CONNECTOR_VIRTUAL;
7435 
7436 	default:
7437 		return DRM_MODE_CONNECTOR_Unknown;
7438 	}
7439 }
7440 
7441 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7442 {
7443 	struct drm_encoder *encoder;
7444 
7445 	/* There is only one encoder per connector */
7446 	drm_connector_for_each_possible_encoder(connector, encoder)
7447 		return encoder;
7448 
7449 	return NULL;
7450 }
7451 
7452 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7453 {
7454 	struct drm_encoder *encoder;
7455 	struct amdgpu_encoder *amdgpu_encoder;
7456 
7457 	encoder = amdgpu_dm_connector_to_encoder(connector);
7458 
7459 	if (encoder == NULL)
7460 		return;
7461 
7462 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7463 
7464 	amdgpu_encoder->native_mode.clock = 0;
7465 
7466 	if (!list_empty(&connector->probed_modes)) {
7467 		struct drm_display_mode *preferred_mode = NULL;
7468 
7469 		list_for_each_entry(preferred_mode,
7470 				    &connector->probed_modes,
7471 				    head) {
7472 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7473 				amdgpu_encoder->native_mode = *preferred_mode;
7474 
7475 			break;
7476 		}
7477 
7478 	}
7479 }
7480 
7481 static struct drm_display_mode *
7482 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7483 			     char *name,
7484 			     int hdisplay, int vdisplay)
7485 {
7486 	struct drm_device *dev = encoder->dev;
7487 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7488 	struct drm_display_mode *mode = NULL;
7489 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7490 
7491 	mode = drm_mode_duplicate(dev, native_mode);
7492 
7493 	if (mode == NULL)
7494 		return NULL;
7495 
7496 	mode->hdisplay = hdisplay;
7497 	mode->vdisplay = vdisplay;
7498 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7499 #ifdef __linux__
7500 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7501 #else
7502 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7503 #endif
7504 
7505 	return mode;
7506 
7507 }
7508 
7509 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7510 						 struct drm_connector *connector)
7511 {
7512 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7513 	struct drm_display_mode *mode = NULL;
7514 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7515 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7516 				to_amdgpu_dm_connector(connector);
7517 	int i;
7518 	int n;
7519 	struct mode_size {
7520 		char name[DRM_DISPLAY_MODE_LEN];
7521 		int w;
7522 		int h;
7523 	} common_modes[] = {
7524 		{  "640x480",  640,  480},
7525 		{  "800x600",  800,  600},
7526 		{ "1024x768", 1024,  768},
7527 		{ "1280x720", 1280,  720},
7528 		{ "1280x800", 1280,  800},
7529 		{"1280x1024", 1280, 1024},
7530 		{ "1440x900", 1440,  900},
7531 		{"1680x1050", 1680, 1050},
7532 		{"1600x1200", 1600, 1200},
7533 		{"1920x1080", 1920, 1080},
7534 		{"1920x1200", 1920, 1200}
7535 	};
7536 
7537 	n = ARRAY_SIZE(common_modes);
7538 
7539 	for (i = 0; i < n; i++) {
7540 		struct drm_display_mode *curmode = NULL;
7541 		bool mode_existed = false;
7542 
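		/*
		 * Skip common modes larger than the native mode, and the
		 * native size itself, which the EDID already provides.
		 */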
7543 		if (common_modes[i].w > native_mode->hdisplay ||
7544 		    common_modes[i].h > native_mode->vdisplay ||
7545 		   (common_modes[i].w == native_mode->hdisplay &&
7546 		    common_modes[i].h == native_mode->vdisplay))
7547 			continue;
7548 
7549 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7550 			if (common_modes[i].w == curmode->hdisplay &&
7551 			    common_modes[i].h == curmode->vdisplay) {
7552 				mode_existed = true;
7553 				break;
7554 			}
7555 		}
7556 
7557 		if (mode_existed)
7558 			continue;
7559 
7560 		mode = amdgpu_dm_create_common_mode(encoder,
7561 				common_modes[i].name, common_modes[i].w,
7562 				common_modes[i].h);
7563 		drm_mode_probed_add(connector, mode);
7564 		amdgpu_dm_connector->num_modes++;
7565 	}
7566 }
7567 
7568 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7569 {
7570 	struct drm_encoder *encoder;
7571 	struct amdgpu_encoder *amdgpu_encoder;
7572 	const struct drm_display_mode *native_mode;
7573 
7574 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7575 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7576 		return;
7577 
7578 	encoder = amdgpu_dm_connector_to_encoder(connector);
7579 	if (!encoder)
7580 		return;
7581 
7582 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7583 
7584 	native_mode = &amdgpu_encoder->native_mode;
7585 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7586 		return;
7587 
7588 	drm_connector_set_panel_orientation_with_quirk(connector,
7589 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7590 						       native_mode->hdisplay,
7591 						       native_mode->vdisplay);
7592 }
7593 
7594 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7595 					      struct edid *edid)
7596 {
7597 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7598 			to_amdgpu_dm_connector(connector);
7599 
7600 	if (edid) {
7601 		/* empty probed_modes */
7602 		INIT_LIST_HEAD(&connector->probed_modes);
7603 		amdgpu_dm_connector->num_modes =
7604 				drm_add_edid_modes(connector, edid);
7605 
7606 		/* Sort the probed modes before calling
7607 		 * amdgpu_dm_get_native_mode(), since an EDID can have
7608 		 * more than one preferred mode. A mode later in the
7609 		 * probed mode list could be preferred at a higher
7610 		 * resolution: for example, 3840x2160 in the base EDID
7611 		 * preferred timing, with 4096x2160 preferred in a DID
7612 		 * extension block later.
7613 		 */
7614 		drm_mode_sort(&connector->probed_modes);
7615 		amdgpu_dm_get_native_mode(connector);
7616 
7617 		/* Freesync capabilities are reset by calling
7618 		 * drm_add_edid_modes() and need to be
7619 		 * restored here.
7620 		 */
7621 		amdgpu_dm_update_freesync_caps(connector, edid);
7622 
7623 		amdgpu_set_panel_orientation(connector);
7624 	} else {
7625 		amdgpu_dm_connector->num_modes = 0;
7626 	}
7627 }
7628 
7629 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7630 			      struct drm_display_mode *mode)
7631 {
7632 	struct drm_display_mode *m;
7633 
7634 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7635 		if (drm_mode_equal(m, mode))
7636 			return true;
7637 	}
7638 
7639 	return false;
7640 }
7641 
7642 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7643 {
7644 	const struct drm_display_mode *m;
7645 	struct drm_display_mode *new_mode;
7646 	uint i;
7647 	uint32_t new_modes_count = 0;
7648 
7649 	/* Standard FPS values
7650 	 *
7651 	 * 23.976   - TV/NTSC
7652 	 * 24 	    - Cinema
7653 	 * 25 	    - TV/PAL
7654 	 * 29.97    - TV/NTSC
7655 	 * 30 	    - TV/NTSC
7656 	 * 48 	    - Cinema HFR
7657 	 * 50 	    - TV/PAL
7658 	 * 60 	    - Commonly used
7659 	 * 48,72,96 - Multiples of 24
7660 	 */
7661 	static const uint32_t common_rates[] = {
7662 		23976, 24000, 25000, 29970, 30000,
7663 		48000, 50000, 60000, 72000, 96000
7664 	};
7665 
7666 	/*
7667 	 * Find mode with highest refresh rate with the same resolution
7668 	 * as the preferred mode. Some monitors report a preferred mode
7669 	 * with lower resolution than the highest refresh rate supported.
7670 	 */
7671 
7672 	m = get_highest_refresh_rate_mode(aconnector, true);
7673 	if (!m)
7674 		return 0;
7675 
7676 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7677 		uint64_t target_vtotal, target_vtotal_diff;
7678 		uint64_t num, den;
7679 
7680 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7681 			continue;
7682 
7683 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7684 		    common_rates[i] > aconnector->max_vfreq * 1000)
7685 			continue;
7686 
7687 		num = (unsigned long long)m->clock * 1000 * 1000;
7688 		den = common_rates[i] * (unsigned long long)m->htotal;
7689 		target_vtotal = div_u64(num, den);
7690 		target_vtotal_diff = target_vtotal - m->vtotal;
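		/*
		 * For example, stretching a CEA 1080p mode (clock 148500 kHz,
		 * htotal 2200) to 60000 mHz gives target_vtotal =
		 * 148500 * 10^6 / (60000 * 2200) = 1125, the standard 1080p60
		 * vtotal, so target_vtotal_diff is 0 for that rate.
		 */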
7691 
7692 		/* Check for illegal modes */
7693 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7694 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7695 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7696 			continue;
7697 
7698 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7699 		if (!new_mode)
7700 			goto out;
7701 
7702 		new_mode->vtotal += (u16)target_vtotal_diff;
7703 		new_mode->vsync_start += (u16)target_vtotal_diff;
7704 		new_mode->vsync_end += (u16)target_vtotal_diff;
7705 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7706 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7707 
7708 		if (!is_duplicate_mode(aconnector, new_mode)) {
7709 			drm_mode_probed_add(&aconnector->base, new_mode);
7710 			new_modes_count += 1;
7711 		} else
7712 			drm_mode_destroy(aconnector->base.dev, new_mode);
7713 	}
7714  out:
7715 	return new_modes_count;
7716 }
7717 
7718 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7719 						   struct edid *edid)
7720 {
7721 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7722 		to_amdgpu_dm_connector(connector);
7723 
7724 	if (!(amdgpu_freesync_vid_mode && edid))
7725 		return;
7726 
7727 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7728 		amdgpu_dm_connector->num_modes +=
7729 			add_fs_modes(amdgpu_dm_connector);
7730 }
7731 
7732 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7733 {
7734 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7735 			to_amdgpu_dm_connector(connector);
7736 	struct drm_encoder *encoder;
7737 	struct edid *edid = amdgpu_dm_connector->edid;
7738 
7739 	encoder = amdgpu_dm_connector_to_encoder(connector);
7740 
7741 	if (!drm_edid_is_valid(edid)) {
7742 		amdgpu_dm_connector->num_modes =
7743 				drm_add_modes_noedid(connector, 640, 480);
7744 	} else {
7745 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7746 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7747 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7748 	}
7749 	amdgpu_dm_fbc_init(connector);
7750 
7751 	return amdgpu_dm_connector->num_modes;
7752 }
7753 
7754 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7755 				     struct amdgpu_dm_connector *aconnector,
7756 				     int connector_type,
7757 				     struct dc_link *link,
7758 				     int link_index)
7759 {
7760 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7761 
7762 	/*
7763 	 * Some of the properties below require access to state, like bpc.
7764 	 * Allocate some default initial connector state with our reset helper.
7765 	 */
7766 	if (aconnector->base.funcs->reset)
7767 		aconnector->base.funcs->reset(&aconnector->base);
7768 
7769 	aconnector->connector_id = link_index;
7770 	aconnector->dc_link = link;
7771 	aconnector->base.interlace_allowed = false;
7772 	aconnector->base.doublescan_allowed = false;
7773 	aconnector->base.stereo_allowed = false;
7774 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7775 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7776 	aconnector->audio_inst = -1;
7777 	rw_init(&aconnector->hpd_lock, "dmhpd");
7778 
7779 	/*
7780 	 * Configure HPD hot-plug support: connector->polled defaults to 0,
7781 	 * which means HPD hot plug is not supported.
7782 	 */
7783 	switch (connector_type) {
7784 	case DRM_MODE_CONNECTOR_HDMIA:
7785 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7786 		aconnector->base.ycbcr_420_allowed =
7787 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7788 		break;
7789 	case DRM_MODE_CONNECTOR_DisplayPort:
7790 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7791 		aconnector->base.ycbcr_420_allowed =
7792 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7793 		break;
7794 	case DRM_MODE_CONNECTOR_DVID:
7795 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7796 		break;
7797 	default:
7798 		break;
7799 	}
7800 
7801 	drm_object_attach_property(&aconnector->base.base,
7802 				dm->ddev->mode_config.scaling_mode_property,
7803 				DRM_MODE_SCALE_NONE);
7804 
7805 	drm_object_attach_property(&aconnector->base.base,
7806 				adev->mode_info.underscan_property,
7807 				UNDERSCAN_OFF);
7808 	drm_object_attach_property(&aconnector->base.base,
7809 				adev->mode_info.underscan_hborder_property,
7810 				0);
7811 	drm_object_attach_property(&aconnector->base.base,
7812 				adev->mode_info.underscan_vborder_property,
7813 				0);
7814 
7815 	if (!aconnector->mst_port)
7816 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7817 
7818 	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7819 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7820 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7821 
7822 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7823 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7824 		drm_object_attach_property(&aconnector->base.base,
7825 				adev->mode_info.abm_level_property, 0);
7826 	}
7827 
7828 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7829 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7830 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7831 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7832 
7833 		if (!aconnector->mst_port)
7834 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7835 
7836 #ifdef CONFIG_DRM_AMD_DC_HDCP
7837 		if (adev->dm.hdcp_workqueue)
7838 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7839 #endif
7840 	}
7841 }
7842 
7843 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7844 			      struct i2c_msg *msgs, int num)
7845 {
7846 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7847 	struct ddc_service *ddc_service = i2c->ddc_service;
7848 	struct i2c_command cmd;
7849 	int i;
7850 	int result = -EIO;
7851 
7852 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7853 
7854 	if (!cmd.payloads)
7855 		return result;
7856 
7857 	cmd.number_of_payloads = num;
7858 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7859 	cmd.speed = 100;
7860 
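	/* Translate each i2c_msg into a DC payload; I2C_M_RD marks reads. */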
7861 	for (i = 0; i < num; i++) {
7862 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7863 		cmd.payloads[i].address = msgs[i].addr;
7864 		cmd.payloads[i].length = msgs[i].len;
7865 		cmd.payloads[i].data = msgs[i].buf;
7866 	}
7867 
7868 	if (dc_submit_i2c(
7869 			ddc_service->ctx->dc,
7870 			ddc_service->ddc_pin->hw_info.ddc_channel,
7871 			&cmd))
7872 		result = num;
7873 
7874 	kfree(cmd.payloads);
7875 	return result;
7876 }
7877 
7878 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7879 {
7880 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7881 }
7882 
7883 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7884 	.master_xfer = amdgpu_dm_i2c_xfer,
7885 	.functionality = amdgpu_dm_i2c_func,
7886 };
7887 
7888 static struct amdgpu_i2c_adapter *
7889 create_i2c(struct ddc_service *ddc_service,
7890 	   int link_index,
7891 	   int *res)
7892 {
7893 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7894 	struct amdgpu_i2c_adapter *i2c;
7895 
7896 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7897 	if (!i2c)
7898 		return NULL;
7899 #ifdef notyet
7900 	i2c->base.owner = THIS_MODULE;
7901 	i2c->base.class = I2C_CLASS_DDC;
7902 	i2c->base.dev.parent = &adev->pdev->dev;
7903 #endif
7904 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7905 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7906 	i2c_set_adapdata(&i2c->base, i2c);
7907 	i2c->ddc_service = ddc_service;
7908 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7909 
7910 	return i2c;
7911 }
7912 
7913 
7914 /*
7915  * Note: this function assumes that dc_link_detect() was called for the
7916  * dc_link which will be represented by this aconnector.
7917  */
7918 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7919 				    struct amdgpu_dm_connector *aconnector,
7920 				    uint32_t link_index,
7921 				    struct amdgpu_encoder *aencoder)
7922 {
7923 	int res = 0;
7924 	int connector_type;
7925 	struct dc *dc = dm->dc;
7926 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7927 	struct amdgpu_i2c_adapter *i2c;
7928 
7929 	link->priv = aconnector;
7930 
7931 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7932 
7933 	i2c = create_i2c(link->ddc, link->link_index, &res);
7934 	if (!i2c) {
7935 		DRM_ERROR("Failed to create i2c adapter data\n");
7936 		return -ENOMEM;
7937 	}
7938 
7939 	aconnector->i2c = i2c;
7940 	res = i2c_add_adapter(&i2c->base);
7941 
7942 	if (res) {
7943 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7944 		goto out_free;
7945 	}
7946 
7947 	connector_type = to_drm_connector_type(link->connector_signal);
7948 
7949 	res = drm_connector_init_with_ddc(
7950 			dm->ddev,
7951 			&aconnector->base,
7952 			&amdgpu_dm_connector_funcs,
7953 			connector_type,
7954 			&i2c->base);
7955 
7956 	if (res) {
7957 		DRM_ERROR("connector_init failed\n");
7958 		aconnector->connector_id = -1;
7959 		goto out_free;
7960 	}
7961 
7962 	drm_connector_helper_add(
7963 			&aconnector->base,
7964 			&amdgpu_dm_connector_helper_funcs);
7965 
7966 	amdgpu_dm_connector_init_helper(
7967 		dm,
7968 		aconnector,
7969 		connector_type,
7970 		link,
7971 		link_index);
7972 
7973 	drm_connector_attach_encoder(
7974 		&aconnector->base, &aencoder->base);
7975 
7976 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7977 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7978 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7979 
7980 out_free:
7981 	if (res) {
7982 		kfree(i2c);
7983 		aconnector->i2c = NULL;
7984 	}
7985 	return res;
7986 }
7987 
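/*
 * The switch below is equivalent to (1 << num_crtc) - 1, capped at the
 * six-CRTC maximum (0x3f).
 */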
7988 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7989 {
7990 	switch (adev->mode_info.num_crtc) {
7991 	case 1:
7992 		return 0x1;
7993 	case 2:
7994 		return 0x3;
7995 	case 3:
7996 		return 0x7;
7997 	case 4:
7998 		return 0xf;
7999 	case 5:
8000 		return 0x1f;
8001 	case 6:
8002 	default:
8003 		return 0x3f;
8004 	}
8005 }
8006 
8007 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8008 				  struct amdgpu_encoder *aencoder,
8009 				  uint32_t link_index)
8010 {
8011 	struct amdgpu_device *adev = drm_to_adev(dev);
8012 
8013 	int res = drm_encoder_init(dev,
8014 				   &aencoder->base,
8015 				   &amdgpu_dm_encoder_funcs,
8016 				   DRM_MODE_ENCODER_TMDS,
8017 				   NULL);
8018 
8019 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8020 
8021 	if (!res)
8022 		aencoder->encoder_id = link_index;
8023 	else
8024 		aencoder->encoder_id = -1;
8025 
8026 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8027 
8028 	return res;
8029 }
8030 
8031 static void manage_dm_interrupts(struct amdgpu_device *adev,
8032 				 struct amdgpu_crtc *acrtc,
8033 				 bool enable)
8034 {
8035 	/*
8036 	 * We have no guarantee that the frontend index maps to the same
8037 	 * backend index - some even map to more than one.
8038 	 *
8039 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8040 	 */
8041 	int irq_type =
8042 		amdgpu_display_crtc_idx_to_irq_type(
8043 			adev,
8044 			acrtc->crtc_id);
8045 
8046 	if (enable) {
8047 		drm_crtc_vblank_on(&acrtc->base);
8048 		amdgpu_irq_get(
8049 			adev,
8050 			&adev->pageflip_irq,
8051 			irq_type);
8052 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8053 		amdgpu_irq_get(
8054 			adev,
8055 			&adev->vline0_irq,
8056 			irq_type);
8057 #endif
8058 	} else {
8059 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8060 		amdgpu_irq_put(
8061 			adev,
8062 			&adev->vline0_irq,
8063 			irq_type);
8064 #endif
8065 		amdgpu_irq_put(
8066 			adev,
8067 			&adev->pageflip_irq,
8068 			irq_type);
8069 		drm_crtc_vblank_off(&acrtc->base);
8070 	}
8071 }
8072 
8073 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8074 				      struct amdgpu_crtc *acrtc)
8075 {
8076 	int irq_type =
8077 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8078 
8079 	/*
8080 	 * This reads the current state for the IRQ and forcibly reapplies
8081 	 * the setting to hardware.
8082 	 */
8083 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8084 }
8085 
8086 static bool
8087 is_scaling_state_different(const struct dm_connector_state *dm_state,
8088 			   const struct dm_connector_state *old_dm_state)
8089 {
8090 	if (dm_state->scaling != old_dm_state->scaling)
8091 		return true;
8092 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8093 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8094 			return true;
8095 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8096 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8097 			return true;
8098 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8099 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8100 		return true;
8101 	return false;
8102 }
8103 
8104 #ifdef CONFIG_DRM_AMD_DC_HDCP
8105 static bool is_content_protection_different(struct drm_connector_state *state,
8106 					    const struct drm_connector_state *old_state,
8107 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8108 {
8109 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8110 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8111 
8112 	/* Handle: Type0/1 change */
8113 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8114 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8115 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8116 		return true;
8117 	}
8118 
8119 	/* CP is being re-enabled; ignore this
8120 	 *
8121 	 * Handles:	ENABLED -> DESIRED
8122 	 */
8123 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8124 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8125 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8126 		return false;
8127 	}
8128 
8129 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8130 	 *
8131 	 * Handles:	UNDESIRED -> ENABLED
8132 	 */
8133 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8134 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8135 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8136 
8137 	/* Stream removed and re-enabled
8138 	 *
8139 	 * Can sometimes overlap with the HPD case,
8140 	 * thus set update_hdcp to false to avoid
8141 	 * setting HDCP multiple times.
8142 	 *
8143 	 * Handles:	DESIRED -> DESIRED (Special case)
8144 	 */
8145 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8146 		state->crtc && state->crtc->enabled &&
8147 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8148 		dm_con_state->update_hdcp = false;
8149 		return true;
8150 	}
8151 
8152 	/* Hot-plug, headless s3, dpms
8153 	 *
8154 	 * Only start HDCP if the display is connected/enabled.
8155 	 * update_hdcp flag will be set to false until the next
8156 	 * HPD comes in.
8157 	 *
8158 	 * Handles:	DESIRED -> DESIRED (Special case)
8159 	 */
8160 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8161 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8162 		dm_con_state->update_hdcp = false;
8163 		return true;
8164 	}
8165 
8166 	/*
8167 	 * Handles:	UNDESIRED -> UNDESIRED
8168 	 *		DESIRED -> DESIRED
8169 	 *		ENABLED -> ENABLED
8170 	 */
8171 	if (old_state->content_protection == state->content_protection)
8172 		return false;
8173 
8174 	/*
8175 	 * Handles:	UNDESIRED -> DESIRED
8176 	 *		DESIRED -> UNDESIRED
8177 	 *		ENABLED -> UNDESIRED
8178 	 */
8179 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8180 		return true;
8181 
8182 	/*
8183 	 * Handles:	DESIRED -> ENABLED
8184 	 */
8185 	return false;
8186 }
8187 
8188 #endif
8189 static void remove_stream(struct amdgpu_device *adev,
8190 			  struct amdgpu_crtc *acrtc,
8191 			  struct dc_stream_state *stream)
8192 {
8193 	/* this is the update mode case */
8194 
8195 	acrtc->otg_inst = -1;
8196 	acrtc->enabled = false;
8197 }
8198 
8199 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8200 			       struct dc_cursor_position *position)
8201 {
8202 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8203 	int x, y;
8204 	int xorigin = 0, yorigin = 0;
8205 
8206 	if (!crtc || !plane->state->fb)
8207 		return 0;
8208 
8209 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8210 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8211 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8212 			  __func__,
8213 			  plane->state->crtc_w,
8214 			  plane->state->crtc_h);
8215 		return -EINVAL;
8216 	}
8217 
8218 	x = plane->state->crtc_x;
8219 	y = plane->state->crtc_y;
8220 
8221 	if (x <= -amdgpu_crtc->max_cursor_width ||
8222 	    y <= -amdgpu_crtc->max_cursor_height)
8223 		return 0;
8224 
8225 	if (x < 0) {
8226 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8227 		x = 0;
8228 	}
8229 	if (y < 0) {
8230 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8231 		y = 0;
8232 	}
8233 	position->enable = true;
8234 	position->translate_by_source = true;
8235 	position->x = x;
8236 	position->y = y;
8237 	position->x_hotspot = xorigin;
8238 	position->y_hotspot = yorigin;
8239 
8240 	return 0;
8241 }
8242 
8243 static void handle_cursor_update(struct drm_plane *plane,
8244 				 struct drm_plane_state *old_plane_state)
8245 {
8246 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8247 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8248 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8249 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8250 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8251 	uint64_t address = afb ? afb->address : 0;
8252 	struct dc_cursor_position position = {0};
8253 	struct dc_cursor_attributes attributes;
8254 	int ret;
8255 
8256 	if (!plane->state->fb && !old_plane_state->fb)
8257 		return;
8258 
8259 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8260 		      __func__,
8261 		      amdgpu_crtc->crtc_id,
8262 		      plane->state->crtc_w,
8263 		      plane->state->crtc_h);
8264 
8265 	ret = get_cursor_position(plane, crtc, &position);
8266 	if (ret)
8267 		return;
8268 
8269 	if (!position.enable) {
8270 		/* turn off cursor */
8271 		if (crtc_state && crtc_state->stream) {
8272 			mutex_lock(&adev->dm.dc_lock);
8273 			dc_stream_set_cursor_position(crtc_state->stream,
8274 						      &position);
8275 			mutex_unlock(&adev->dm.dc_lock);
8276 		}
8277 		return;
8278 	}
8279 
8280 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8281 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8282 
8283 	memset(&attributes, 0, sizeof(attributes));
8284 	attributes.address.high_part = upper_32_bits(address);
8285 	attributes.address.low_part  = lower_32_bits(address);
8286 	attributes.width             = plane->state->crtc_w;
8287 	attributes.height            = plane->state->crtc_h;
8288 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8289 	attributes.rotation_angle    = 0;
8290 	attributes.attribute_flags.value = 0;
8291 
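	/* DC expects the cursor pitch in pixels, not bytes. */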
8292 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8293 
8294 	if (crtc_state->stream) {
8295 		mutex_lock(&adev->dm.dc_lock);
8296 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8297 							 &attributes))
8298 			DRM_ERROR("DC failed to set cursor attributes\n");
8299 
8300 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8301 						   &position))
8302 			DRM_ERROR("DC failed to set cursor position\n");
8303 		mutex_unlock(&adev->dm.dc_lock);
8304 	}
8305 }
8306 
8307 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8308 {
8309 
8310 	assert_spin_locked(&acrtc->base.dev->event_lock);
8311 	WARN_ON(acrtc->event);
8312 
8313 	acrtc->event = acrtc->base.state->event;
8314 
8315 	/* Set the flip status */
8316 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8317 
8318 	/* Mark this event as consumed */
8319 	acrtc->base.state->event = NULL;
8320 
8321 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8322 		     acrtc->crtc_id);
8323 }
8324 
8325 static void update_freesync_state_on_stream(
8326 	struct amdgpu_display_manager *dm,
8327 	struct dm_crtc_state *new_crtc_state,
8328 	struct dc_stream_state *new_stream,
8329 	struct dc_plane_state *surface,
8330 	u32 flip_timestamp_in_us)
8331 {
8332 	struct mod_vrr_params vrr_params;
8333 	struct dc_info_packet vrr_infopacket = {0};
8334 	struct amdgpu_device *adev = dm->adev;
8335 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8336 	unsigned long flags;
8337 	bool pack_sdp_v1_3 = false;
8338 
8339 	if (!new_stream)
8340 		return;
8341 
8342 	/*
8343 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8344 	 * For now it's sufficient to just guard against these conditions.
8345 	 */
8346 
8347 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8348 		return;
8349 
8350 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8351 	vrr_params = acrtc->dm_irq_params.vrr_params;
8352 
8353 	if (surface) {
8354 		mod_freesync_handle_preflip(
8355 			dm->freesync_module,
8356 			surface,
8357 			new_stream,
8358 			flip_timestamp_in_us,
8359 			&vrr_params);
8360 
8361 		if (adev->family < AMDGPU_FAMILY_AI &&
8362 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8363 			mod_freesync_handle_v_update(dm->freesync_module,
8364 						     new_stream, &vrr_params);
8365 
8366 			/* Need to call this before the frame ends. */
8367 			dc_stream_adjust_vmin_vmax(dm->dc,
8368 						   new_crtc_state->stream,
8369 						   &vrr_params.adjust);
8370 		}
8371 	}
8372 
8373 	mod_freesync_build_vrr_infopacket(
8374 		dm->freesync_module,
8375 		new_stream,
8376 		&vrr_params,
8377 		PACKET_TYPE_VRR,
8378 		TRANSFER_FUNC_UNKNOWN,
8379 		&vrr_infopacket,
8380 		pack_sdp_v1_3);
8381 
8382 	new_crtc_state->freesync_timing_changed |=
8383 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8384 			&vrr_params.adjust,
8385 			sizeof(vrr_params.adjust)) != 0);
8386 
8387 	new_crtc_state->freesync_vrr_info_changed |=
8388 		(memcmp(&new_crtc_state->vrr_infopacket,
8389 			&vrr_infopacket,
8390 			sizeof(vrr_infopacket)) != 0);
8391 
8392 	acrtc->dm_irq_params.vrr_params = vrr_params;
8393 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8394 
8395 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8396 	new_stream->vrr_infopacket = vrr_infopacket;
8397 
8398 	if (new_crtc_state->freesync_vrr_info_changed)
8399 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8400 			      new_crtc_state->base.crtc->base.id,
8401 			      (int)new_crtc_state->base.vrr_enabled,
8402 			      (int)vrr_params.state);
8403 
8404 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8405 }
8406 
8407 static void update_stream_irq_parameters(
8408 	struct amdgpu_display_manager *dm,
8409 	struct dm_crtc_state *new_crtc_state)
8410 {
8411 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8412 	struct mod_vrr_params vrr_params;
8413 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8414 	struct amdgpu_device *adev = dm->adev;
8415 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8416 	unsigned long flags;
8417 
8418 	if (!new_stream)
8419 		return;
8420 
8421 	/*
8422 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8423 	 * For now it's sufficient to just guard against these conditions.
8424 	 */
8425 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8426 		return;
8427 
8428 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8429 	vrr_params = acrtc->dm_irq_params.vrr_params;
8430 
8431 	if (new_crtc_state->vrr_supported &&
8432 	    config.min_refresh_in_uhz &&
8433 	    config.max_refresh_in_uhz) {
8434 		/*
8435 		 * If a freesync-compatible mode was set, config.state will
8436 		 * have been set in atomic check.
8437 		 */
8438 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8439 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8440 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8441 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8442 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8443 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8444 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8445 		} else {
8446 			config.state = new_crtc_state->base.vrr_enabled ?
8447 						     VRR_STATE_ACTIVE_VARIABLE :
8448 						     VRR_STATE_INACTIVE;
8449 		}
8450 	} else {
8451 		config.state = VRR_STATE_UNSUPPORTED;
8452 	}
8453 
8454 	mod_freesync_build_vrr_params(dm->freesync_module,
8455 				      new_stream,
8456 				      &config, &vrr_params);
8457 
8458 	new_crtc_state->freesync_timing_changed |=
8459 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8460 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8461 
8462 	new_crtc_state->freesync_config = config;
8463 	/* Copy state for access from DM IRQ handler */
8464 	acrtc->dm_irq_params.freesync_config = config;
8465 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8466 	acrtc->dm_irq_params.vrr_params = vrr_params;
8467 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8468 }
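
/*
 * Summary of the VRR state selection above, for reference. All rates are in
 * microhertz (uHz), so an illustrative nominal 60 Hz panel reports
 * 60000000 uHz:
 *
 *   vrr_supported with a valid min/max range:
 *     VRR_STATE_ACTIVE_FIXED    - fixed_refresh_in_uhz pins the rate
 *     VRR_STATE_ACTIVE_VARIABLE - userspace enabled VRR on the CRTC
 *     VRR_STATE_INACTIVE        - VRR capable but currently disabled
 *   otherwise:
 *     VRR_STATE_UNSUPPORTED
 */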
8469 
8470 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8471 					    struct dm_crtc_state *new_state)
8472 {
8473 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8474 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8475 
8476 	if (!old_vrr_active && new_vrr_active) {
8477 		/* Transition VRR inactive -> active:
8478 		 * While VRR is active, we must not disable the vblank irq, as a
8479 		 * re-enable after a disable would compute bogus vblank/pflip
8480 		 * timestamps if it happened inside the display front porch.
8481 		 *
8482 		 * We also need the vupdate irq for the actual core vblank
8483 		 * handling at the end of vblank.
8484 		 */
8485 		dm_set_vupdate_irq(new_state->base.crtc, true);
8486 		drm_crtc_vblank_get(new_state->base.crtc);
8487 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8488 				 __func__, new_state->base.crtc->base.id);
8489 	} else if (old_vrr_active && !new_vrr_active) {
8490 		/* Transition VRR active -> inactive:
8491 		 * Allow vblank irq disable again for fixed refresh rate.
8492 		 */
8493 		dm_set_vupdate_irq(new_state->base.crtc, false);
8494 		drm_crtc_vblank_put(new_state->base.crtc);
8495 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8496 				 __func__, new_state->base.crtc->base.id);
8497 	}
8498 }
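
/*
 * Note on the reference pairing above: each inactive -> active transition
 * takes one vblank reference via drm_crtc_vblank_get() and enables the
 * vupdate irq, and each active -> inactive transition undoes exactly that,
 * so the vblank irq stays enabled for precisely the lifetime of a VRR
 * session.
 */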
8499 
8500 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8501 {
8502 	struct drm_plane *plane;
8503 	struct drm_plane_state *old_plane_state;
8504 	int i;
8505 
8506 	/*
8507 	 * TODO: Make this per-stream so we don't issue redundant updates for
8508 	 * commits with multiple streams.
8509 	 */
8510 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8511 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8512 			handle_cursor_update(plane, old_plane_state);
8513 }
8514 
8515 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8516 				    struct dc_state *dc_state,
8517 				    struct drm_device *dev,
8518 				    struct amdgpu_display_manager *dm,
8519 				    struct drm_crtc *pcrtc,
8520 				    bool wait_for_vblank)
8521 {
8522 	uint32_t i;
8523 	uint64_t timestamp_ns;
8524 	struct drm_plane *plane;
8525 	struct drm_plane_state *old_plane_state, *new_plane_state;
8526 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8527 	struct drm_crtc_state *new_pcrtc_state =
8528 			drm_atomic_get_new_crtc_state(state, pcrtc);
8529 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8530 	struct dm_crtc_state *dm_old_crtc_state =
8531 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8532 	int planes_count = 0, vpos, hpos;
8533 	long r;
8534 	unsigned long flags;
8535 	struct amdgpu_bo *abo;
8536 	uint32_t target_vblank, last_flip_vblank;
8537 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8538 	bool pflip_present = false;
8539 	struct {
8540 		struct dc_surface_update surface_updates[MAX_SURFACES];
8541 		struct dc_plane_info plane_infos[MAX_SURFACES];
8542 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8543 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8544 		struct dc_stream_update stream_update;
8545 	} *bundle;
8546 
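	/*
	 * Note: the update bundle is allocated from the heap rather than the
	 * stack; with MAX_SURFACES entries in each of the four per-plane
	 * arrays it is almost certainly too large for the kernel stack.
	 */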
8547 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8548 
8549 	if (!bundle) {
8550 		dm_error("Failed to allocate update bundle\n");
8551 		goto cleanup;
8552 	}
8553 
8554 	/*
8555 	 * Disable the cursor first if we're disabling all the planes.
8556 	 * It'll remain on the screen after the planes are re-enabled
8557 	 * if we don't.
8558 	 */
8559 	if (acrtc_state->active_planes == 0)
8560 		amdgpu_dm_commit_cursors(state);
8561 
8562 	/* update planes when needed */
8563 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8564 		struct drm_crtc *crtc = new_plane_state->crtc;
8565 		struct drm_crtc_state *new_crtc_state;
8566 		struct drm_framebuffer *fb = new_plane_state->fb;
8567 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8568 		bool plane_needs_flip;
8569 		struct dc_plane_state *dc_plane;
8570 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8571 
8572 		/* Cursor plane is handled after stream updates */
8573 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8574 			continue;
8575 
8576 		if (!fb || !crtc || pcrtc != crtc)
8577 			continue;
8578 
8579 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8580 		if (!new_crtc_state->active)
8581 			continue;
8582 
8583 		dc_plane = dm_new_plane_state->dc_state;
8584 
8585 		bundle->surface_updates[planes_count].surface = dc_plane;
8586 		if (new_pcrtc_state->color_mgmt_changed) {
8587 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8588 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8589 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8590 		}
8591 
8592 		fill_dc_scaling_info(new_plane_state,
8593 				     &bundle->scaling_infos[planes_count]);
8594 
8595 		bundle->surface_updates[planes_count].scaling_info =
8596 			&bundle->scaling_infos[planes_count];
8597 
8598 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8599 
8600 		pflip_present = pflip_present || plane_needs_flip;
8601 
8602 		if (!plane_needs_flip) {
8603 			planes_count += 1;
8604 			continue;
8605 		}
8606 
8607 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8608 
8609 		/*
8610 		 * Wait for all fences on this FB. Do a limited wait to avoid
8611 		 * deadlock during GPU reset, when this fence will not signal
8612 		 * but we hold the reservation lock for the BO.
8613 		 */
8614 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8615 					  msecs_to_jiffies(5000));
8616 		if (unlikely(r <= 0))
8617 			DRM_ERROR("Waiting for fences timed out!");
8618 
8619 		fill_dc_plane_info_and_addr(
8620 			dm->adev, new_plane_state,
8621 			afb->tiling_flags,
8622 			&bundle->plane_infos[planes_count],
8623 			&bundle->flip_addrs[planes_count].address,
8624 			afb->tmz_surface, false);
8625 
8626 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8627 				 new_plane_state->plane->index,
8628 				 bundle->plane_infos[planes_count].dcc.enable);
8629 
8630 		bundle->surface_updates[planes_count].plane_info =
8631 			&bundle->plane_infos[planes_count];
8632 
8633 		/*
8634 		 * Only allow immediate flips for fast updates that don't
8635 		 * change FB pitch, DCC state, rotation or mirroring.
8636 		 */
8637 		bundle->flip_addrs[planes_count].flip_immediate =
8638 			crtc->state->async_flip &&
8639 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8640 
8641 		timestamp_ns = ktime_get_ns();
8642 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8643 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8644 		bundle->surface_updates[planes_count].surface = dc_plane;
8645 
8646 		if (!bundle->surface_updates[planes_count].surface) {
8647 			DRM_ERROR("No surface for CRTC: id=%d\n",
8648 					acrtc_attach->crtc_id);
8649 			continue;
8650 		}
8651 
8652 		if (plane == pcrtc->primary)
8653 			update_freesync_state_on_stream(
8654 				dm,
8655 				acrtc_state,
8656 				acrtc_state->stream,
8657 				dc_plane,
8658 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8659 
8660 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8661 				 __func__,
8662 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8663 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8664 
8665 		planes_count += 1;
8666 
8667 	}
8668 
8669 	if (pflip_present) {
8670 		if (!vrr_active) {
8671 			/* Use old throttling in non-vrr fixed refresh rate mode
8672 			 * to keep flip scheduling based on target vblank counts
8673 			 * working in a backwards compatible way, e.g., for
8674 			 * clients using the GLX_OML_sync_control extension or
8675 			 * DRI3/Present extension with defined target_msc.
8676 			 */
8677 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8678 		}
8679 		else {
8680 			/* For variable refresh rate mode only:
8681 			 * Get vblank of last completed flip to avoid > 1 vrr
8682 			 * flips per video frame by use of throttling, but allow
8683 			 * flip programming anywhere in the possibly large
8684 			 * variable vrr vblank interval for fine-grained flip
8685 			 * timing control and more opportunity to avoid stutter
8686 			 * on late submission of flips.
8687 			 */
8688 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8689 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8690 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8691 		}
8692 
8693 		target_vblank = last_flip_vblank + wait_for_vblank;
8694 
8695 		/*
8696 		 * Wait until we're out of the vertical blank period before the one
8697 		 * targeted by the flip
8698 		 */
8699 		while ((acrtc_attach->enabled &&
8700 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8701 							    0, &vpos, &hpos, NULL,
8702 							    NULL, &pcrtc->hwmode)
8703 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8704 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8705 			(int)(target_vblank -
8706 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8707 			usleep_range(1000, 1100);
8708 		}
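		/*
		 * The (int)(target_vblank - current_count) > 0 test above is
		 * the usual wraparound-safe counter comparison: e.g. with a
		 * 32-bit counter, a target of 0x00000002 minus a current
		 * count of 0xfffffffe yields 4 as a signed int, so a target
		 * just past the wrap still reads as "in the future".
		 */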
8709 
8710 		/**
8711 		 * Prepare the flip event for the pageflip interrupt to handle.
8712 		 *
8713 		 * This only works in the case where we've already turned on the
8714 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8715 		 * from 0 -> n planes we have to skip a hardware generated event
8716 		 * and rely on sending it from software.
8717 		 */
8718 		if (acrtc_attach->base.state->event &&
8719 		    acrtc_state->active_planes > 0) {
8720 			drm_crtc_vblank_get(pcrtc);
8721 
8722 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8723 
8724 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8725 			prepare_flip_isr(acrtc_attach);
8726 
8727 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8728 		}
8729 
8730 		if (acrtc_state->stream) {
8731 			if (acrtc_state->freesync_vrr_info_changed)
8732 				bundle->stream_update.vrr_infopacket =
8733 					&acrtc_state->stream->vrr_infopacket;
8734 		}
8735 	}
8736 
8737 	/* Update the planes if changed or disable if we don't have any. */
8738 	if ((planes_count || acrtc_state->active_planes == 0) &&
8739 		acrtc_state->stream) {
8740 #if defined(CONFIG_DRM_AMD_DC_DCN)
8741 		/*
8742 		 * If PSR or idle optimizations are enabled then flush out
8743 		 * any pending work before hardware programming.
8744 		 */
8745 		if (dm->vblank_control_workqueue)
8746 			flush_workqueue(dm->vblank_control_workqueue);
8747 #endif
8748 
8749 		bundle->stream_update.stream = acrtc_state->stream;
8750 		if (new_pcrtc_state->mode_changed) {
8751 			bundle->stream_update.src = acrtc_state->stream->src;
8752 			bundle->stream_update.dst = acrtc_state->stream->dst;
8753 		}
8754 
8755 		if (new_pcrtc_state->color_mgmt_changed) {
8756 			/*
8757 			 * TODO: This isn't fully correct since we've actually
8758 			 * already modified the stream in place.
8759 			 */
8760 			bundle->stream_update.gamut_remap =
8761 				&acrtc_state->stream->gamut_remap_matrix;
8762 			bundle->stream_update.output_csc_transform =
8763 				&acrtc_state->stream->csc_color_matrix;
8764 			bundle->stream_update.out_transfer_func =
8765 				acrtc_state->stream->out_transfer_func;
8766 		}
8767 
8768 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8769 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8770 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8771 
8772 		/*
8773 		 * If FreeSync state on the stream has changed then we need to
8774 		 * re-adjust the min/max bounds now that DC doesn't handle this
8775 		 * as part of commit.
8776 		 */
8777 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8778 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8779 			dc_stream_adjust_vmin_vmax(
8780 				dm->dc, acrtc_state->stream,
8781 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8782 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8783 		}
8784 		mutex_lock(&dm->dc_lock);
8785 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8786 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8787 			amdgpu_dm_psr_disable(acrtc_state->stream);
8788 
8789 		dc_commit_updates_for_stream(dm->dc,
8790 						     bundle->surface_updates,
8791 						     planes_count,
8792 						     acrtc_state->stream,
8793 						     &bundle->stream_update,
8794 						     dc_state);
8795 
8796 		/**
8797 		 * Enable or disable the interrupts on the backend.
8798 		 *
8799 		 * Most pipes are put into power gating when unused.
8800 		 *
8801 		 * When power gating is enabled on a pipe we lose the
8802 		 * interrupt enablement state when power gating is disabled.
8803 		 *
8804 		 * So we need to update the IRQ control state in hardware
8805 		 * whenever the pipe turns on (since it could be previously
8806 		 * power gated) or off (since some pipes can't be power gated
8807 		 * on some ASICs).
8808 		 */
8809 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8810 			dm_update_pflip_irq_state(drm_to_adev(dev),
8811 						  acrtc_attach);
8812 
8813 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8814 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8815 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8816 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8817 
8818 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
8819 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8820 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8821 			struct amdgpu_dm_connector *aconn =
8822 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8823 
8824 			if (aconn->psr_skip_count > 0)
8825 				aconn->psr_skip_count--;
8826 
8827 			/* Allow PSR when skip count is 0. */
8828 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8829 		} else {
8830 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
8831 		}
8832 
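
		/*
		 * The skip count above acts as a simple debounce: PSR entry
		 * is only allowed once enough consecutive fast updates have
		 * drained psr_skip_count to zero, rather than immediately on
		 * the first eligible flip.
		 */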
8833 		mutex_unlock(&dm->dc_lock);
8834 	}
8835 
8836 	/*
8837 	 * Update cursor state *after* programming all the planes.
8838 	 * This avoids redundant programming in the case where we're
8839 	 * disabling a single plane, since those pipes are being disabled.
8840 	 */
8841 	if (acrtc_state->active_planes)
8842 		amdgpu_dm_commit_cursors(state);
8843 
8844 cleanup:
8845 	kfree(bundle);
8846 }
8847 
8848 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8849 				   struct drm_atomic_state *state)
8850 {
8851 	struct amdgpu_device *adev = drm_to_adev(dev);
8852 	struct amdgpu_dm_connector *aconnector;
8853 	struct drm_connector *connector;
8854 	struct drm_connector_state *old_con_state, *new_con_state;
8855 	struct drm_crtc_state *new_crtc_state;
8856 	struct dm_crtc_state *new_dm_crtc_state;
8857 	const struct dc_stream_status *status;
8858 	int i, inst;
8859 
8860 	/* Notify device removals. */
8861 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8862 		if (old_con_state->crtc != new_con_state->crtc) {
8863 			/* CRTC changes require notification. */
8864 			goto notify;
8865 		}
8866 
8867 		if (!new_con_state->crtc)
8868 			continue;
8869 
8870 		new_crtc_state = drm_atomic_get_new_crtc_state(
8871 			state, new_con_state->crtc);
8872 
8873 		if (!new_crtc_state)
8874 			continue;
8875 
8876 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8877 			continue;
8878 
8879 	notify:
8880 		aconnector = to_amdgpu_dm_connector(connector);
8881 
8882 		mutex_lock(&adev->dm.audio_lock);
8883 		inst = aconnector->audio_inst;
8884 		aconnector->audio_inst = -1;
8885 		mutex_unlock(&adev->dm.audio_lock);
8886 
8887 		amdgpu_dm_audio_eld_notify(adev, inst);
8888 	}
8889 
8890 	/* Notify audio device additions. */
8891 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8892 		if (!new_con_state->crtc)
8893 			continue;
8894 
8895 		new_crtc_state = drm_atomic_get_new_crtc_state(
8896 			state, new_con_state->crtc);
8897 
8898 		if (!new_crtc_state)
8899 			continue;
8900 
8901 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8902 			continue;
8903 
8904 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8905 		if (!new_dm_crtc_state->stream)
8906 			continue;
8907 
8908 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8909 		if (!status)
8910 			continue;
8911 
8912 		aconnector = to_amdgpu_dm_connector(connector);
8913 
8914 		mutex_lock(&adev->dm.audio_lock);
8915 		inst = status->audio_inst;
8916 		aconnector->audio_inst = inst;
8917 		mutex_unlock(&adev->dm.audio_lock);
8918 
8919 		amdgpu_dm_audio_eld_notify(adev, inst);
8920 	}
8921 }
8922 
8923 /*
8924  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8925  * @crtc_state: the DRM CRTC state
8926  * @stream_state: the DC stream state.
8927  *
8928  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8929  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8930  */
8931 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8932 						struct dc_stream_state *stream_state)
8933 {
8934 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8935 }
8936 
8937 /**
8938  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8939  * @state: The atomic state to commit
8940  *
8941  * This will tell DC to commit the constructed DC state from atomic_check,
8942  * programming the hardware. Any failure here implies a hardware failure, since
8943  * atomic check should have filtered anything non-kosher.
8944  */
8945 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8946 {
8947 	struct drm_device *dev = state->dev;
8948 	struct amdgpu_device *adev = drm_to_adev(dev);
8949 	struct amdgpu_display_manager *dm = &adev->dm;
8950 	struct dm_atomic_state *dm_state;
8951 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8952 	uint32_t i, j;
8953 	struct drm_crtc *crtc;
8954 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8955 	unsigned long flags;
8956 	bool wait_for_vblank = true;
8957 	struct drm_connector *connector;
8958 	struct drm_connector_state *old_con_state, *new_con_state;
8959 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8960 	int crtc_disable_count = 0;
8961 	bool mode_set_reset_required = false;
8962 
8963 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8964 
8965 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8966 
8967 	dm_state = dm_atomic_get_new_state(state);
8968 	if (dm_state && dm_state->context) {
8969 		dc_state = dm_state->context;
8970 	} else {
8971 		/* No state changes, retain current state. */
8972 		dc_state_temp = dc_create_state(dm->dc);
8973 		ASSERT(dc_state_temp);
8974 		dc_state = dc_state_temp;
8975 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8976 	}
8977 
8978 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8979 				       new_crtc_state, i) {
8980 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8981 
8982 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8983 
8984 		if (old_crtc_state->active &&
8985 		    (!new_crtc_state->active ||
8986 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8987 			manage_dm_interrupts(adev, acrtc, false);
8988 			dc_stream_release(dm_old_crtc_state->stream);
8989 		}
8990 	}
8991 
8992 	drm_atomic_helper_calc_timestamping_constants(state);
8993 
8994 	/* update changed items */
8995 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8996 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8997 
8998 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8999 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9000 
9001 		DRM_DEBUG_ATOMIC(
9002 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9003 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9004 			"connectors_changed:%d\n",
9005 			acrtc->crtc_id,
9006 			new_crtc_state->enable,
9007 			new_crtc_state->active,
9008 			new_crtc_state->planes_changed,
9009 			new_crtc_state->mode_changed,
9010 			new_crtc_state->active_changed,
9011 			new_crtc_state->connectors_changed);
9012 
9013 		/* Disable cursor if disabling crtc */
9014 		if (old_crtc_state->active && !new_crtc_state->active) {
9015 			struct dc_cursor_position position;
9016 
9017 			memset(&position, 0, sizeof(position));
9018 			mutex_lock(&dm->dc_lock);
9019 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9020 			mutex_unlock(&dm->dc_lock);
9021 		}
9022 
9023 		/* Copy all transient state flags into dc state */
9024 		if (dm_new_crtc_state->stream) {
9025 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9026 							    dm_new_crtc_state->stream);
9027 		}
9028 
9029 		/* handles headless hotplug case, updating new_state and
9030 		 * aconnector as needed
9031 		 */
9032 
9033 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9034 
9035 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9036 
9037 			if (!dm_new_crtc_state->stream) {
9038 				/*
9039 				 * This could happen because of issues with
9040 				 * userspace notification delivery.
9041 				 * In this case userspace tries to set a mode on
9042 				 * a display which is in fact disconnected, so
9043 				 * dc_sink is NULL on the aconnector.
9044 				 * We expect a mode reset to come soon.
9045 				 *
9046 				 * This can also happen when an unplug occurs
9047 				 * while the resume sequence is still running.
9048 				 *
9049 				 * In either case, we want to pretend we still
9050 				 * have a sink to keep the pipe running so that
9051 				 * hw state is consistent with the sw state.
9052 				 */
9053 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9054 						__func__, acrtc->base.base.id);
9055 				continue;
9056 			}
9057 
9058 			if (dm_old_crtc_state->stream)
9059 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9060 
9061 			pm_runtime_get_noresume(dev->dev);
9062 
9063 			acrtc->enabled = true;
9064 			acrtc->hw_mode = new_crtc_state->mode;
9065 			crtc->hwmode = new_crtc_state->mode;
9066 			mode_set_reset_required = true;
9067 		} else if (modereset_required(new_crtc_state)) {
9068 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9069 			/* i.e. reset mode */
9070 			if (dm_old_crtc_state->stream)
9071 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9072 
9073 			mode_set_reset_required = true;
9074 		}
9075 	} /* for_each_crtc_in_state() */
9076 
9077 	if (dc_state) {
9078 		/* if there is a mode set or reset, disable eDP PSR */
9079 		if (mode_set_reset_required) {
9080 #if defined(CONFIG_DRM_AMD_DC_DCN)
9081 			if (dm->vblank_control_workqueue)
9082 				flush_workqueue(dm->vblank_control_workqueue);
9083 #endif
9084 			amdgpu_dm_psr_disable_all(dm);
9085 		}
9086 
9087 		dm_enable_per_frame_crtc_master_sync(dc_state);
9088 		mutex_lock(&dm->dc_lock);
9089 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9090 #if defined(CONFIG_DRM_AMD_DC_DCN)
9091 		/* Allow idle optimization when vblank count is 0 for display off */
9092 		if (dm->active_vblank_irq_count == 0)
9093 			dc_allow_idle_optimizations(dm->dc, true);
9094 #endif
9095 		mutex_unlock(&dm->dc_lock);
9096 	}
9097 
9098 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9099 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9100 
9101 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9102 
9103 		if (dm_new_crtc_state->stream != NULL) {
9104 			const struct dc_stream_status *status =
9105 					dc_stream_get_status(dm_new_crtc_state->stream);
9106 
9107 			if (!status)
9108 				status = dc_stream_get_status_from_state(dc_state,
9109 									 dm_new_crtc_state->stream);
9110 			if (!status)
9111 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9112 			else
9113 				acrtc->otg_inst = status->primary_otg_inst;
9114 		}
9115 	}
9116 #ifdef CONFIG_DRM_AMD_DC_HDCP
9117 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9118 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9119 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9120 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9121 
9122 		new_crtc_state = NULL;
9123 
9124 		if (acrtc)
9125 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9126 
9127 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9128 
9129 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9130 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9131 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9132 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9133 			dm_new_con_state->update_hdcp = true;
9134 			continue;
9135 		}
9136 
9137 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9138 			hdcp_update_display(
9139 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9140 				new_con_state->hdcp_content_type,
9141 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9142 	}
9143 #endif
9144 
9145 	/* Handle connector state changes */
9146 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9147 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9148 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9149 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9150 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9151 		struct dc_stream_update stream_update;
9152 		struct dc_info_packet hdr_packet;
9153 		struct dc_stream_status *status = NULL;
9154 		bool abm_changed, hdr_changed, scaling_changed;
9155 
9156 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9157 		memset(&stream_update, 0, sizeof(stream_update));
9158 
9159 		if (acrtc) {
9160 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9161 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9162 		}
9163 
9164 		/* Skip any modesets/resets */
9165 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9166 			continue;
9167 
9168 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9169 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9170 
9171 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9172 							     dm_old_con_state);
9173 
9174 		abm_changed = dm_new_crtc_state->abm_level !=
9175 			      dm_old_crtc_state->abm_level;
9176 
9177 		hdr_changed =
9178 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9179 
9180 		if (!scaling_changed && !abm_changed && !hdr_changed)
9181 			continue;
9182 
9183 		stream_update.stream = dm_new_crtc_state->stream;
9184 		if (scaling_changed) {
9185 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9186 					dm_new_con_state, dm_new_crtc_state->stream);
9187 
9188 			stream_update.src = dm_new_crtc_state->stream->src;
9189 			stream_update.dst = dm_new_crtc_state->stream->dst;
9190 		}
9191 
9192 		if (abm_changed) {
9193 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9194 
9195 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9196 		}
9197 
9198 		if (hdr_changed) {
9199 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9200 			stream_update.hdr_static_metadata = &hdr_packet;
9201 		}
9202 
9203 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9204 
9205 		if (WARN_ON(!status))
9206 			continue;
9207 
9208 		WARN_ON(!status->plane_count);
9209 
9210 		/*
9211 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9212 		 * Here we create an empty update on each plane.
9213 		 * To fix this, DC should permit updating only stream properties.
9214 		 */
9215 		for (j = 0; j < status->plane_count; j++)
9216 			dummy_updates[j].surface = status->plane_states[0];
9217 
9218 
9219 		mutex_lock(&dm->dc_lock);
9220 		dc_commit_updates_for_stream(dm->dc,
9221 						     dummy_updates,
9222 						     status->plane_count,
9223 						     dm_new_crtc_state->stream,
9224 						     &stream_update,
9225 						     dc_state);
9226 		mutex_unlock(&dm->dc_lock);
9227 	}
9228 
9229 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9230 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9231 				      new_crtc_state, i) {
9232 		if (old_crtc_state->active && !new_crtc_state->active)
9233 			crtc_disable_count++;
9234 
9235 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9236 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9237 
9238 		/* Update the freesync config on the crtc state and the irq params */
9239 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9240 
9241 		/* Handle vrr on->off / off->on transitions */
9242 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9243 						dm_new_crtc_state);
9244 	}
9245 
9246 	/**
9247 	 * Enable interrupts for CRTCs that are newly enabled or went through
9248 	 * a modeset. This is intentionally deferred until after the front end
9249 	 * state has been modified, so that the OTG is already on and the IRQ
9250 	 * handlers don't access stale or invalid state.
9251 	 */
9252 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9253 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9254 #ifdef CONFIG_DEBUG_FS
9255 		bool configure_crc = false;
9256 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9257 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9258 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9259 #endif
9260 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9261 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9262 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9263 #endif
9264 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9265 
9266 		if (new_crtc_state->active &&
9267 		    (!old_crtc_state->active ||
9268 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9269 			dc_stream_retain(dm_new_crtc_state->stream);
9270 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9271 			manage_dm_interrupts(adev, acrtc, true);
9272 
9273 #ifdef CONFIG_DEBUG_FS
9274 			/**
9275 			 * Frontend may have changed so reapply the CRC capture
9276 			 * settings for the stream.
9277 			 */
9278 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9279 
9280 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9281 				configure_crc = true;
9282 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9283 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9284 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9285 					acrtc->dm_irq_params.crc_window.update_win = true;
9286 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9287 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9288 					crc_rd_wrk->crtc = crtc;
9289 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9290 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9291 				}
9292 #endif
9293 			}
9294 
9295 			if (configure_crc)
9296 				if (amdgpu_dm_crtc_configure_crc_source(
9297 					crtc, dm_new_crtc_state, cur_crc_src))
9298 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9299 #endif
9300 		}
9301 	}
9302 
9303 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9304 		if (new_crtc_state->async_flip)
9305 			wait_for_vblank = false;
9306 
9307 	/* update planes when needed per crtc */
9308 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9309 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9310 
9311 		if (dm_new_crtc_state->stream)
9312 			amdgpu_dm_commit_planes(state, dc_state, dev,
9313 						dm, crtc, wait_for_vblank);
9314 	}
9315 
9316 	/* Update audio instances for each connector. */
9317 	amdgpu_dm_commit_audio(dev, state);
9318 
9319 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9320 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9321 	/* restore the backlight level */
9322 	for (i = 0; i < dm->num_of_edps; i++) {
9323 		if (dm->backlight_dev[i] &&
9324 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9325 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9326 	}
9327 #endif
9328 	/*
9329 	 * Send a vblank event for all events not handled in flip and
9330 	 * mark the event as consumed for drm_atomic_helper_commit_hw_done.
9331 	 */
9332 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9333 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9334 
9335 		if (new_crtc_state->event)
9336 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9337 
9338 		new_crtc_state->event = NULL;
9339 	}
9340 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9341 
9342 	/* Signal HW programming completion */
9343 	drm_atomic_helper_commit_hw_done(state);
9344 
9345 	if (wait_for_vblank)
9346 		drm_atomic_helper_wait_for_flip_done(dev, state);
9347 
9348 	drm_atomic_helper_cleanup_planes(dev, state);
9349 
9350 	/* return the stolen vga memory back to VRAM */
9351 	if (!adev->mman.keep_stolen_vga_memory)
9352 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9353 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9354 
9355 	/*
9356 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9357 	 * so we can put the GPU into runtime suspend if we're not driving any
9358 	 * displays anymore
9359 	 */
9360 	for (i = 0; i < crtc_disable_count; i++)
9361 		pm_runtime_put_autosuspend(dev->dev);
9362 	pm_runtime_mark_last_busy(dev->dev);
9363 
9364 	if (dc_state_temp)
9365 		dc_release_state(dc_state_temp);
9366 }
9367 
9368 
9369 static int dm_force_atomic_commit(struct drm_connector *connector)
9370 {
9371 	int ret = 0;
9372 	struct drm_device *ddev = connector->dev;
9373 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9374 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9375 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9376 	struct drm_connector_state *conn_state;
9377 	struct drm_crtc_state *crtc_state;
9378 	struct drm_plane_state *plane_state;
9379 
9380 	if (!state)
9381 		return -ENOMEM;
9382 
9383 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9384 
9385 	/* Construct an atomic state to restore previous display settings */
9386 
9387 	/*
9388 	 * Attach connectors to drm_atomic_state
9389 	 */
9390 	conn_state = drm_atomic_get_connector_state(state, connector);
9391 
9392 	ret = PTR_ERR_OR_ZERO(conn_state);
9393 	if (ret)
9394 		goto out;
9395 
9396 	/* Attach crtc to drm_atomic_state */
9397 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9398 
9399 	ret = PTR_ERR_OR_ZERO(crtc_state);
9400 	if (ret)
9401 		goto out;
9402 
9403 	/* force a restore */
9404 	crtc_state->mode_changed = true;
9405 
9406 	/* Attach plane to drm_atomic_state */
9407 	plane_state = drm_atomic_get_plane_state(state, plane);
9408 
9409 	ret = PTR_ERR_OR_ZERO(plane_state);
9410 	if (ret)
9411 		goto out;
9412 
9413 	/* Call commit internally with the state we just constructed */
9414 	ret = drm_atomic_commit(state);
9415 
9416 out:
9417 	drm_atomic_state_put(state);
9418 	if (ret)
9419 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9420 
9421 	return ret;
9422 }
9423 
9424 /*
9425  * This function handles all cases when a set mode does not come upon hotplug.
9426  * This includes when a display is unplugged and then plugged back into the
9427  * same port, and when running without usermode desktop manager support.
9428  */
9429 void dm_restore_drm_connector_state(struct drm_device *dev,
9430 				    struct drm_connector *connector)
9431 {
9432 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9433 	struct amdgpu_crtc *disconnected_acrtc;
9434 	struct dm_crtc_state *acrtc_state;
9435 
9436 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9437 		return;
9438 
9439 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9440 	if (!disconnected_acrtc)
9441 		return;
9442 
9443 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9444 	if (!acrtc_state->stream)
9445 		return;
9446 
9447 	/*
9448 	 * If the previous sink has not been released and is different from the
9449 	 * current one, we deduce we are in a state where we cannot rely on a
9450 	 * usermode call to turn on the display, so we do it here.
9451 	 */
9452 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9453 		dm_force_atomic_commit(&aconnector->base);
9454 }
9455 
9456 /*
9457  * Grabs all modesetting locks to serialize against any blocking commits,
9458  * and waits for completion of all non-blocking commits.
9459  */
9460 static int do_aquire_global_lock(struct drm_device *dev,
9461 				 struct drm_atomic_state *state)
9462 {
9463 	struct drm_crtc *crtc;
9464 	struct drm_crtc_commit *commit;
9465 	long ret;
9466 
9467 	/*
9468 	 * Adding all modeset locks to acquire_ctx will
9469 	 * ensure that when the framework releases it, the
9470 	 * extra locks we are taking here will get released too.
9471 	 */
9472 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9473 	if (ret)
9474 		return ret;
9475 
9476 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9477 		spin_lock(&crtc->commit_lock);
9478 		commit = list_first_entry_or_null(&crtc->commit_list,
9479 				struct drm_crtc_commit, commit_entry);
9480 		if (commit)
9481 			drm_crtc_commit_get(commit);
9482 		spin_unlock(&crtc->commit_lock);
9483 
9484 		if (!commit)
9485 			continue;
9486 
9487 		/*
9488 		 * Make sure all pending HW programming has completed and
9489 		 * page flips are done.
9490 		 */
9491 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9492 
9493 		if (ret > 0)
9494 			ret = wait_for_completion_interruptible_timeout(
9495 					&commit->flip_done, 10*HZ);
9496 
9497 		if (ret == 0)
9498 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9499 				  "timed out\n", crtc->base.id, crtc->name);
9500 
9501 		drm_crtc_commit_put(commit);
9502 	}
9503 
9504 	return ret < 0 ? ret : 0;
9505 }
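
/*
 * For reference: wait_for_completion_interruptible_timeout() returns a
 * negative errno if interrupted by a signal, 0 on timeout, and the number
 * of remaining jiffies otherwise. That is why a 0 result above is reported
 * as a timeout, while a negative result is propagated to the caller.
 */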
9506 
9507 static void get_freesync_config_for_crtc(
9508 	struct dm_crtc_state *new_crtc_state,
9509 	struct dm_connector_state *new_con_state)
9510 {
9511 	struct mod_freesync_config config = {0};
9512 	struct amdgpu_dm_connector *aconnector =
9513 			to_amdgpu_dm_connector(new_con_state->base.connector);
9514 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9515 	int vrefresh = drm_mode_vrefresh(mode);
9516 	bool fs_vid_mode = false;
9517 
9518 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9519 					vrefresh >= aconnector->min_vfreq &&
9520 					vrefresh <= aconnector->max_vfreq;
9521 
9522 	if (new_crtc_state->vrr_supported) {
9523 		new_crtc_state->stream->ignore_msa_timing_param = true;
9524 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9525 
9526 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9527 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9528 		config.vsif_supported = true;
9529 		config.btr = true;
9530 
9531 		if (fs_vid_mode) {
9532 			config.state = VRR_STATE_ACTIVE_FIXED;
9533 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9534 			goto out;
9535 		} else if (new_crtc_state->base.vrr_enabled) {
9536 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9537 		} else {
9538 			config.state = VRR_STATE_INACTIVE;
9539 		}
9540 	}
9541 out:
9542 	new_crtc_state->freesync_config = config;
9543 }
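
/*
 * For reference, the * 1000000 factors above convert the connector's
 * refresh range from Hz to microhertz. A hypothetical 48-144 Hz FreeSync
 * panel would yield min_refresh_in_uhz = 48000000 and
 * max_refresh_in_uhz = 144000000.
 */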
9544 
9545 static void reset_freesync_config_for_crtc(
9546 	struct dm_crtc_state *new_crtc_state)
9547 {
9548 	new_crtc_state->vrr_supported = false;
9549 
9550 	memset(&new_crtc_state->vrr_infopacket, 0,
9551 	       sizeof(new_crtc_state->vrr_infopacket));
9552 }
9553 
9554 static bool
9555 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9556 				 struct drm_crtc_state *new_crtc_state)
9557 {
9558 	struct drm_display_mode old_mode, new_mode;
9559 
9560 	if (!old_crtc_state || !new_crtc_state)
9561 		return false;
9562 
9563 	old_mode = old_crtc_state->mode;
9564 	new_mode = new_crtc_state->mode;
9565 
9566 	if (old_mode.clock       == new_mode.clock &&
9567 	    old_mode.hdisplay    == new_mode.hdisplay &&
9568 	    old_mode.vdisplay    == new_mode.vdisplay &&
9569 	    old_mode.htotal      == new_mode.htotal &&
9570 	    old_mode.vtotal      != new_mode.vtotal &&
9571 	    old_mode.hsync_start == new_mode.hsync_start &&
9572 	    old_mode.vsync_start != new_mode.vsync_start &&
9573 	    old_mode.hsync_end   == new_mode.hsync_end &&
9574 	    old_mode.vsync_end   != new_mode.vsync_end &&
9575 	    old_mode.hskew       == new_mode.hskew &&
9576 	    old_mode.vscan       == new_mode.vscan &&
9577 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9578 	    (new_mode.vsync_end - new_mode.vsync_start))
9579 		return true;
9580 
9581 	return false;
9582 }
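
/*
 * In other words, the check above returns true only when vtotal,
 * vsync_start and vsync_end have all changed while every other timing
 * parameter, including the vsync pulse width, is identical. That leaves
 * the vertical front porch as the only thing that moved, which is exactly
 * the adjustment a FreeSync fixed video mode makes.
 */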
9583 
9584 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9585 	uint64_t num, den, res;
9586 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9587 
9588 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9589 
9590 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9591 	den = (unsigned long long)new_crtc_state->mode.htotal *
9592 	      (unsigned long long)new_crtc_state->mode.vtotal;
9593 
9594 	res = div_u64(num, den);
9595 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9596 }
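
/*
 * A worked example of the computation above, using an illustrative 1080p
 * CEA timing: mode.clock = 148500 (kHz) and htotal * vtotal =
 * 2200 * 1125 = 2475000 pixels per frame, so
 *
 *   res = (148500 * 1000 * 1000000) / 2475000 = 60000000 uHz = 60 Hz
 */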
9597 
9598 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9599 				struct drm_atomic_state *state,
9600 				struct drm_crtc *crtc,
9601 				struct drm_crtc_state *old_crtc_state,
9602 				struct drm_crtc_state *new_crtc_state,
9603 				bool enable,
9604 				bool *lock_and_validation_needed)
9605 {
9606 	struct dm_atomic_state *dm_state = NULL;
9607 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9608 	struct dc_stream_state *new_stream;
9609 	int ret = 0;
9610 
9611 	/*
9612 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9613 	 * dc_validation_set, and update changed items there.
9614 	 */
9615 	struct amdgpu_crtc *acrtc = NULL;
9616 	struct amdgpu_dm_connector *aconnector = NULL;
9617 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9618 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9619 
9620 	new_stream = NULL;
9621 
9622 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9623 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624 	acrtc = to_amdgpu_crtc(crtc);
9625 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9626 
9627 	/* TODO This hack should go away */
9628 	if (aconnector && enable) {
9629 		/* Make sure fake sink is created in plug-in scenario */
9630 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9631 							    &aconnector->base);
9632 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9633 							    &aconnector->base);
9634 
9635 		if (IS_ERR(drm_new_conn_state)) {
9636 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9637 			goto fail;
9638 		}
9639 
9640 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9641 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9642 
9643 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9644 			goto skip_modeset;
9645 
9646 		new_stream = create_validate_stream_for_sink(aconnector,
9647 							     &new_crtc_state->mode,
9648 							     dm_new_conn_state,
9649 							     dm_old_crtc_state->stream);
9650 
9651 		/*
9652 		 * We can have no stream on ACTION_SET if a display
9653 		 * was disconnected during S3; in this case it is not an
9654 		 * error, the OS will be updated after detection and
9655 		 * will do the right thing on the next atomic commit.
9656 		 */
9657 
9658 		if (!new_stream) {
9659 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9660 					__func__, acrtc->base.base.id);
9661 			ret = -ENOMEM;
9662 			goto fail;
9663 		}
9664 
9665 		/*
9666 		 * TODO: Check VSDB bits to decide whether this should
9667 		 * be enabled or not.
9668 		 */
9669 		new_stream->triggered_crtc_reset.enabled =
9670 			dm->force_timing_sync;
9671 
9672 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9673 
9674 		ret = fill_hdr_info_packet(drm_new_conn_state,
9675 					   &new_stream->hdr_static_metadata);
9676 		if (ret)
9677 			goto fail;
9678 
9679 		/*
9680 		 * If we already removed the old stream from the context
9681 		 * (and set the new stream to NULL) then we can't reuse
9682 		 * the old stream even if the stream and scaling are unchanged.
9683 		 * We'll hit the BUG_ON and get a black screen.
9684 		 *
9685 		 * TODO: Refactor this function to allow this check to work
9686 		 * in all conditions.
9687 		 */
9688 		if (amdgpu_freesync_vid_mode &&
9689 		    dm_new_crtc_state->stream &&
9690 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9691 			goto skip_modeset;
9692 
9693 		if (dm_new_crtc_state->stream &&
9694 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9695 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9696 			new_crtc_state->mode_changed = false;
9697 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9698 					 new_crtc_state->mode_changed);
9699 		}
9700 	}
9701 
9702 	/* mode_changed flag may get updated above, need to check again */
9703 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9704 		goto skip_modeset;
9705 
9706 	DRM_DEBUG_ATOMIC(
9707 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9708 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9709 		"connectors_changed:%d\n",
9710 		acrtc->crtc_id,
9711 		new_crtc_state->enable,
9712 		new_crtc_state->active,
9713 		new_crtc_state->planes_changed,
9714 		new_crtc_state->mode_changed,
9715 		new_crtc_state->active_changed,
9716 		new_crtc_state->connectors_changed);
9717 
9718 	/* Remove stream for any changed/disabled CRTC */
9719 	if (!enable) {
9720 
9721 		if (!dm_old_crtc_state->stream)
9722 			goto skip_modeset;
9723 
9724 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9725 		    is_timing_unchanged_for_freesync(new_crtc_state,
9726 						     old_crtc_state)) {
9727 			new_crtc_state->mode_changed = false;
9728 			DRM_DEBUG_DRIVER(
9729 				"Mode change not required for front porch change, "
9730 				"setting mode_changed to %d",
9731 				new_crtc_state->mode_changed);
9732 
9733 			set_freesync_fixed_config(dm_new_crtc_state);
9734 
9735 			goto skip_modeset;
9736 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9737 			   is_freesync_video_mode(&new_crtc_state->mode,
9738 						  aconnector)) {
9739 			struct drm_display_mode *high_mode;
9740 
9741 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
9742 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9743 				set_freesync_fixed_config(dm_new_crtc_state);
9744 			}
9745 		}
9746 
9747 		ret = dm_atomic_get_state(state, &dm_state);
9748 		if (ret)
9749 			goto fail;
9750 
9751 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9752 				crtc->base.id);
9753 
9754 		/* i.e. reset mode */
9755 		if (dc_remove_stream_from_ctx(
9756 				dm->dc,
9757 				dm_state->context,
9758 				dm_old_crtc_state->stream) != DC_OK) {
9759 			ret = -EINVAL;
9760 			goto fail;
9761 		}
9762 
9763 		dc_stream_release(dm_old_crtc_state->stream);
9764 		dm_new_crtc_state->stream = NULL;
9765 
9766 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9767 
9768 		*lock_and_validation_needed = true;
9769 
9770 	} else { /* Add stream for any updated/enabled CRTC */
9771 		/*
9772 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
9773 		 * added MST connectors are not found in the existing crtc_state in chained mode.
9774 		 * TODO: need to dig out the root cause of this.
9775 		 */
9776 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9777 			goto skip_modeset;
9778 
9779 		if (modereset_required(new_crtc_state))
9780 			goto skip_modeset;
9781 
9782 		if (modeset_required(new_crtc_state, new_stream,
9783 				     dm_old_crtc_state->stream)) {
9784 
9785 			WARN_ON(dm_new_crtc_state->stream);
9786 
9787 			ret = dm_atomic_get_state(state, &dm_state);
9788 			if (ret)
9789 				goto fail;
9790 
9791 			dm_new_crtc_state->stream = new_stream;
9792 
9793 			dc_stream_retain(new_stream);
9794 
9795 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9796 					 crtc->base.id);
9797 
9798 			if (dc_add_stream_to_ctx(
9799 					dm->dc,
9800 					dm_state->context,
9801 					dm_new_crtc_state->stream) != DC_OK) {
9802 				ret = -EINVAL;
9803 				goto fail;
9804 			}
9805 
9806 			*lock_and_validation_needed = true;
9807 		}
9808 	}
9809 
9810 skip_modeset:
9811 	/* Release extra reference */
9812 	if (new_stream)
9813 		dc_stream_release(new_stream);
9814 
9815 	/*
9816 	 * We want to do dc stream updates that do not require a
9817 	 * full modeset below.
9818 	 */
9819 	if (!(enable && aconnector && new_crtc_state->active))
9820 		return 0;
9821 	/*
9822 	 * Given the above conditions, the dc state cannot be NULL because:
9823 	 * 1. We're in the process of enabling CRTCs (the stream has just
9824 	 *    been added to the dc context, or is already on the context),
9825 	 * 2. the CRTC has a valid connector attached, and
9826 	 * 3. it is currently active and enabled.
9827 	 * => The dc stream state currently exists.
9828 	 */
9829 	BUG_ON(dm_new_crtc_state->stream == NULL);
9830 
9831 	/* Scaling or underscan settings */
9832 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9833 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9834 		update_stream_scaling_settings(
9835 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9836 
9837 	/* ABM settings */
9838 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9839 
9840 	/*
9841 	 * Color management settings. We also update color properties
9842 	 * when a modeset is needed, to ensure it gets reprogrammed.
9843 	 */
9844 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9845 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9846 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9847 		if (ret)
9848 			goto fail;
9849 	}
9850 
9851 	/* Update Freesync settings. */
9852 	get_freesync_config_for_crtc(dm_new_crtc_state,
9853 				     dm_new_conn_state);
9854 
9855 	return ret;
9856 
9857 fail:
9858 	if (new_stream)
9859 		dc_stream_release(new_stream);
9860 	return ret;
9861 }
9862 
9863 static bool should_reset_plane(struct drm_atomic_state *state,
9864 			       struct drm_plane *plane,
9865 			       struct drm_plane_state *old_plane_state,
9866 			       struct drm_plane_state *new_plane_state)
9867 {
9868 	struct drm_plane *other;
9869 	struct drm_plane_state *old_other_state, *new_other_state;
9870 	struct drm_crtc_state *new_crtc_state;
9871 	int i;
9872 
9873 	/*
9874 	 * TODO: Remove this hack once the checks below are sufficient
9875 	 * enough to determine when we need to reset all the planes on
9876 	 * to determine when we need to reset all the planes on
9877 	 */
9878 	if (state->allow_modeset)
9879 		return true;
9880 
9881 	/* Exit early if we know that we're adding or removing the plane. */
9882 	if (old_plane_state->crtc != new_plane_state->crtc)
9883 		return true;
9884 
9885 	/* old crtc == new_crtc == NULL, plane not in context. */
9886 	if (!new_plane_state->crtc)
9887 		return false;
9888 
9889 	new_crtc_state =
9890 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9891 
9892 	if (!new_crtc_state)
9893 		return true;
9894 
9895 	/* CRTC Degamma changes currently require us to recreate planes. */
9896 	if (new_crtc_state->color_mgmt_changed)
9897 		return true;
9898 
9899 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9900 		return true;
9901 
9902 	/*
9903 	 * If there are any new primary or overlay planes being added or
9904 	 * removed then the z-order can potentially change. To ensure
9905 	 * correct z-order and pipe acquisition the current DC architecture
9906 	 * requires us to remove and recreate all existing planes.
9907 	 *
9908 	 * TODO: Come up with a more elegant solution for this.
9909 	 */
9910 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9911 		struct amdgpu_framebuffer *old_afb, *new_afb;
9912 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9913 			continue;
9914 
9915 		if (old_other_state->crtc != new_plane_state->crtc &&
9916 		    new_other_state->crtc != new_plane_state->crtc)
9917 			continue;
9918 
9919 		if (old_other_state->crtc != new_other_state->crtc)
9920 			return true;
9921 
9922 		/* Src/dst size and scaling updates. */
9923 		if (old_other_state->src_w != new_other_state->src_w ||
9924 		    old_other_state->src_h != new_other_state->src_h ||
9925 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9926 		    old_other_state->crtc_h != new_other_state->crtc_h)
9927 			return true;
9928 
9929 		/* Rotation / mirroring updates. */
9930 		if (old_other_state->rotation != new_other_state->rotation)
9931 			return true;
9932 
9933 		/* Blending updates. */
9934 		if (old_other_state->pixel_blend_mode !=
9935 		    new_other_state->pixel_blend_mode)
9936 			return true;
9937 
9938 		/* Alpha updates. */
9939 		if (old_other_state->alpha != new_other_state->alpha)
9940 			return true;
9941 
9942 		/* Colorspace changes. */
9943 		if (old_other_state->color_range != new_other_state->color_range ||
9944 		    old_other_state->color_encoding != new_other_state->color_encoding)
9945 			return true;
9946 
9947 		/* Framebuffer checks fall at the end. */
9948 		if (!old_other_state->fb || !new_other_state->fb)
9949 			continue;
9950 
9951 		/* Pixel format changes can require bandwidth updates. */
9952 		if (old_other_state->fb->format != new_other_state->fb->format)
9953 			return true;
9954 
9955 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9956 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9957 
9958 		/* Tiling and DCC changes also require bandwidth updates. */
9959 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9960 		    old_afb->base.modifier != new_afb->base.modifier)
9961 			return true;
9962 	}
9963 
9964 	return false;
9965 }
9966 
9967 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9968 			      struct drm_plane_state *new_plane_state,
9969 			      struct drm_framebuffer *fb)
9970 {
9971 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9972 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9973 	unsigned int pitch;
9974 	bool linear;
9975 
9976 	if (fb->width > new_acrtc->max_cursor_width ||
9977 	    fb->height > new_acrtc->max_cursor_height) {
9978 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9979 				 new_plane_state->fb->width,
9980 				 new_plane_state->fb->height);
9981 		return -EINVAL;
9982 	}
9983 	if (new_plane_state->src_w != fb->width << 16 ||
9984 	    new_plane_state->src_h != fb->height << 16) {
9985 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9986 		return -EINVAL;
9987 	}
9988 
9989 	/* Pitch in pixels */
9990 	pitch = fb->pitches[0] / fb->format->cpp[0];
9991 
9992 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9994 				 fb->width, pitch);
9995 		return -EINVAL;
9996 	}
9997 
9998 	switch (pitch) {
9999 	case 64:
10000 	case 128:
10001 	case 256:
10002 		/* FB pitch is supported by cursor plane */
10003 		break;
10004 	default:
10005 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10006 		return -EINVAL;
10007 	}
10008 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10011 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10012 		if (adev->family < AMDGPU_FAMILY_AI) {
10013 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10014 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10015 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10016 		} else {
10017 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10018 		}
10019 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10021 			return -EINVAL;
10022 		}
10023 	}
10024 
10025 	return 0;
10026 }
10027 
10028 static int dm_update_plane_state(struct dc *dc,
10029 				 struct drm_atomic_state *state,
10030 				 struct drm_plane *plane,
10031 				 struct drm_plane_state *old_plane_state,
10032 				 struct drm_plane_state *new_plane_state,
10033 				 bool enable,
10034 				 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

10047 	new_plane_crtc = new_plane_state->crtc;
10048 	old_plane_crtc = old_plane_state->crtc;
10049 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10050 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10051 
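	/*
	 * Cursor planes never take the DC plane path below: there is no
	 * dedicated hardware cursor plane on DCE/DCN, so only the cursor
	 * FB and cropping constraints are validated here.
	 */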
10052 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10053 		if (!enable || !new_plane_crtc ||
10054 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10055 			return 0;
10056 
10057 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10058 
10059 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10060 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10061 			return -EINVAL;
10062 		}
10063 
10064 		if (new_plane_state->fb) {
10065 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10066 						 new_plane_state->fb);
10067 			if (ret)
10068 				return ret;
10069 		}
10070 
10071 		return 0;
10072 	}
10073 
10074 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10075 					 new_plane_state);
10076 
10077 	/* Remove any changed/removed planes */
10078 	if (!enable) {
10079 		if (!needs_reset)
10080 			return 0;
10081 
10082 		if (!old_plane_crtc)
10083 			return 0;
10084 
10085 		old_crtc_state = drm_atomic_get_old_crtc_state(
10086 				state, old_plane_crtc);
10087 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10088 
10089 		if (!dm_old_crtc_state->stream)
10090 			return 0;
10091 
10092 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10093 				plane->base.id, old_plane_crtc->base.id);
10094 
10095 		ret = dm_atomic_get_state(state, &dm_state);
10096 		if (ret)
10097 			return ret;
10098 
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
10110 		dm_new_plane_state->dc_state = NULL;
10111 
10112 		*lock_and_validation_needed = true;
10113 
10114 	} else { /* Add new planes */
10115 		struct dc_plane_state *dc_new_plane_state;
10116 
10117 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10118 			return 0;
10119 
10120 		if (!new_plane_crtc)
10121 			return 0;
10122 
10123 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10124 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10125 
10126 		if (!dm_new_crtc_state->stream)
10127 			return 0;
10128 
10129 		if (!needs_reset)
10130 			return 0;
10131 
10132 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10133 		if (ret)
10134 			return ret;
10135 
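		/* A freshly added plane must not already carry a DC plane state. */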
10136 		WARN_ON(dm_new_plane_state->dc_state);
10137 
10138 		dc_new_plane_state = dc_create_plane_state(dc);
10139 		if (!dc_new_plane_state)
10140 			return -ENOMEM;
10141 
10142 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10143 				 plane->base.id, new_plane_crtc->base.id);
10144 
10145 		ret = fill_dc_plane_attributes(
10146 			drm_to_adev(new_plane_crtc->dev),
10147 			dc_new_plane_state,
10148 			new_plane_state,
10149 			new_crtc_state);
10150 		if (ret) {
10151 			dc_plane_state_release(dc_new_plane_state);
10152 			return ret;
10153 		}
10154 
10155 		ret = dm_atomic_get_state(state, &dm_state);
10156 		if (ret) {
10157 			dc_plane_state_release(dc_new_plane_state);
10158 			return ret;
10159 		}
10160 
10161 		/*
10162 		 * Any atomic check errors that occur after this will
10163 		 * not need a release. The plane state will be attached
10164 		 * to the stream, and therefore part of the atomic
10165 		 * state. It'll be released when the atomic state is
10166 		 * cleaned.
10167 		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}
10177 
10178 		dm_new_plane_state->dc_state = dc_new_plane_state;
10179 
10180 		/* Tell DC to do a full surface update every time there
10181 		 * is a plane change. Inefficient, but works for now.
10182 		 */
10183 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10184 
10185 		*lock_and_validation_needed = true;
10186 	}
10189 	return ret;
10190 }
10191 
10192 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10193 				struct drm_crtc *crtc,
10194 				struct drm_crtc_state *new_crtc_state)
10195 {
10196 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10197 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10198 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
10203 
10204 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10205 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10206 	if (!new_cursor_state || !new_primary_state ||
10207 	    !new_cursor_state->fb || !new_primary_state->fb) {
10208 		return 0;
10209 	}
10210 
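	/*
	 * Scales are compared in units of 0.1%: e.g. a 64x64 cursor FB
	 * (src_w = 64 << 16) displayed at 128x128 gives a scale of 2000,
	 * i.e. 2x.
	 */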
10211 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10212 			 (new_cursor_state->src_w >> 16);
10213 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10214 			 (new_cursor_state->src_h >> 16);
10215 
10216 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10217 			 (new_primary_state->src_w >> 16);
10218 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10219 			 (new_primary_state->src_h >> 16);
10220 
10221 	if (cursor_scale_w != primary_scale_w ||
10222 	    cursor_scale_h != primary_scale_h) {
10223 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10224 		return -EINVAL;
10225 	}
10226 
10227 	return 0;
10228 }
10229 
10230 #if defined(CONFIG_DRM_AMD_DC_DCN)
10231 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10232 {
10233 	struct drm_connector *connector;
10234 	struct drm_connector_state *conn_state;
10235 	struct amdgpu_dm_connector *aconnector = NULL;
10236 	int i;
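
	/* Find an MST connector driven by this CRTC, if any. */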
10237 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10238 		if (conn_state->crtc != crtc)
10239 			continue;
10240 
10241 		aconnector = to_amdgpu_dm_connector(connector);
10242 		if (!aconnector->port || !aconnector->mst_port)
10243 			aconnector = NULL;
10244 		else
10245 			break;
10246 	}
10247 
10248 	if (!aconnector)
10249 		return 0;
10250 
10251 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10252 }
10253 #endif
10254 
10255 static int validate_overlay(struct drm_atomic_state *state)
10256 {
10257 	int i;
10258 	struct drm_plane *plane;
10259 	struct drm_plane_state *new_plane_state;
10260 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10261 
10262 	/* Check if primary plane is contained inside overlay */
10263 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10264 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10265 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10266 				return 0;
10267 
10268 			overlay_state = new_plane_state;
10269 			continue;
10270 		}
10271 	}
10272 
10273 	/* check if we're making changes to the overlay plane */
10274 	if (!overlay_state)
10275 		return 0;
10276 
10277 	/* check if overlay plane is enabled */
10278 	if (!overlay_state->crtc)
10279 		return 0;
10280 
10281 	/* find the primary plane for the CRTC that the overlay is enabled on */
10282 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10283 	if (IS_ERR(primary_state))
10284 		return PTR_ERR(primary_state);
10285 
10286 	/* check if primary plane is enabled */
10287 	if (!primary_state->crtc)
10288 		return 0;
10289 
10290 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10291 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10292 	    primary_state->crtc_y < overlay_state->crtc_y ||
10293 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10294 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10295 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10296 		return -EINVAL;
10297 	}
10298 
10299 	return 0;
10300 }
10301 
10302 /**
10303  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10304  * @dev: The DRM device
10305  * @state: The atomic state to commit
10306  *
10307  * Validate that the given atomic state is programmable by DC into hardware.
10308  * This involves constructing a &struct dc_state reflecting the new hardware
10309  * state we wish to commit, then querying DC to see if it is programmable. It's
10310  * important not to modify the existing DC state. Otherwise, atomic_check
10311  * may unexpectedly commit hardware changes.
10312  *
10313  * When validating the DC state, it's important that the right locks are
 * acquired. For the full-update case, which removes/adds/updates streams on
 * one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full-update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
10318  *
10319  * Note that DM adds the affected connectors for all CRTCs in state, when that
10320  * might not seem necessary. This is because DC stream creation requires the
10321  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10322  * be possible but non-trivial - a possible TODO item.
10323  *
 * Return: 0 on success, or a negative error code if validation failed.
10325  */
10326 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10327 				  struct drm_atomic_state *state)
10328 {
10329 	struct amdgpu_device *adev = drm_to_adev(dev);
10330 	struct dm_atomic_state *dm_state = NULL;
10331 	struct dc *dc = adev->dm.dc;
10332 	struct drm_connector *connector;
10333 	struct drm_connector_state *old_con_state, *new_con_state;
10334 	struct drm_crtc *crtc;
10335 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10336 	struct drm_plane *plane;
10337 	struct drm_plane_state *old_plane_state, *new_plane_state;
10338 	enum dc_status status;
10339 	int ret, i;
10340 	bool lock_and_validation_needed = false;
10341 	struct dm_crtc_state *dm_old_crtc_state;
10342 #if defined(CONFIG_DRM_AMD_DC_DCN)
10343 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10344 #endif
10345 
10346 	trace_amdgpu_dm_atomic_check_begin(state);
10347 
10348 	ret = drm_atomic_helper_check_modeset(dev, state);
10349 	if (ret)
10350 		goto fail;
10351 
10352 	/* Check connector changes */
10353 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10354 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10355 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10356 
10357 		/* Skip connectors that are disabled or part of modeset already. */
10358 		if (!old_con_state->crtc && !new_con_state->crtc)
10359 			continue;
10360 
10361 		if (!new_con_state->crtc)
10362 			continue;
10363 
10364 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10365 		if (IS_ERR(new_crtc_state)) {
10366 			ret = PTR_ERR(new_crtc_state);
10367 			goto fail;
10368 		}
10369 
10370 		if (dm_old_con_state->abm_level !=
10371 		    dm_new_con_state->abm_level)
10372 			new_crtc_state->connectors_changed = true;
10373 	}
10374 
10375 #if defined(CONFIG_DRM_AMD_DC_DCN)
10376 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10377 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10378 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10379 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10380 				if (ret)
10381 					goto fail;
10382 			}
10383 		}
10384 	}
10385 #endif
10386 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10387 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10388 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;
10394 
10395 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10396 		if (ret)
10397 			goto fail;
10398 
10399 		if (!new_crtc_state->enable)
10400 			continue;
10401 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
10405 
10406 		ret = drm_atomic_add_affected_planes(state, crtc);
10407 		if (ret)
10408 			goto fail;
10409 
10410 		if (dm_old_crtc_state->dsc_force_changed)
10411 			new_crtc_state->mode_changed = true;
10412 	}
10413 
10414 	/*
10415 	 * Add all primary and overlay planes on the CRTC to the state
10416 	 * whenever a plane is enabled to maintain correct z-ordering
10417 	 * and to enable fast surface updates.
10418 	 */
10419 	drm_for_each_crtc(crtc, dev) {
10420 		bool modified = false;
10421 
10422 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10423 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10424 				continue;
10425 
10426 			if (new_plane_state->crtc == crtc ||
10427 			    old_plane_state->crtc == crtc) {
10428 				modified = true;
10429 				break;
10430 			}
10431 		}
10432 
10433 		if (!modified)
10434 			continue;
10435 
10436 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10437 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10438 				continue;
10439 
10440 			new_plane_state =
10441 				drm_atomic_get_plane_state(state, plane);
10442 
10443 			if (IS_ERR(new_plane_state)) {
10444 				ret = PTR_ERR(new_plane_state);
10445 				goto fail;
10446 			}
10447 		}
10448 	}
10449 
	/* Remove existing planes if they are modified */
10451 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10452 		ret = dm_update_plane_state(dc, state, plane,
10453 					    old_plane_state,
10454 					    new_plane_state,
10455 					    false,
10456 					    &lock_and_validation_needed);
10457 		if (ret)
10458 			goto fail;
10459 	}
10460 
10461 	/* Disable all crtcs which require disable */
10462 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10463 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10464 					   old_crtc_state,
10465 					   new_crtc_state,
10466 					   false,
10467 					   &lock_and_validation_needed);
10468 		if (ret)
10469 			goto fail;
10470 	}
10471 
10472 	/* Enable all crtcs which require enable */
10473 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10474 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10475 					   old_crtc_state,
10476 					   new_crtc_state,
10477 					   true,
10478 					   &lock_and_validation_needed);
10479 		if (ret)
10480 			goto fail;
10481 	}
10482 
10483 	ret = validate_overlay(state);
10484 	if (ret)
10485 		goto fail;
10486 
10487 	/* Add new/modified planes */
10488 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10489 		ret = dm_update_plane_state(dc, state, plane,
10490 					    old_plane_state,
10491 					    new_plane_state,
10492 					    true,
10493 					    &lock_and_validation_needed);
10494 		if (ret)
10495 			goto fail;
10496 	}
10497 
10498 	/* Run this here since we want to validate the streams we created */
10499 	ret = drm_atomic_helper_check_planes(dev, state);
10500 	if (ret)
10501 		goto fail;
10502 
10503 	/* Check cursor planes scaling */
10504 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10505 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10506 		if (ret)
10507 			goto fail;
10508 	}
10509 
10510 	if (state->legacy_cursor_update) {
10511 		/*
10512 		 * This is a fast cursor update coming from the plane update
10513 		 * helper, check if it can be done asynchronously for better
10514 		 * performance.
10515 		 */
10516 		state->async_update =
10517 			!drm_atomic_helper_async_check(dev, state);
10518 
10519 		/*
10520 		 * Skip the remaining global validation if this is an async
10521 		 * update. Cursor updates can be done without affecting
10522 		 * state or bandwidth calcs and this avoids the performance
10523 		 * penalty of locking the private state object and
10524 		 * allocating a new dc_state.
10525 		 */
10526 		if (state->async_update)
10527 			return 0;
10528 	}
10529 
	/* Check scaling and underscan changes */
10531 	/* TODO Removed scaling changes validation due to inability to commit
10532 	 * new stream into context w\o causing full reset. Need to
	 * new stream into context w/o causing full reset. Need to
10534 	 */
10535 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10536 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10537 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10538 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10539 
10540 		/* Skip any modesets/resets */
10541 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10542 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10543 			continue;
10544 
		/* Skip anything that is not a scaling or underscan change */
10546 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10547 			continue;
10548 
10549 		lock_and_validation_needed = true;
10550 	}
10551 
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish, because our IRQ handlers reference DRM state
	 * directly - we can end up disabling interrupts too early if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
10565 	if (lock_and_validation_needed) {
10566 		ret = dm_atomic_get_state(state, &dm_state);
10567 		if (ret)
10568 			goto fail;
10569 
10570 		ret = do_aquire_global_lock(dev, state);
10571 		if (ret)
10572 			goto fail;
10573 
10574 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			/* Don't fall through to fail with ret == 0. */
			ret = -EINVAL;
			goto fail;
		}
10577 
10578 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10579 		if (ret)
10580 			goto fail;
10581 #endif
10582 
		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or we risk getting stuck in
		 * an infinite loop and eventually hanging.
		 */
10589 		ret = drm_dp_mst_atomic_check(state);
10590 		if (ret)
10591 			goto fail;
10592 		status = dc_validate_global_state(dc, dm_state->context, false);
10593 		if (status != DC_OK) {
10594 			drm_dbg_atomic(dev,
10595 				       "DC global validation failure: %s (%d)",
10596 				       dc_status_to_str(status), status);
10597 			ret = -EINVAL;
10598 			goto fail;
10599 		}
10600 	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and they can
		 * have their commit work done in parallel with other commits
		 * not touching the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation, we need to
		 * free it and retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free some
		 * memory and avoid a possible use-after-free later.
		 */
10614 
10615 		for (i = 0; i < state->num_private_objs; i++) {
10616 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10617 
10618 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10619 				int j = state->num_private_objs-1;
10620 
10621 				dm_atomic_destroy_state(obj,
10622 						state->private_objs[i].state);
10623 
10624 				/* If i is not at the end of the array then the
10625 				 * last element needs to be moved to where i was
10626 				 * before the array can safely be truncated.
10627 				 */
10628 				if (i != j)
10629 					state->private_objs[i] =
10630 						state->private_objs[j];
10631 
10632 				state->private_objs[j].ptr = NULL;
10633 				state->private_objs[j].state = NULL;
10634 				state->private_objs[j].old_state = NULL;
10635 				state->private_objs[j].new_state = NULL;
10636 
10637 				state->num_private_objs = j;
10638 				break;
10639 			}
10640 		}
10641 	}
10642 
10643 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10645 		struct dm_crtc_state *dm_new_crtc_state =
10646 			to_dm_crtc_state(new_crtc_state);
10647 
10648 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10649 							 UPDATE_TYPE_FULL :
10650 							 UPDATE_TYPE_FAST;
10651 	}
10652 
10653 	/* Must be success */
10654 	WARN_ON(ret);
10655 
10656 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10657 
10658 	return ret;
10659 
10660 fail:
10661 	if (ret == -EDEADLK)
10662 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10663 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10664 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10665 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10667 
10668 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10669 
10670 	return ret;
10671 }
10672 
10673 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10674 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10675 {
10676 	uint8_t dpcd_data;
10677 	bool capable = false;
10678 
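	/*
	 * DPCD 0x0005 (DOWN_STREAM_PORT_COUNT) bit 6 reports whether the
	 * sink can ignore the MSA timing parameters, a prerequisite for
	 * variable refresh over DP.
	 */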
10679 	if (amdgpu_dm_connector->dc_link &&
10680 		dm_helpers_dp_read_dpcd(
10681 				NULL,
10682 				amdgpu_dm_connector->dc_link,
10683 				DP_DOWN_STREAM_PORT_COUNT,
10684 				&dpcd_data,
10685 				sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10687 	}
10688 
10689 	return capable;
10690 }
10691 
10692 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10693 		unsigned int offset,
10694 		unsigned int total_length,
10695 		uint8_t *data,
10696 		unsigned int length,
10697 		struct amdgpu_hdmi_vsdb_info *vsdb)
10698 {
10699 	bool res;
10700 	union dmub_rb_cmd cmd;
10701 	struct dmub_cmd_send_edid_cea *input;
10702 	struct dmub_cmd_edid_cea_output *output;
10703 
10704 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10705 		return false;
10706 
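	/* Build a DMUB EDID_CEA command carrying one chunk of the CEA block. */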
10707 	memset(&cmd, 0, sizeof(cmd));
10708 
10709 	input = &cmd.edid_cea.data.input;
10710 
10711 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10712 	cmd.edid_cea.header.sub_type = 0;
10713 	cmd.edid_cea.header.payload_bytes =
10714 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10715 	input->offset = offset;
10716 	input->length = length;
10717 	input->total_length = total_length;
10718 	memcpy(input->payload, data, length);
10719 
10720 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10721 	if (!res) {
10722 		DRM_ERROR("EDID CEA parser failed\n");
10723 		return false;
10724 	}
10725 
10726 	output = &cmd.edid_cea.data.output;
10727 
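	/*
	 * DMUB replies with either a per-chunk ACK or, once the final chunk
	 * has been sent, the parsed AMD VSDB contents.
	 */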
10728 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10729 		if (!output->ack.success) {
10730 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10731 					output->ack.offset);
10732 		}
10733 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10734 		if (!output->amd_vsdb.vsdb_found)
10735 			return false;
10736 
10737 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10738 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10739 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10740 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10741 	} else {
10742 		DRM_WARN("Unknown EDID CEA parser results\n");
10743 		return false;
10744 	}
10745 
10746 	return true;
10747 }
10748 
10749 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10750 		uint8_t *edid_ext, int len,
10751 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10752 {
10753 	int i;
10754 
10755 	/* send extension block to DMCU for parsing */
10756 	for (i = 0; i < len; i += 8) {
10757 		bool res;
10758 		int offset;
10759 
		/* send 8 bytes at a time */
10761 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10762 			return false;
10763 
		if (i + 8 == len) {
			/* EDID block fully sent; expect the parse result */
10766 			int version, min_rate, max_rate;
10767 
10768 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10769 			if (res) {
10770 				/* amd vsdb found */
10771 				vsdb_info->freesync_supported = 1;
10772 				vsdb_info->amd_vsdb_version = version;
10773 				vsdb_info->min_refresh_rate_hz = min_rate;
10774 				vsdb_info->max_refresh_rate_hz = max_rate;
10775 				return true;
10776 			}
10777 			/* not amd vsdb */
10778 			return false;
10779 		}
10780 
		/* check for ack */
10782 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10783 		if (!res)
10784 			return false;
10785 	}
10786 
10787 	return false;
10788 }
10789 
10790 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10791 		uint8_t *edid_ext, int len,
10792 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10793 {
10794 	int i;
10795 
	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
10799 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10800 			return false;
10801 	}
10802 
10803 	return vsdb_info->freesync_supported;
10804 }
10805 
10806 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10807 		uint8_t *edid_ext, int len,
10808 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10809 {
10810 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10811 
10812 	if (adev->dm.dmub_srv)
10813 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10814 	else
10815 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10816 }
10817 
10818 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10819 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10820 {
10821 	uint8_t *edid_ext = NULL;
10822 	int i;
10823 	bool valid_vsdb_found = false;
10824 
10825 	/*----- drm_find_cea_extension() -----*/
10826 	/* No EDID or EDID extensions */
10827 	if (edid == NULL || edid->extensions == 0)
10828 		return -ENODEV;
10829 
10830 	/* Find CEA extension */
10831 	for (i = 0; i < edid->extensions; i++) {
10832 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10833 		if (edid_ext[0] == CEA_EXT)
10834 			break;
10835 	}
10836 
10837 	if (i == edid->extensions)
10838 		return -ENODEV;
10839 
10840 	/*----- cea_db_offsets() -----*/
10841 	if (edid_ext[0] != CEA_EXT)
10842 		return -ENODEV;
10843 
10844 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10845 
10846 	return valid_vsdb_found ? i : -ENODEV;
10847 }
10848 
10849 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10850 					struct edid *edid)
10851 {
10852 	int i = 0;
10853 	struct detailed_timing *timing;
10854 	struct detailed_non_pixel *data;
10855 	struct detailed_data_monitor_range *range;
10856 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10857 			to_amdgpu_dm_connector(connector);
10858 	struct dm_connector_state *dm_con_state = NULL;
10859 
10860 	struct drm_device *dev = connector->dev;
10861 	struct amdgpu_device *adev = drm_to_adev(dev);
10862 	bool freesync_capable = false;
10863 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10864 
10865 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10867 		goto update;
10868 	}
10869 
10870 	if (!edid) {
10871 		dm_con_state = to_dm_connector_state(connector->state);
10872 
10873 		amdgpu_dm_connector->min_vfreq = 0;
10874 		amdgpu_dm_connector->max_vfreq = 0;
10875 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10876 
10877 		goto update;
10878 	}
10879 
10880 	dm_con_state = to_dm_connector_state(connector->state);
10881 
10882 	if (!amdgpu_dm_connector->dc_sink) {
10883 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10884 		goto update;
10885 	}
10886 	if (!adev->dm.freesync_module)
10887 		goto update;
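	/*
	 * DP/eDP: the FreeSync range comes from the EDID monitor range
	 * descriptor when the sink can ignore MSA timing parameters.
	 */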
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10892 		bool edid_check_required = false;
10893 
10894 		if (edid) {
10895 			edid_check_required = is_dp_capable_without_timing_msa(
10896 						adev->dm.dc,
10897 						amdgpu_dm_connector);
10898 		}
10899 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
10904 				timing	= &edid->detailed_timings[i];
10905 				data	= &timing->data.other_data;
10906 				range	= &data->data.range;
10907 				/*
10908 				 * Check if monitor has continuous frequency mode
10909 				 */
10910 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10911 					continue;
10912 				/*
10913 				 * Check for flag range limits only. If flag == 1 then
10914 				 * no additional timing information provided.
10915 				 * Default GTF, GTF Secondary curve and CVT are not
10916 				 * supported
10917 				 */
10918 				if (range->flags != 1)
10919 					continue;
10920 
10921 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10922 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10923 				amdgpu_dm_connector->pixel_clock_mhz =
10924 					range->pixel_clock_mhz * 10;
10925 
10926 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10927 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10928 
10929 				break;
10930 			}
10931 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10937 		}
10938 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10939 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10940 		if (i >= 0 && vsdb_info.freesync_supported) {
10941 			timing  = &edid->detailed_timings[i];
10942 			data    = &timing->data.other_data;
10943 
10944 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10945 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10946 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10947 				freesync_capable = true;
10948 
10949 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10950 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10951 		}
10952 	}
10953 
10954 update:
10955 	if (dm_con_state)
10956 		dm_con_state->freesync_capable = freesync_capable;
10957 
10958 	if (connector->vrr_capable_property)
10959 		drm_connector_set_vrr_capable_property(connector,
10960 						       freesync_capable);
10961 }
10962 
10963 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10964 {
10965 	struct amdgpu_device *adev = drm_to_adev(dev);
10966 	struct dc *dc = adev->dm.dc;
10967 	int i;
10968 
10969 	mutex_lock(&adev->dm.dc_lock);
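	/* Propagate force_timing_sync to every stream in the current state. */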
10970 	if (dc->current_state) {
10971 		for (i = 0; i < dc->current_state->stream_count; ++i)
10972 			dc->current_state->streams[i]
10973 				->triggered_crtc_reset.enabled =
10974 				adev->dm.force_timing_sync;
10975 
10976 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10977 		dc_trigger_sync(dc, dc->current_state);
10978 	}
10979 	mutex_unlock(&adev->dm.dc_lock);
10980 }
10981 
10982 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10983 		       uint32_t value, const char *func_name)
10984 {
10985 #ifdef DM_CHECK_ADDR_0
10986 	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
10988 		return;
10989 	}
10990 #endif
10991 	cgs_write_register(ctx->cgs_device, address, value);
10992 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10993 }
10994 
10995 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10996 			  const char *func_name)
10997 {
10998 	uint32_t value;
10999 #ifdef DM_CHECK_ADDR_0
11000 	if (address == 0) {
11001 		DC_ERR("invalid register read; address = 0\n");
11002 		return 0;
11003 	}
11004 #endif
11005 
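	/*
	 * Register reads cannot be serviced while a DMUB register-offload
	 * gather is in progress; assert and return 0 in that case.
	 */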
11006 	if (ctx->dmub_srv &&
11007 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11008 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11009 		ASSERT(false);
11010 		return 0;
11011 	}
11012 
11013 	value = cgs_read_register(ctx->cgs_device, address);
11014 
11015 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11016 
11017 	return value;
11018 }
11019 
11020 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11021 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11022 {
11023 	struct amdgpu_device *adev = ctx->driver_context;
11024 	int ret = 0;
11025 
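	/*
	 * Kick off the AUX transfer on the DMUB and wait (interruptibly, up
	 * to 10 s) for the DMUB notification handler to signal completion.
	 */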
11026 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11027 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11028 	if (ret == 0) {
11029 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11030 		return -1;
11031 	}
11032 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11033 
11034 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11035 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11036 
		/* For read case, copy data to payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
11042 	}
11043 
11044 	return adev->dm.dmub_notify->aux_reply.length;
11045 }
11046